/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_page_free(vm_page_t);
static void vm_pageout_pmap_collect(void);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
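
/*
 * Note: vm_pageout_deficit is folded into the scan target and atomically
 * cleared at the start of each vm_pageout_scan() pass.
 */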

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif
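
/*
 * The knobs above are runtime tunables; for example (illustrative usage,
 * not part of this file):
 *
 *	sysctl vm.max_launder=64
 *
 * raises the per-pass limit on dirty page flushes set by vm_max_launder.
 */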

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
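	/*
	 * mc[] is sized at twice vm_pageout_page_count so the target page
	 * can sit at the midpoint: the reverse scan below fills downward
	 * from page_base while the forward scan fills upward, keeping the
	 * final cluster contiguous in mc[page_base..page_base+pageout_count).
	 */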
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
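		/*
		 * Worked example: with vm_pageout_page_count == 16 and
		 * pindex == 53, the reverse scan takes pages 52, 51, ...
		 * and stops after taking page 48, where
		 * (53 - (6 - 1)) % 16 == 0.
		 */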
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
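	/*
	 * A typical caller is vm_pageout_clean(), which passes a cluster of
	 * up to vm_pageout_page_count (16) pages; pageout_status[] above is
	 * a variable-length array sized to the actual count at run time.
	 */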
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_page_protect(mc[i], VM_PROT_READ);
	}
	object = mc[0]->object;
	vm_page_unlock_queues();
	vm_object_pip_add(object, count);
	VM_OBJECT_UNLOCK(object);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
	    pageout_status);

	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				pmap_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap, first_object, desired)
	pmap_t pmap;
	vm_object_t first_object;
	long desired;
{
	vm_object_t backing_object, object;
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE || first_object->type == OBJT_PHYS)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		if (object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(pmap) <= desired) {
				vm_page_unlock_queues();
				goto unlock_return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(pmap, p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			p = next;
		}
		vm_page_unlock_queues();
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	long desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		GIANT_REQUIRED;
		vm_page_lock_queues();
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * Warning! The page queue lock is released and reacquired.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_busy(m);
	vm_page_unlock_queues();
	/*
	 * Avoid a lock order reversal.  The page must be busy.
	 */
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	pmap_remove_all(m);
	vm_page_free(m);
	VM_OBJECT_UNLOCK(object);
	cnt.v_dfree++;
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
static void
vm_pageout_pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone;

	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	vm_page_lock_queues();
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	vm_page_unlock_queues();
	pmap_pagedaemon_waken = 0;
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();
	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	vm_pageout_pmap_collect();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
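	/*
	 * For example, with the default vm_max_launder of 32, pass 0
	 * flushes at most 32 dirty pages; on pass 1 and later maxlaunder
	 * is raised to 10000, which is effectively unlimited.
	 */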
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object))
			continue;
		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_busy(m);
			pmap_remove_all(m);
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
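			/*
			 * In short: vnode-backed objects are always eligible
			 * for pageout here; swap-backed objects are skipped
			 * while pageouts are deferred or disabled, except
			 * that deferral is overridden once free memory falls
			 * below the minimum (vm_page_count_min()).
			 */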
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				VM_OBJECT_UNLOCK(object);
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
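			/*
			 * Hence the vget() below uses LK_TIMELOCK, so a
			 * contended vnode lock fails quickly rather than
			 * stalling the page daemon; each miss is counted in
			 * the vm.pageout_lock_miss sysctl.
			 */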
			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				vm_page_unlock_queues();
				VI_LOCK(vp);
				VM_OBJECT_UNLOCK(object);
				if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK |
				    LK_TIMELOCK, curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it.
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
unlock_and_continue:
			VM_OBJECT_UNLOCK(object);
			if (vp) {
				vm_page_unlock_queues();
				vput(vp);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			continue;
		}
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
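		/*
		 * act_count saturates at ACT_MAX and decays by ACT_DECLINE
		 * while a page goes unreferenced; a page becomes a
		 * deactivation candidate once it reaches zero, when the
		 * pure LRU vm.pageout_algorithm knob is set, or when its
		 * object is no longer referenced.
		 */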
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					pmap_remove_all(m);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
1136f35329acSJohn Dyson splx(s);
11378ffc1519SAlan Cox vm_page_unlock_queues();
1138ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING)
1139ceb0cf87SJohn Dyson /*
1140ceb0cf87SJohn Dyson * Idle process swapout -- run once per second.
1141ceb0cf87SJohn Dyson */
1142ceb0cf87SJohn Dyson if (vm_swap_idle_enabled) {
1143ceb0cf87SJohn Dyson static long lsec;
1144227ee8a1SPoul-Henning Kamp if (time_second != lsec) {
1145ceb0cf87SJohn Dyson vm_pageout_req_swapout |= VM_SWAP_IDLE;
1146ceb0cf87SJohn Dyson vm_req_vmdaemon();
1147227ee8a1SPoul-Henning Kamp lsec = time_second;
1148ceb0cf87SJohn Dyson }
1149ceb0cf87SJohn Dyson }
1150ceb0cf87SJohn Dyson #endif
1151ceb0cf87SJohn Dyson
11525663e6deSDavid Greenman /*
1153f6b04d2bSDavid Greenman * If we didn't get enough free pages and we have skipped a vnode
11544c1f8ee9SDavid Greenman * in a writeable object, wake up the sync daemon, and kick swapout
11554c1f8ee9SDavid Greenman * if we did not get enough free pages.
1156f6b04d2bSDavid Greenman */
115790ecac61SMatthew Dillon if (vm_paging_target() > 0) {
115890ecac61SMatthew Dillon if (vnodes_skipped && vm_page_count_min())
1159d50c1994SPeter Wemm (void) speedup_syncer();
116038efa82bSJohn Dyson #if !defined(NO_SWAPPING)
116190ecac61SMatthew Dillon if (vm_swap_enabled && vm_page_count_target()) {
11624c1f8ee9SDavid Greenman vm_req_vmdaemon();
1163ceb0cf87SJohn Dyson vm_pageout_req_swapout |= VM_SWAP_NORMAL;
11644c1f8ee9SDavid Greenman }
11655afce282SDavid Greenman #endif
11664c1f8ee9SDavid Greenman }
11674c1f8ee9SDavid Greenman
1168f6b04d2bSDavid Greenman /*
1169e92686d0SDavid Schultz * If we are critically low on one of RAM or swap and low on
1170e92686d0SDavid Schultz * the other, kill the largest process. However, we avoid
1171e92686d0SDavid Schultz * doing this on the first pass in order to give ourselves a
1172e92686d0SDavid Schultz * chance to flush out dirty vnode-backed pages and to allow
1173e92686d0SDavid Schultz * active pages to be moved to the inactive queue and reclaimed.
11741c58e4e5SJohn Baldwin *
11751c58e4e5SJohn Baldwin * We keep the process bigproc locked once we find it to keep anyone
11761c58e4e5SJohn Baldwin * from messing with it; however, there is a possibility of
11771c58e4e5SJohn Baldwin * deadlock if process B is bigproc and one of its child processes
11781c58e4e5SJohn Baldwin * attempts to propagate a signal to B while we are waiting for
11791c58e4e5SJohn Baldwin * another process's lock while walking this list. To avoid this,
11801c58e4e5SJohn Baldwin * we don't block on the process lock but just skip a process if it is already locked.
11815663e6deSDavid Greenman */
1182e92686d0SDavid Schultz if (pass != 0 &&
11838f60c087SPoul-Henning Kamp ((swap_pager_avail < 64 && vm_page_count_min()) ||
1184e92686d0SDavid Schultz (swap_pager_full && vm_paging_target() > 0))) {
11855663e6deSDavid Greenman bigproc = NULL;
11865663e6deSDavid Greenman bigsize = 0;
11871005a129SJohn Baldwin sx_slock(&allproc_lock);
1188e602ba25SJulian Elischer FOREACH_PROC_IN_SYSTEM(p) {
1189e602ba25SJulian Elischer int breakout;
11905663e6deSDavid Greenman /*
11911c58e4e5SJohn Baldwin * If this process is already locked, skip it.
11921c58e4e5SJohn Baldwin */
11931c58e4e5SJohn Baldwin if (PROC_TRYLOCK(p) == 0)
11941c58e4e5SJohn Baldwin continue;
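/*
 * The PROC_TRYLOCK() above avoids a lock-order deadlock: the list lock
 * is already held, so we must never sleep waiting for a process lock.
 * A hedged userspace sketch of the same pattern, with hypothetical
 * types standing in for the kernel's:
 */
#if 0
#include <pthread.h>

struct entry {
	pthread_mutex_t	lock;
	struct entry	*next;
};

static void
scan_list(struct entry *head)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next) {
		if (pthread_mutex_trylock(&e->lock) != 0)
			continue;	/* contended: skip rather than block */
		/* ... examine the entry here ... */
		pthread_mutex_unlock(&e->lock);
	}
}
#endif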
11951c58e4e5SJohn Baldwin /*
1196f4cf2141SWes Peters * If this is a system or protected process, skip it.
11975663e6deSDavid Greenman */
1198ef6020d1SMike Silbersack if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
1199f4cf2141SWes Peters (p->p_flag & P_PROTECTED) ||
12008f60c087SPoul-Henning Kamp ((p->p_pid < 48) && (swap_pager_avail != 0))) {
12018606d880SJohn Baldwin PROC_UNLOCK(p);
12025663e6deSDavid Greenman continue;
12035663e6deSDavid Greenman }
12045663e6deSDavid Greenman /*
12055663e6deSDavid Greenman * if the process is in a non-running state,
1206e602ba25SJulian Elischer * don't touch it. Check all the threads individually.
12075663e6deSDavid Greenman */
12089ed346baSBosko Milekic mtx_lock_spin(&sched_lock);
1209e602ba25SJulian Elischer breakout = 0;
1210e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) {
121171fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) &&
121271fad9fdSJulian Elischer !TD_IS_RUNNING(td) &&
121371fad9fdSJulian Elischer !TD_IS_SLEEPING(td)) {
1214e602ba25SJulian Elischer breakout = 1;
1215e602ba25SJulian Elischer break;
1216e602ba25SJulian Elischer }
1217e602ba25SJulian Elischer }
1218e602ba25SJulian Elischer if (breakout) {
12199ed346baSBosko Milekic mtx_unlock_spin(&sched_lock);
12201c58e4e5SJohn Baldwin PROC_UNLOCK(p);
12215663e6deSDavid Greenman continue;
12225663e6deSDavid Greenman }
12239ed346baSBosko Milekic mtx_unlock_spin(&sched_lock);
12245663e6deSDavid Greenman /*
12255663e6deSDavid Greenman * get the process size
12265663e6deSDavid Greenman */
122772d97679SDavid Schultz if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
122872d97679SDavid Schultz PROC_UNLOCK(p);
122972d97679SDavid Schultz continue;
123072d97679SDavid Schultz }
123172d97679SDavid Schultz size = vmspace_swap_count(p->p_vmspace);
123272d97679SDavid Schultz vm_map_unlock_read(&p->p_vmspace->vm_map);
123372d97679SDavid Schultz size += vmspace_resident_count(p->p_vmspace);
12345663e6deSDavid Greenman /*
12355663e6deSDavid Greenman * if this process is bigger than the biggest one,
12365663e6deSDavid Greenman * remember it.
12375663e6deSDavid Greenman */
12385663e6deSDavid Greenman if (size > bigsize) {
12391c58e4e5SJohn Baldwin if (bigproc != NULL)
12401c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc);
12415663e6deSDavid Greenman bigproc = p;
12425663e6deSDavid Greenman bigsize = size;
12431c58e4e5SJohn Baldwin } else
12441c58e4e5SJohn Baldwin PROC_UNLOCK(p);
12455663e6deSDavid Greenman }
12461005a129SJohn Baldwin sx_sunlock(&allproc_lock);
12475663e6deSDavid Greenman if (bigproc != NULL) {
1248b40ce416SJulian Elischer struct ksegrp *kg;
1249729b1e51SDavid Greenman killproc(bigproc, "out of swap space");
12509ed346baSBosko Milekic mtx_lock_spin(&sched_lock);
1251b40ce416SJulian Elischer FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
1252b43179fbSJeff Roberson sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
1253b40ce416SJulian Elischer }
12549ed346baSBosko Milekic mtx_unlock_spin(&sched_lock);
12551c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc);
125624a1cce3SDavid Greenman wakeup(&cnt.v_free_count);
12575663e6deSDavid Greenman }
12585663e6deSDavid Greenman }
125926f9a767SRodney W. Grimes }
126026f9a767SRodney W. Grimes
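/*
 * The selection loop above, reduced to its core as a sketch: candidate
 * size is swapped pages plus resident pages, system/protected entries
 * are skipped, and the current maximum is remembered.  All names here
 * are hypothetical stand-ins, not kernel interfaces.
 */
#if 0
#include <stddef.h>

struct cand {
	int		is_system;	/* P_SYSTEM/P_PROTECTED analogue */
	long		resident;	/* vmspace_resident_count() analogue */
	long		swapped;	/* vmspace_swap_count() analogue */
	struct cand	*next;
};

static struct cand *
pick_biggest(struct cand *head)
{
	struct cand *p, *big = NULL;
	long size, bigsize = 0;

	for (p = head; p != NULL; p = p->next) {
		if (p->is_system)
			continue;	/* never pick system processes */
		size = p->swapped + p->resident;
		if (size > bigsize) {
			big = p;
			bigsize = size;
		}
	}
	return (big);	/* the process to kill, or NULL */
}
#endif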
1261dc2efb27SJohn Dyson /*
1262dc2efb27SJohn Dyson * This routine tries to maintain the pseudo LRU active queue,
1263dc2efb27SJohn Dyson * so that during long periods of time when there is no paging,
1264956f3135SPhilippe Charnier * some statistics accumulation still occurs. This code
1265dc2efb27SJohn Dyson * helps the situation where paging just starts to occur.
1266dc2efb27SJohn Dyson */
1267dc2efb27SJohn Dyson static void
1268dc2efb27SJohn Dyson vm_pageout_page_stats()
1269dc2efb27SJohn Dyson {
1270dc2efb27SJohn Dyson vm_page_t m,next;
1271dc2efb27SJohn Dyson int pcount,tpcount; /* Number of pages to check */
1272dc2efb27SJohn Dyson static int fullintervalcount = 0;
1273bef608bdSJohn Dyson int page_shortage;
127425db2c54SMatthew Dillon int s0;
1275bef608bdSJohn Dyson
127690ecac61SMatthew Dillon page_shortage =
127790ecac61SMatthew Dillon (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1278bef608bdSJohn Dyson (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
127990ecac61SMatthew Dillon
1280bef608bdSJohn Dyson if (page_shortage <= 0)
1281bef608bdSJohn Dyson return;
1282dc2efb27SJohn Dyson
128325db2c54SMatthew Dillon s0 = splvm();
128448c0444cSAlan Cox vm_page_lock_queues();
1285dc2efb27SJohn Dyson pcount = cnt.v_active_count;
1286dc2efb27SJohn Dyson fullintervalcount += vm_pageout_stats_interval;
1287dc2efb27SJohn Dyson if (fullintervalcount < vm_pageout_full_stats_interval) {
1288dc2efb27SJohn Dyson tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1289dc2efb27SJohn Dyson if (pcount > tpcount)
1290dc2efb27SJohn Dyson pcount = tpcount;
1291883f3caaSMatthew Dillon } else {
1292883f3caaSMatthew Dillon fullintervalcount = 0;
1293dc2efb27SJohn Dyson }
1294dc2efb27SJohn Dyson
1295be72f788SAlan Cox m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1296dc2efb27SJohn Dyson while ((m != NULL) && (pcount-- > 0)) {
12977e006499SJohn Dyson int actcount;
1298dc2efb27SJohn Dyson
1299dc2efb27SJohn Dyson if (m->queue != PQ_ACTIVE) {
1300dc2efb27SJohn Dyson break;
1301dc2efb27SJohn Dyson }
1302dc2efb27SJohn Dyson
1303dc2efb27SJohn Dyson next = TAILQ_NEXT(m, pageq);
1304dc2efb27SJohn Dyson /*
1305dc2efb27SJohn Dyson * Don't deactivate pages that are busy.
1306dc2efb27SJohn Dyson */
1307dc2efb27SJohn Dyson if ((m->busy != 0) ||
1308dc2efb27SJohn Dyson (m->flags & PG_BUSY) ||
1309dc2efb27SJohn Dyson (m->hold_count != 0)) {
13106d03d577SMatthew Dillon vm_pageq_requeue(m);
1311dc2efb27SJohn Dyson m = next;
1312dc2efb27SJohn Dyson continue;
1313dc2efb27SJohn Dyson }
1314dc2efb27SJohn Dyson
13157e006499SJohn Dyson actcount = 0;
1316dc2efb27SJohn Dyson if (m->flags & PG_REFERENCED) {
1317e69763a3SDoug Rabson vm_page_flag_clear(m, PG_REFERENCED);
13187e006499SJohn Dyson actcount += 1;
1319dc2efb27SJohn Dyson }
1320dc2efb27SJohn Dyson
13210385347cSPeter Wemm actcount += pmap_ts_referenced(m);
13227e006499SJohn Dyson if (actcount) {
13237e006499SJohn Dyson m->act_count += ACT_ADVANCE + actcount;
1324dc2efb27SJohn Dyson if (m->act_count > ACT_MAX)
1325dc2efb27SJohn Dyson m->act_count = ACT_MAX;
13266d03d577SMatthew Dillon vm_pageq_requeue(m);
1327dc2efb27SJohn Dyson } else {
1328dc2efb27SJohn Dyson if (m->act_count == 0) {
13297e006499SJohn Dyson /*
13302b6b0df7SMatthew Dillon * We turn off page access, so that we have
13312b6b0df7SMatthew Dillon * more accurate RSS stats. We don't do this
13322b6b0df7SMatthew Dillon * in the normal page deactivation when the
13332b6b0df7SMatthew Dillon * system is loaded VM-wise, because the
13342b6b0df7SMatthew Dillon * cost of the large number of page protect
13352b6b0df7SMatthew Dillon * operations would be higher than the value
13362b6b0df7SMatthew Dillon * of doing the operation.
13377e006499SJohn Dyson */
13384fec79beSAlan Cox pmap_remove_all(m);
1339dc2efb27SJohn Dyson vm_page_deactivate(m);
1340dc2efb27SJohn Dyson } else {
1341dc2efb27SJohn Dyson m->act_count -= min(m->act_count, ACT_DECLINE);
13426d03d577SMatthew Dillon vm_pageq_requeue(m);
1343dc2efb27SJohn Dyson }
1344dc2efb27SJohn Dyson }
1345dc2efb27SJohn Dyson
1346dc2efb27SJohn Dyson m = next;
1347dc2efb27SJohn Dyson }
134848c0444cSAlan Cox vm_page_unlock_queues();
134925db2c54SMatthew Dillon splx(s0);
1350dc2efb27SJohn Dyson }
1351dc2efb27SJohn Dyson
1352df8bae1dSRodney W. Grimes /*
1353df8bae1dSRodney W. Grimes * vm_pageout is the high level pageout daemon.
1354df8bae1dSRodney W. Grimes */
13552b14f991SJulian Elischer static void
135626f9a767SRodney W. Grimes vm_pageout()
1357df8bae1dSRodney W. Grimes {
13588b245767SAlan Cox int error, pass, s;
13590384fff8SJason Evans
136069a78d46SJohn Baldwin mtx_lock(&Giant);
13610384fff8SJason Evans
1362df8bae1dSRodney W. Grimes /*
1363df8bae1dSRodney W. Grimes * Initialize some paging parameters.
1364df8bae1dSRodney W. Grimes */
1365f6b04d2bSDavid Greenman cnt.v_interrupt_free_min = 2;
1366f35329acSJohn Dyson if (cnt.v_page_count < 2000)
1367f35329acSJohn Dyson vm_pageout_page_count = 8;
1368f6b04d2bSDavid Greenman
136945ae1d91SAlan Cox /*
137045ae1d91SAlan Cox * v_free_reserved needs to include enough for the largest
137145ae1d91SAlan Cox * swap pager structures plus enough for any pv_entry structs
137245ae1d91SAlan Cox * when paging.
137345ae1d91SAlan Cox */
137445ae1d91SAlan Cox if (cnt.v_page_count > 1024)
137545ae1d91SAlan Cox cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
137645ae1d91SAlan Cox else
137745ae1d91SAlan Cox cnt.v_free_min = 4;
137845ae1d91SAlan Cox cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
137945ae1d91SAlan Cox cnt.v_interrupt_free_min;
138045ae1d91SAlan Cox cnt.v_free_reserved = vm_pageout_page_count +
138145ae1d91SAlan Cox cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_L2_SIZE;
138245ae1d91SAlan Cox cnt.v_free_severe = cnt.v_free_min / 2;
138345ae1d91SAlan Cox cnt.v_free_min += cnt.v_free_reserved;
138445ae1d91SAlan Cox cnt.v_free_severe += cnt.v_free_reserved;
138545ae1d91SAlan Cox
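/*
 * The threshold initialization above in one place, as a sketch that can
 * be evaluated for a hypothetical page count.  The constants assumed
 * here (4 KB pages, 64 KB MAXBSIZE, 16-page vm_pageout_page_count, 256
 * for PQ_L2_SIZE) are illustrative, not authoritative kernel values.
 */
#if 0
static void
compute_thresholds(long page_count)
{
	long interrupt_free_min = 2;
	long pageout_page_count = (page_count < 2000) ? 8 : 16;
	long free_min, pageout_free_min, free_reserved, free_severe;

	free_min = (page_count > 1024) ? 4 + (page_count - 1024) / 200 : 4;
	pageout_free_min = (2 * 65536) / 4096 + interrupt_free_min;
	free_reserved = pageout_page_count + pageout_free_min +
	    page_count / 768 + 256;
	free_severe = free_min / 2 + free_reserved;
	free_min += free_reserved;
	/* e.g. 262144 pages (1 GB): free_min = 1309 + free_reserved. */
	(void)free_min; (void)free_severe;
}
#endif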
1386ed74321bSDavid Greenman /*
13872b6b0df7SMatthew Dillon * v_free_target and v_cache_min control pageout hysteresis. Note
13882b6b0df7SMatthew Dillon * that these are more a measure of the VM cache queue hysteresis
13892b6b0df7SMatthew Dillon * than the VM free queue. Specifically, v_free_target is the
13902b6b0df7SMatthew Dillon * high water mark (free+cache pages).
13912b6b0df7SMatthew Dillon *
13922b6b0df7SMatthew Dillon * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
13932b6b0df7SMatthew Dillon * low water mark, while v_free_min is the stop. v_cache_min must
13942b6b0df7SMatthew Dillon * be big enough to handle memory needs while the pageout daemon
13952b6b0df7SMatthew Dillon * is signalled and run to free more pages.
1396ed74321bSDavid Greenman */
1397a15403deSJohn Dyson if (cnt.v_free_count > 6144)
13982b6b0df7SMatthew Dillon cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1399a15403deSJohn Dyson else
1400a15403deSJohn Dyson cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
14016f2b142eSDavid Greenman
1402a15403deSJohn Dyson if (cnt.v_free_count > 2048) {
1403a15403deSJohn Dyson cnt.v_cache_min = cnt.v_free_target;
1404a15403deSJohn Dyson cnt.v_cache_max = 2 * cnt.v_cache_min;
1405a15403deSJohn Dyson cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
14060d94caffSDavid Greenman } else {
14070d94caffSDavid Greenman cnt.v_cache_min = 0;
14080d94caffSDavid Greenman cnt.v_cache_max = 0;
14096f2b142eSDavid Greenman cnt.v_inactive_target = cnt.v_free_count / 4;
14100d94caffSDavid Greenman }
1411e47ed70bSJohn Dyson if (cnt.v_inactive_target > cnt.v_free_count / 3)
1412e47ed70bSJohn Dyson cnt.v_inactive_target = cnt.v_free_count / 3;
1413df8bae1dSRodney W. Grimes
1414df8bae1dSRodney W. Grimes /* XXX does not really belong here */
1415df8bae1dSRodney W. Grimes if (vm_page_max_wired == 0)
1416df8bae1dSRodney W. Grimes vm_page_max_wired = cnt.v_free_count / 3;
1417df8bae1dSRodney W. Grimes
1418dc2efb27SJohn Dyson if (vm_pageout_stats_max == 0)
1419dc2efb27SJohn Dyson vm_pageout_stats_max = cnt.v_free_target;
1420dc2efb27SJohn Dyson
1421dc2efb27SJohn Dyson /*
1422dc2efb27SJohn Dyson * Set interval in seconds for stats scan.
1423dc2efb27SJohn Dyson */
1424dc2efb27SJohn Dyson if (vm_pageout_stats_interval == 0)
1425bef608bdSJohn Dyson vm_pageout_stats_interval = 5;
1426dc2efb27SJohn Dyson if (vm_pageout_full_stats_interval == 0)
1427dc2efb27SJohn Dyson vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1428dc2efb27SJohn Dyson
1429dc2efb27SJohn Dyson /*
1430dc2efb27SJohn Dyson * Set maximum free per pass.
1431dc2efb27SJohn Dyson */
1432dc2efb27SJohn Dyson if (vm_pageout_stats_free_max == 0)
1433bef608bdSJohn Dyson vm_pageout_stats_free_max = 5;
1434dc2efb27SJohn Dyson
143524a1cce3SDavid Greenman swap_pager_swap_init();
14362b6b0df7SMatthew Dillon pass = 0;
1437df8bae1dSRodney W. Grimes /*
14380d94caffSDavid Greenman * The pageout daemon is never done, so loop forever.
1439df8bae1dSRodney W. Grimes */
1440df8bae1dSRodney W. Grimes while (TRUE) {
14418b245767SAlan Cox s = splvm();
14428e1d8de5SAlan Cox vm_page_lock_queues();
1443936524aaSMatthew Dillon /*
1444936524aaSMatthew Dillon * If we have enough free memory, wake up waiters. Do
1445936524aaSMatthew Dillon * not clear vm_pages_needed until we reach our target,
1446936524aaSMatthew Dillon * otherwise we may be woken up over and over again and
1447936524aaSMatthew Dillon * waste a lot of CPU.
1448936524aaSMatthew Dillon */
1449936524aaSMatthew Dillon if (vm_pages_needed && !vm_page_count_min()) {
1450a1c0a785SAlan Cox if (!vm_paging_needed())
1451936524aaSMatthew Dillon vm_pages_needed = 0;
1452936524aaSMatthew Dillon wakeup(&cnt.v_free_count);
1453936524aaSMatthew Dillon }
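/*
 * The wakeup hysteresis above in miniature: wake sleepers whenever we
 * are above the minimum, but clear the "needed" flag only once the
 * paging target is met, so the daemon is not re-woken over and over
 * while memory is still short.  Names here are hypothetical.
 */
#if 0
static int pages_needed;

static void
maybe_wakeup(int above_min, int target_met)
{
	if (pages_needed && above_min) {
		if (target_met)
			pages_needed = 0;
		/* wakeup(&cnt.v_free_count) equivalent goes here */
	}
}
#endif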
1454936524aaSMatthew Dillon if (vm_pages_needed) {
145590ecac61SMatthew Dillon /*
14562b6b0df7SMatthew Dillon * Still not done: take a second pass without waiting
14572b6b0df7SMatthew Dillon * (unlimited dirty cleaning); otherwise sleep a bit
14582b6b0df7SMatthew Dillon * and try again.
145990ecac61SMatthew Dillon */
14602b6b0df7SMatthew Dillon ++pass;
14612b6b0df7SMatthew Dillon if (pass > 1)
14628e1d8de5SAlan Cox msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
146323955314SAlfred Perlstein "psleep", hz/2);
146490ecac61SMatthew Dillon } else {
146590ecac61SMatthew Dillon /*
14662b6b0df7SMatthew Dillon * Good enough; sleep & handle stats. Prime the pass
14672b6b0df7SMatthew Dillon * for the next run.
146890ecac61SMatthew Dillon */
14692b6b0df7SMatthew Dillon if (pass > 1)
14702b6b0df7SMatthew Dillon pass = 1;
14712b6b0df7SMatthew Dillon else
14722b6b0df7SMatthew Dillon pass = 0;
14738e1d8de5SAlan Cox error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
14740cddd8f0SMatthew Dillon "psleep", vm_pageout_stats_interval * hz);
1475dc2efb27SJohn Dyson if (error && !vm_pages_needed) {
14768e1d8de5SAlan Cox vm_page_unlock_queues();
1477dc2efb27SJohn Dyson splx(s);
14782b6b0df7SMatthew Dillon pass = 0;
1479dc2efb27SJohn Dyson vm_pageout_page_stats();
1480dc2efb27SJohn Dyson continue;
1481dc2efb27SJohn Dyson }
1482f919ebdeSDavid Greenman }
1483b18bfc3dSJohn Dyson if (vm_pages_needed)
1484b18bfc3dSJohn Dyson cnt.v_pdwakeups++;
14858e1d8de5SAlan Cox vm_page_unlock_queues();
1486f919ebdeSDavid Greenman splx(s);
14872b6b0df7SMatthew Dillon vm_pageout_scan(pass);
1488df8bae1dSRodney W. Grimes }
1489df8bae1dSRodney W. Grimes }
149026f9a767SRodney W. Grimes
14916b4b77adSAlan Cox /*
14926b4b77adSAlan Cox * Unless the page queue lock is held by the caller, this function
14936b4b77adSAlan Cox * should be regarded as advisory. Specifically, the caller should
14946b4b77adSAlan Cox * not msleep() on &cnt.v_free_count following this function unless
14956b4b77adSAlan Cox * the page queue lock is held until the msleep() is performed.
14966b4b77adSAlan Cox */
1497e0c5a895SJohn Dyson void
1498e0c5a895SJohn Dyson pagedaemon_wakeup()
1499e0c5a895SJohn Dyson {
1500a1c0a785SAlan Cox
1501b40ce416SJulian Elischer if (!vm_pages_needed && curthread->td_proc != pageproc) {
1502a1c0a785SAlan Cox vm_pages_needed = 1;
1503e0c5a895SJohn Dyson wakeup(&vm_pages_needed);
1504e0c5a895SJohn Dyson }
1505e0c5a895SJohn Dyson }
1506e0c5a895SJohn Dyson
150738efa82bSJohn Dyson #if !defined(NO_SWAPPING)
15085afce282SDavid Greenman static void
15095afce282SDavid Greenman vm_req_vmdaemon()
15105afce282SDavid Greenman {
15115afce282SDavid Greenman static int lastrun = 0;
15125afce282SDavid Greenman
1513b18bfc3dSJohn Dyson if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
15145afce282SDavid Greenman wakeup(&vm_daemon_needed);
15155afce282SDavid Greenman lastrun = ticks;
15165afce282SDavid Greenman }
15175afce282SDavid Greenman }
15185afce282SDavid Greenman
15192b14f991SJulian Elischer static void
15204f9fb771SBruce Evans vm_daemon()
15210d94caffSDavid Greenman {
15222fe6e4d7SDavid Greenman struct proc *p;
1523e602ba25SJulian Elischer int breakout;
1524e602ba25SJulian Elischer struct thread *td;
15250d94caffSDavid Greenman
15263614c6fcSJohn Baldwin mtx_lock(&Giant);
15272fe6e4d7SDavid Greenman while (TRUE) {
15280cddd8f0SMatthew Dillon tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
15294c1f8ee9SDavid Greenman if (vm_pageout_req_swapout) {
1530ceb0cf87SJohn Dyson swapout_procs(vm_pageout_req_swapout);
15314c1f8ee9SDavid Greenman vm_pageout_req_swapout = 0;
15324c1f8ee9SDavid Greenman }
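/*
 * A sketch of the rate limiter in vm_req_vmdaemon() above: wake the
 * daemon at most once per interval by comparing against the last run
 * time, where the "less than" test catches counter wraparound.  The
 * userspace clock and one-second interval below are assumptions for
 * the illustration.
 */
#if 0
#include <time.h>

static void
request_daemon(void)
{
	static time_t lastrun;
	time_t now = time(NULL);

	if (now > lastrun + 1 || now < lastrun) {
		/* wakeup(&vm_daemon_needed) equivalent goes here */
		lastrun = now;
	}
}
#endif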
15332fe6e4d7SDavid Greenman /*
15340d94caffSDavid Greenman * scan the processes for exceeding their rlimits, or if the
15350d94caffSDavid Greenman * process is swapped out -- deactivate pages
15362fe6e4d7SDavid Greenman */
15371005a129SJohn Baldwin sx_slock(&allproc_lock);
1538fc2ffbe6SPoul-Henning Kamp LIST_FOREACH(p, &allproc, p_list) {
1539fe2144fdSLuoqi Chen vm_pindex_t limit, size;
15402fe6e4d7SDavid Greenman
15412fe6e4d7SDavid Greenman /*
15422fe6e4d7SDavid Greenman * if this is a system process or if we have already
15432fe6e4d7SDavid Greenman * looked at this process, skip it.
15442fe6e4d7SDavid Greenman */
1545897ecacdSJohn Baldwin PROC_LOCK(p);
15462fe6e4d7SDavid Greenman if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
1547897ecacdSJohn Baldwin PROC_UNLOCK(p);
15482fe6e4d7SDavid Greenman continue;
15492fe6e4d7SDavid Greenman }
15502fe6e4d7SDavid Greenman /*
15512fe6e4d7SDavid Greenman * if the process is in a non-running state,
15522fe6e4d7SDavid Greenman * don't touch it.
15532fe6e4d7SDavid Greenman */
15549ed346baSBosko Milekic mtx_lock_spin(&sched_lock);
1555e602ba25SJulian Elischer breakout = 0;
1556e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) {
155771fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) &&
155871fad9fdSJulian Elischer !TD_IS_RUNNING(td) &&
155971fad9fdSJulian Elischer !TD_IS_SLEEPING(td)) {
1560e602ba25SJulian Elischer breakout = 1;
1561e602ba25SJulian Elischer break;
1562e602ba25SJulian Elischer }
1563e602ba25SJulian Elischer }
15649ed346baSBosko Milekic mtx_unlock_spin(&sched_lock);
1565897ecacdSJohn Baldwin if (breakout) {
1566897ecacdSJohn Baldwin PROC_UNLOCK(p);
15672fe6e4d7SDavid Greenman continue;
15682fe6e4d7SDavid Greenman }
15692fe6e4d7SDavid Greenman /*
15702fe6e4d7SDavid Greenman * get a limit
15712fe6e4d7SDavid Greenman */
1572fe2144fdSLuoqi Chen limit = OFF_TO_IDX(
1573fe2144fdSLuoqi Chen qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
1574fe2144fdSLuoqi Chen p->p_rlimit[RLIMIT_RSS].rlim_max));
15752fe6e4d7SDavid Greenman
15762fe6e4d7SDavid Greenman /*
15770d94caffSDavid Greenman * let processes that are swapped out really be
15780d94caffSDavid Greenman * swapped out: set the limit to nothing (this will
15790d94caffSDavid Greenman * force a swap-out).
15802fe6e4d7SDavid Greenman */
15818606d880SJohn Baldwin if ((p->p_sflag & PS_INMEM) == 0)
15820d94caffSDavid Greenman limit = 0; /* XXX */
1583897ecacdSJohn Baldwin PROC_UNLOCK(p);
15842fe6e4d7SDavid Greenman
1585fe2144fdSLuoqi Chen size = vmspace_resident_count(p->p_vmspace);
15862fe6e4d7SDavid Greenman if (limit >= 0 && size >= limit) {
1587fe2144fdSLuoqi Chen vm_pageout_map_deactivate_pages(
1588fe2144fdSLuoqi Chen &p->p_vmspace->vm_map, limit);
15892fe6e4d7SDavid Greenman }
15902fe6e4d7SDavid Greenman }
15911005a129SJohn Baldwin sx_sunlock(&allproc_lock);
159224a1cce3SDavid Greenman }
15932fe6e4d7SDavid Greenman }
1594a1287949SEivind Eklund #endif /* !defined(NO_SWAPPING) */
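/*
 * The RSS limit check above as a sketch: the effective rlimit is the
 * smaller of the current and maximum values, converted from bytes to
 * pages before comparing with the resident page count.  Modeling
 * OFF_TO_IDX() as a divide by a 4096-byte page is an assumption for
 * the illustration.
 */
#if 0
static int
over_rss_limit(unsigned long rlim_cur, unsigned long rlim_max,
    unsigned long resident_pages)
{
	unsigned long limit_bytes = (rlim_cur < rlim_max) ? rlim_cur : rlim_max;
	unsigned long limit_pages = limit_bytes / 4096;	/* OFF_TO_IDX model */

	return (resident_pages >= limit_pages);
}
#endif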