/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);
static int vm_pageout_free_page_calc(vm_size_t count);
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */
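
/*
 * A rough sketch of how these globals fit into the rest of the VM
 * system: an allocator that comes up short records its need in
 * vm_pageout_deficit and wakes the daemon sleeping on vm_pages_needed;
 * threads blocked waiting for memory sleep on cnt.v_free_count and are
 * woken as pages are recovered (e.g. at the end of vm_pageout_scan()).
 */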

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
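	 * (The caller, vm_pageout_scan(), is expected to have performed
	 * that check before initiating the flush.)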
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
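		 * For example, with vm_pageout_page_count 16 and a dirty
		 * page at pindex 37, the reverse scan collects 36 down to
		 * 32 and stops there (32 is a multiple of 16); the forward
		 * scan may then extend the cluster up toward pindex 47.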
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e., busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
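	 * (Marking the pages read-only means that a write during the I/O
	 * faults and re-dirties the page, so the modification is not lost
	 * when the pager later marks the page clean.)
	 *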
	 * NOTE!  mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}
	object = mc[0]->object;
	vm_page_unlock_queues();
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	GIANT_REQUIRED;
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(map->pmap) <= desired) {
				vm_page_unlock_queues();
				return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		vm_page_unlock_queues();
		object = object->backing_object;
	}
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	GIANT_REQUIRED;
	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			    (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	};

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired)
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	vm_map_unlock(map);
	return;
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */
void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	GIANT_REQUIRED;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	cnt.v_dfree++;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();
	uma_reclaim();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
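	 * (vm_paging_target() is roughly the shortfall relative to the
	 * free and cache targets; the deficit saved above adds back any
	 * recent allocation shortfall recorded in vm_pageout_deficit.)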
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue since they are most likely being paged out.
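		 * (Whoever holds the page busy will settle its fate;
		 * counting it in addl_page_shortage lets the active-queue
		 * scan below make up the difference.)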
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_lock_queues();
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_lock_queues();
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_page_lock_queues();
			vm_pageout_page_free(m);
			vm_page_unlock_queues();
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
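		 * (Pages on the cache queue are clean and unmapped, so
		 * vm_page_alloc() can reclaim them almost as cheaply as
		 * truly free pages.)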
		 */
		} else if (m->dirty == 0) {
			vm_page_lock_queues();
			vm_page_cache(m);
			vm_page_unlock_queues();
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.   It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.
			 * This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) {
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vput().  We don't move the
				 * page back onto the end of the queue, so
				 * that the statistics stay more correct.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_lock_queues();
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			vm_page_unlock_queues();
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	vm_page_lock_queues();
	/*
	 * Scan the active queue for things we can deactivate. We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
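	 * (A page referenced since the last scan gains ACT_ADVANCE plus
	 * the pmap reference count; an unreferenced page loses ACT_DECLINE
	 * per scan and becomes a deactivation candidate when act_count
	 * reaches zero.)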
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows
	 * interrupt code to be guaranteed space.  Since both the cache and
	 * free queues are considered basically 'free', moving pages from
	 * cache to free does not affect other calculations.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
	}
	splx(s);
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
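	 * The victim is simply the largest process by resident plus
	 * swapped-out size, which is not necessarily the process
	 * responsible for the shortage.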
11211c58e4e5SJohn Baldwin	 *
11221c58e4e5SJohn Baldwin	 * We keep the process bigproc locked once we find it to keep anyone
11231c58e4e5SJohn Baldwin	 * from messing with it; however, there is a possibility of
11241c58e4e5SJohn Baldwin	 * deadlock if process B is bigproc and one of its child processes
11251c58e4e5SJohn Baldwin	 * attempts to propagate a signal to B while we are waiting for A's
11261c58e4e5SJohn Baldwin	 * lock while walking this list. To avoid this, we don't block on
11271c58e4e5SJohn Baldwin	 * the process lock but just skip a process if it is already locked.
11285663e6deSDavid Greenman	 */
1129ff2b5645SMatthew Dillon	if ((vm_swap_size < 64 && vm_page_count_min()) ||
1130ff2b5645SMatthew Dillon	    (swap_pager_full && vm_paging_target() > 0)) {
1131ff2b5645SMatthew Dillon #if 0
1132936524aaSMatthew Dillon	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
1133ff2b5645SMatthew Dillon #endif
11345663e6deSDavid Greenman		bigproc = NULL;
11355663e6deSDavid Greenman		bigsize = 0;
11361005a129SJohn Baldwin		sx_slock(&allproc_lock);
1137e602ba25SJulian Elischer		FOREACH_PROC_IN_SYSTEM(p) {
1138e602ba25SJulian Elischer			int breakout;
11395663e6deSDavid Greenman			/*
11401c58e4e5SJohn Baldwin			 * If this process is already locked, skip it.
11411c58e4e5SJohn Baldwin			 */
11421c58e4e5SJohn Baldwin			if (PROC_TRYLOCK(p) == 0)
11431c58e4e5SJohn Baldwin				continue;
11441c58e4e5SJohn Baldwin			/*
11455663e6deSDavid Greenman			 * if this is a system process, skip it
11465663e6deSDavid Greenman			 */
1147ef6020d1SMike Silbersack			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
114879221631SDavid Greenman			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
11498606d880SJohn Baldwin				PROC_UNLOCK(p);
11505663e6deSDavid Greenman				continue;
11515663e6deSDavid Greenman			}
11525663e6deSDavid Greenman			/*
11535663e6deSDavid Greenman			 * if the process is in a non-running type state,
1154e602ba25SJulian Elischer			 * don't touch it. Check all the threads individually.
11555663e6deSDavid Greenman			 */
11569ed346baSBosko Milekic			mtx_lock_spin(&sched_lock);
1157e602ba25SJulian Elischer			breakout = 0;
1158e602ba25SJulian Elischer			FOREACH_THREAD_IN_PROC(p, td) {
115971fad9fdSJulian Elischer				if (!TD_ON_RUNQ(td) &&
116071fad9fdSJulian Elischer				    !TD_IS_RUNNING(td) &&
116171fad9fdSJulian Elischer				    !TD_IS_SLEEPING(td)) {
1162e602ba25SJulian Elischer					breakout = 1;
1163e602ba25SJulian Elischer					break;
1164e602ba25SJulian Elischer				}
1165e602ba25SJulian Elischer			}
1166e602ba25SJulian Elischer			if (breakout) {
11679ed346baSBosko Milekic				mtx_unlock_spin(&sched_lock);
11681c58e4e5SJohn Baldwin				PROC_UNLOCK(p);
11695663e6deSDavid Greenman				continue;
11705663e6deSDavid Greenman			}
11719ed346baSBosko Milekic			mtx_unlock_spin(&sched_lock);
11725663e6deSDavid Greenman			/*
11735663e6deSDavid Greenman			 * get the process size
11745663e6deSDavid Greenman			 */
1175ff2b5645SMatthew Dillon			size = vmspace_resident_count(p->p_vmspace) +
1176ff2b5645SMatthew Dillon			    vmspace_swap_count(p->p_vmspace);
11775663e6deSDavid Greenman			/*
11785663e6deSDavid Greenman			 * if this process is bigger than the biggest one,
11795663e6deSDavid Greenman			 * remember it.
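			 * Note that "bigger" is measured as resident pages
			 * plus swapped-out pages, so a mostly swapped-out
			 * process can still be selected.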
11805663e6deSDavid Greenman			 */
11815663e6deSDavid Greenman			if (size > bigsize) {
11821c58e4e5SJohn Baldwin				if (bigproc != NULL)
11831c58e4e5SJohn Baldwin					PROC_UNLOCK(bigproc);
11845663e6deSDavid Greenman				bigproc = p;
11855663e6deSDavid Greenman				bigsize = size;
11861c58e4e5SJohn Baldwin			} else
11871c58e4e5SJohn Baldwin				PROC_UNLOCK(p);
11885663e6deSDavid Greenman		}
11891005a129SJohn Baldwin		sx_sunlock(&allproc_lock);
11905663e6deSDavid Greenman		if (bigproc != NULL) {
1191b40ce416SJulian Elischer			struct ksegrp *kg;
1192729b1e51SDavid Greenman			killproc(bigproc, "out of swap space");
11939ed346baSBosko Milekic			mtx_lock_spin(&sched_lock);
1194b40ce416SJulian Elischer			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
1195b43179fbSJeff Roberson				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
1196b40ce416SJulian Elischer			}
11979ed346baSBosko Milekic			mtx_unlock_spin(&sched_lock);
11981c58e4e5SJohn Baldwin			PROC_UNLOCK(bigproc);
119924a1cce3SDavid Greenman			wakeup(&cnt.v_free_count);
12005663e6deSDavid Greenman		}
12015663e6deSDavid Greenman	}
120226f9a767SRodney W. Grimes }
120326f9a767SRodney W. Grimes
1204dc2efb27SJohn Dyson /*
1205dc2efb27SJohn Dyson  * This routine tries to maintain the pseudo LRU active queue,
1206dc2efb27SJohn Dyson  * so that some statistic accumulation still occurs during long
1207956f3135SPhilippe Charnier  * periods when there is no paging. This code helps the
1208dc2efb27SJohn Dyson  * situation where paging just starts to occur.
1209dc2efb27SJohn Dyson  */
1210dc2efb27SJohn Dyson static void
1211dc2efb27SJohn Dyson vm_pageout_page_stats()
1212dc2efb27SJohn Dyson {
1213dc2efb27SJohn Dyson	vm_page_t m, next;
1214dc2efb27SJohn Dyson	int pcount, tpcount;		/* Number of pages to check */
1215dc2efb27SJohn Dyson	static int fullintervalcount = 0;
1216bef608bdSJohn Dyson	int page_shortage;
121725db2c54SMatthew Dillon	int s0;
1218bef608bdSJohn Dyson
121990ecac61SMatthew Dillon	page_shortage =
122090ecac61SMatthew Dillon	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1221bef608bdSJohn Dyson	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
122290ecac61SMatthew Dillon
1223bef608bdSJohn Dyson	if (page_shortage <= 0)
1224bef608bdSJohn Dyson		return;
1225dc2efb27SJohn Dyson
122625db2c54SMatthew Dillon	s0 = splvm();
122748c0444cSAlan Cox	vm_page_lock_queues();
1228dc2efb27SJohn Dyson	pcount = cnt.v_active_count;
1229dc2efb27SJohn Dyson	fullintervalcount += vm_pageout_stats_interval;
1230dc2efb27SJohn Dyson	if (fullintervalcount < vm_pageout_full_stats_interval) {
1231dc2efb27SJohn Dyson		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
1232dc2efb27SJohn Dyson		if (pcount > tpcount)
1233dc2efb27SJohn Dyson			pcount = tpcount;
1234883f3caaSMatthew Dillon	} else {
1235883f3caaSMatthew Dillon		fullintervalcount = 0;
1236dc2efb27SJohn Dyson	}
1237dc2efb27SJohn Dyson
1238be72f788SAlan Cox	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1239dc2efb27SJohn Dyson	while ((m != NULL) && (pcount-- > 0)) {
12407e006499SJohn Dyson		int actcount;
1241dc2efb27SJohn Dyson
1242dc2efb27SJohn Dyson		if (m->queue != PQ_ACTIVE) {
1243dc2efb27SJohn Dyson			break;
1244dc2efb27SJohn Dyson		}
1245dc2efb27SJohn Dyson
1246dc2efb27SJohn Dyson		next = TAILQ_NEXT(m, pageq);
1247dc2efb27SJohn Dyson		/*
1248dc2efb27SJohn Dyson		 * Don't deactivate pages that are busy.
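		 * A page with a nonzero busy or hold count, or with PG_BUSY
		 * set, is likely undergoing I/O or is otherwise pinned, so
		 * it is simply requeued and revisited on a later pass.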
1249dc2efb27SJohn Dyson		 */
1250dc2efb27SJohn Dyson		if ((m->busy != 0) ||
1251dc2efb27SJohn Dyson		    (m->flags & PG_BUSY) ||
1252dc2efb27SJohn Dyson		    (m->hold_count != 0)) {
12536d03d577SMatthew Dillon			vm_pageq_requeue(m);
1254dc2efb27SJohn Dyson			m = next;
1255dc2efb27SJohn Dyson			continue;
1256dc2efb27SJohn Dyson		}
1257dc2efb27SJohn Dyson
12587e006499SJohn Dyson		actcount = 0;
1259dc2efb27SJohn Dyson		if (m->flags & PG_REFERENCED) {
1260e69763a3SDoug Rabson			vm_page_flag_clear(m, PG_REFERENCED);
12617e006499SJohn Dyson			actcount += 1;
1262dc2efb27SJohn Dyson		}
1263dc2efb27SJohn Dyson
12640385347cSPeter Wemm		actcount += pmap_ts_referenced(m);
12657e006499SJohn Dyson		if (actcount) {
12667e006499SJohn Dyson			m->act_count += ACT_ADVANCE + actcount;
1267dc2efb27SJohn Dyson			if (m->act_count > ACT_MAX)
1268dc2efb27SJohn Dyson				m->act_count = ACT_MAX;
12696d03d577SMatthew Dillon			vm_pageq_requeue(m);
1270dc2efb27SJohn Dyson		} else {
1271dc2efb27SJohn Dyson			if (m->act_count == 0) {
12727e006499SJohn Dyson				/*
12732b6b0df7SMatthew Dillon				 * We turn off page access, so that we have
12742b6b0df7SMatthew Dillon				 * more accurate RSS stats. We don't do this
12752b6b0df7SMatthew Dillon				 * in the normal page deactivation when the
12762b6b0df7SMatthew Dillon				 * system is loaded VM wise, because the
12772b6b0df7SMatthew Dillon				 * cost of the large number of page protect
12782b6b0df7SMatthew Dillon				 * operations would be higher than the value
12797e006499SJohn Dyson				 * of doing the operation.
12807e006499SJohn Dyson				 */
1281dc2efb27SJohn Dyson				vm_page_protect(m, VM_PROT_NONE);
1282dc2efb27SJohn Dyson				vm_page_deactivate(m);
1283dc2efb27SJohn Dyson			} else {
1284dc2efb27SJohn Dyson				m->act_count -= min(m->act_count, ACT_DECLINE);
12856d03d577SMatthew Dillon				vm_pageq_requeue(m);
1286dc2efb27SJohn Dyson			}
1287dc2efb27SJohn Dyson		}
1288dc2efb27SJohn Dyson
1289dc2efb27SJohn Dyson		m = next;
1290dc2efb27SJohn Dyson	}
129148c0444cSAlan Cox	vm_page_unlock_queues();
129225db2c54SMatthew Dillon	splx(s0);
1293dc2efb27SJohn Dyson }
1294dc2efb27SJohn Dyson
1295b182ec9eSJohn Dyson static int
1296b182ec9eSJohn Dyson vm_pageout_free_page_calc(count)
1297b182ec9eSJohn Dyson vm_size_t count;
1298b182ec9eSJohn Dyson {
1299b182ec9eSJohn Dyson	if (count < cnt.v_page_count)
1300b182ec9eSJohn Dyson		return 0;
1301b182ec9eSJohn Dyson	/*
1302b182ec9eSJohn Dyson	 * free_reserved needs to include enough for the largest swap pager
1303b182ec9eSJohn Dyson	 * structures plus enough for any pv_entry structs when paging.
1304b182ec9eSJohn Dyson	 */
1305b182ec9eSJohn Dyson	if (cnt.v_page_count > 1024)
1306b182ec9eSJohn Dyson		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1307b182ec9eSJohn Dyson	else
1308b182ec9eSJohn Dyson		cnt.v_free_min = 4;
1309f35329acSJohn Dyson	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1310f35329acSJohn Dyson	    cnt.v_interrupt_free_min;
1311f35329acSJohn Dyson	cnt.v_free_reserved = vm_pageout_page_count +
1312a15403deSJohn Dyson	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
131390ecac61SMatthew Dillon	cnt.v_free_severe = cnt.v_free_min / 2;
1314a2f4a846SJohn Dyson	cnt.v_free_min += cnt.v_free_reserved;
131590ecac61SMatthew Dillon	cnt.v_free_severe += cnt.v_free_reserved;
1316b182ec9eSJohn Dyson	return 1;
1317b182ec9eSJohn Dyson }
1318b182ec9eSJohn Dyson
1319df8bae1dSRodney W. Grimes /*
1320df8bae1dSRodney W. Grimes  * vm_pageout is the high level pageout daemon.
1321df8bae1dSRodney W. Grimes  */
13222b14f991SJulian Elischer static void
132326f9a767SRodney W. Grimes vm_pageout()
1324df8bae1dSRodney W. Grimes {
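	/*
	 * Illustrative numbers (assumed, not from the source): with 32768
	 * pages of 4KB each (128MB of RAM), vm_pageout_free_page_calc()
	 * above sets v_free_min to 4 + (32768 - 1024) / 200 = 162 pages
	 * (~648KB) before the v_free_reserved adjustment is added in.
	 */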
13252b6b0df7SMatthew Dillon	int pass;
13260384fff8SJason Evans
132769a78d46SJohn Baldwin	mtx_lock(&Giant);
13280384fff8SJason Evans
1329df8bae1dSRodney W. Grimes	/*
1330df8bae1dSRodney W. Grimes	 * Initialize some paging parameters.
1331df8bae1dSRodney W. Grimes	 */
1332f6b04d2bSDavid Greenman	cnt.v_interrupt_free_min = 2;
1333f35329acSJohn Dyson	if (cnt.v_page_count < 2000)
1334f35329acSJohn Dyson		vm_pageout_page_count = 8;
1335f6b04d2bSDavid Greenman
1336b182ec9eSJohn Dyson	vm_pageout_free_page_calc(cnt.v_page_count);
1337ed74321bSDavid Greenman	/*
13382b6b0df7SMatthew Dillon	 * v_free_target and v_cache_min control pageout hysteresis. Note
13392b6b0df7SMatthew Dillon	 * that these are more a measure of the VM cache queue hysteresis
13402b6b0df7SMatthew Dillon	 * than of the VM free queue. Specifically, v_free_target is the
13412b6b0df7SMatthew Dillon	 * high water mark (free+cache pages).
13422b6b0df7SMatthew Dillon	 *
13432b6b0df7SMatthew Dillon	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
13442b6b0df7SMatthew Dillon	 * low water mark, while v_free_min is the stop. v_cache_min must
13452b6b0df7SMatthew Dillon	 * be big enough to handle memory needs while the pageout daemon
13462b6b0df7SMatthew Dillon	 * is signalled and run to free more pages.
1347ed74321bSDavid Greenman	 */
1348a15403deSJohn Dyson	if (cnt.v_free_count > 6144)
13492b6b0df7SMatthew Dillon		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1350a15403deSJohn Dyson	else
1351a15403deSJohn Dyson		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
13526f2b142eSDavid Greenman
1353a15403deSJohn Dyson	if (cnt.v_free_count > 2048) {
1354a15403deSJohn Dyson		cnt.v_cache_min = cnt.v_free_target;
1355a15403deSJohn Dyson		cnt.v_cache_max = 2 * cnt.v_cache_min;
1356a15403deSJohn Dyson		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
13570d94caffSDavid Greenman	} else {
13580d94caffSDavid Greenman		cnt.v_cache_min = 0;
13590d94caffSDavid Greenman		cnt.v_cache_max = 0;
13606f2b142eSDavid Greenman		cnt.v_inactive_target = cnt.v_free_count / 4;
13610d94caffSDavid Greenman	}
1362e47ed70bSJohn Dyson	if (cnt.v_inactive_target > cnt.v_free_count / 3)
1363e47ed70bSJohn Dyson		cnt.v_inactive_target = cnt.v_free_count / 3;
1364df8bae1dSRodney W. Grimes
1365df8bae1dSRodney W. Grimes	/* XXX does not really belong here */
1366df8bae1dSRodney W. Grimes	if (vm_page_max_wired == 0)
1367df8bae1dSRodney W. Grimes		vm_page_max_wired = cnt.v_free_count / 3;
1368df8bae1dSRodney W. Grimes
1369dc2efb27SJohn Dyson	if (vm_pageout_stats_max == 0)
1370dc2efb27SJohn Dyson		vm_pageout_stats_max = cnt.v_free_target;
1371dc2efb27SJohn Dyson
1372dc2efb27SJohn Dyson	/*
1373dc2efb27SJohn Dyson	 * Set interval in seconds for stats scan.
1374dc2efb27SJohn Dyson	 */
1375dc2efb27SJohn Dyson	if (vm_pageout_stats_interval == 0)
1376bef608bdSJohn Dyson		vm_pageout_stats_interval = 5;
1377dc2efb27SJohn Dyson	if (vm_pageout_full_stats_interval == 0)
1378dc2efb27SJohn Dyson		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1379dc2efb27SJohn Dyson
1380dc2efb27SJohn Dyson	/*
1381dc2efb27SJohn Dyson	 * Set maximum free per pass
1382dc2efb27SJohn Dyson	 */
1383dc2efb27SJohn Dyson	if (vm_pageout_stats_free_max == 0)
1384bef608bdSJohn Dyson		vm_pageout_stats_free_max = 5;
1385dc2efb27SJohn Dyson
138624a1cce3SDavid Greenman	swap_pager_swap_init();
13872b6b0df7SMatthew Dillon	pass = 0;
1388df8bae1dSRodney W. Grimes	/*
13890d94caffSDavid Greenman	 * The pageout daemon is never done, so loop forever.
1390df8bae1dSRodney W. Grimes	 */
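	/*
	 * Descriptive note on the loop below: each iteration either handles
	 * a page shortage by calling vm_pageout_scan(), or, when the system
	 * is idle, wakes periodically to run vm_pageout_page_stats().  The
	 * "pass" counter counts consecutive scans that failed to reach the
	 * paging target, so later passes can work on dirty pages harder.
	 */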
1391df8bae1dSRodney W. Grimes	while (TRUE) {
1392dc2efb27SJohn Dyson		int error;
1393b18bfc3dSJohn Dyson		int s = splvm();
139490ecac61SMatthew Dillon
1395936524aaSMatthew Dillon		/*
1396936524aaSMatthew Dillon		 * If we have enough free memory, wakeup waiters. Do
1397936524aaSMatthew Dillon		 * not clear vm_pages_needed until we reach our target,
1398936524aaSMatthew Dillon		 * otherwise we may be woken up over and over again and
1399936524aaSMatthew Dillon		 * waste a lot of cpu.
1400936524aaSMatthew Dillon		 */
1401936524aaSMatthew Dillon		if (vm_pages_needed && !vm_page_count_min()) {
1402936524aaSMatthew Dillon			if (vm_paging_needed() <= 0)
1403936524aaSMatthew Dillon				vm_pages_needed = 0;
1404936524aaSMatthew Dillon			wakeup(&cnt.v_free_count);
1405936524aaSMatthew Dillon		}
1406936524aaSMatthew Dillon		if (vm_pages_needed) {
140790ecac61SMatthew Dillon			/*
14082b6b0df7SMatthew Dillon			 * Still not done, take a second pass without waiting
14092b6b0df7SMatthew Dillon			 * (unlimited dirty cleaning), otherwise sleep a bit
14102b6b0df7SMatthew Dillon			 * and try again.
141190ecac61SMatthew Dillon			 */
14122b6b0df7SMatthew Dillon			++pass;
14132b6b0df7SMatthew Dillon			if (pass > 1)
14140cddd8f0SMatthew Dillon				tsleep(&vm_pages_needed, PVM,
141523955314SAlfred Perlstein				    "psleep", hz/2);
141690ecac61SMatthew Dillon		} else {
141790ecac61SMatthew Dillon			/*
14182b6b0df7SMatthew Dillon			 * Good enough, sleep & handle stats. Prime the pass
14192b6b0df7SMatthew Dillon			 * for the next run.
142090ecac61SMatthew Dillon			 */
14212b6b0df7SMatthew Dillon			if (pass > 1)
14222b6b0df7SMatthew Dillon				pass = 1;
14232b6b0df7SMatthew Dillon			else
14242b6b0df7SMatthew Dillon				pass = 0;
14250cddd8f0SMatthew Dillon			error = tsleep(&vm_pages_needed, PVM,
14260cddd8f0SMatthew Dillon			    "psleep", vm_pageout_stats_interval * hz);
1427dc2efb27SJohn Dyson			if (error && !vm_pages_needed) {
1428dc2efb27SJohn Dyson				splx(s);
14292b6b0df7SMatthew Dillon				pass = 0;
1430dc2efb27SJohn Dyson				vm_pageout_page_stats();
1431dc2efb27SJohn Dyson				continue;
1432dc2efb27SJohn Dyson			}
1433f919ebdeSDavid Greenman		}
1434e47ed70bSJohn Dyson
1435b18bfc3dSJohn Dyson		if (vm_pages_needed)
1436b18bfc3dSJohn Dyson			cnt.v_pdwakeups++;
1437f919ebdeSDavid Greenman		splx(s);
14382b6b0df7SMatthew Dillon		vm_pageout_scan(pass);
14392d8acc0fSJohn Dyson		vm_pageout_deficit = 0;
1440df8bae1dSRodney W. Grimes	}
1441df8bae1dSRodney W. Grimes }
144226f9a767SRodney W. Grimes
1443e0c5a895SJohn Dyson void
1444e0c5a895SJohn Dyson pagedaemon_wakeup()
1445e0c5a895SJohn Dyson {
1446b40ce416SJulian Elischer	if (!vm_pages_needed && curthread->td_proc != pageproc) {
1447e0c5a895SJohn Dyson		vm_pages_needed++;
1448e0c5a895SJohn Dyson		wakeup(&vm_pages_needed);
1449e0c5a895SJohn Dyson	}
1450e0c5a895SJohn Dyson }
1451e0c5a895SJohn Dyson
145238efa82bSJohn Dyson #if !defined(NO_SWAPPING)
14535afce282SDavid Greenman static void
14545afce282SDavid Greenman vm_req_vmdaemon()
14555afce282SDavid Greenman {
14565afce282SDavid Greenman	static int lastrun = 0;
14575afce282SDavid Greenman
1458b18bfc3dSJohn Dyson	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
14595afce282SDavid Greenman		wakeup(&vm_daemon_needed);
14605afce282SDavid Greenman		lastrun = ticks;
14615afce282SDavid Greenman	}
14625afce282SDavid Greenman }
14635afce282SDavid Greenman
14642b14f991SJulian Elischer static void
14654f9fb771SBruce Evans vm_daemon()
14660d94caffSDavid Greenman {
14672fe6e4d7SDavid Greenman	struct proc *p;
1468e602ba25SJulian Elischer	int breakout;
1469e602ba25SJulian Elischer	struct thread *td;
14700d94caffSDavid Greenman
14713614c6fcSJohn Baldwin	mtx_lock(&Giant);
14722fe6e4d7SDavid Greenman	while (TRUE) {
14730cddd8f0SMatthew Dillon		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
14744c1f8ee9SDavid Greenman		if (vm_pageout_req_swapout) {
1475ceb0cf87SJohn Dyson			swapout_procs(vm_pageout_req_swapout);
14764c1f8ee9SDavid Greenman			vm_pageout_req_swapout = 0;
14774c1f8ee9SDavid Greenman		}
14782fe6e4d7SDavid Greenman		/*
14790d94caffSDavid Greenman		 * Scan the processes: deactivate pages of any process that
14800d94caffSDavid Greenman		 * exceeds its rlimits or that is swapped out.
14812fe6e4d7SDavid Greenman		 */
14821005a129SJohn Baldwin		sx_slock(&allproc_lock);
1483fc2ffbe6SPoul-Henning Kamp		LIST_FOREACH(p, &allproc, p_list) {
1484fe2144fdSLuoqi Chen			vm_pindex_t limit, size;
14852fe6e4d7SDavid Greenman
14862fe6e4d7SDavid Greenman			/*
14872fe6e4d7SDavid Greenman			 * if this is a system process or if we have already
14882fe6e4d7SDavid Greenman			 * looked at this process, skip it.
14892fe6e4d7SDavid Greenman			 */
14902fe6e4d7SDavid Greenman			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
14912fe6e4d7SDavid Greenman				continue;
14922fe6e4d7SDavid Greenman			}
14932fe6e4d7SDavid Greenman			/*
14942fe6e4d7SDavid Greenman			 * if the process is in a non-running type state,
14952fe6e4d7SDavid Greenman			 * don't touch it.
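			 * A process qualifies only when every one of its
			 * threads is on a run queue, running, or sleeping;
			 * any other thread state causes it to be skipped.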
14962fe6e4d7SDavid Greenman			 */
14979ed346baSBosko Milekic			mtx_lock_spin(&sched_lock);
1498e602ba25SJulian Elischer			breakout = 0;
1499e602ba25SJulian Elischer			FOREACH_THREAD_IN_PROC(p, td) {
150071fad9fdSJulian Elischer				if (!TD_ON_RUNQ(td) &&
150171fad9fdSJulian Elischer				    !TD_IS_RUNNING(td) &&
150271fad9fdSJulian Elischer				    !TD_IS_SLEEPING(td)) {
1503e602ba25SJulian Elischer					breakout = 1;
1504e602ba25SJulian Elischer					break;
1505e602ba25SJulian Elischer				}
1506e602ba25SJulian Elischer			}
1507e602ba25SJulian Elischer			if (breakout) {
15089ed346baSBosko Milekic				mtx_unlock_spin(&sched_lock);
15092fe6e4d7SDavid Greenman				continue;
15102fe6e4d7SDavid Greenman			}
15112fe6e4d7SDavid Greenman			/*
15122fe6e4d7SDavid Greenman			 * get a limit
15132fe6e4d7SDavid Greenman			 */
1514fe2144fdSLuoqi Chen			limit = OFF_TO_IDX(
1515fe2144fdSLuoqi Chen			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
1516fe2144fdSLuoqi Chen			    p->p_rlimit[RLIMIT_RSS].rlim_max));
15172fe6e4d7SDavid Greenman
15182fe6e4d7SDavid Greenman			/*
15190d94caffSDavid Greenman			 * Let processes that are swapped out really be
15200d94caffSDavid Greenman			 * swapped out; set the limit to nothing to force a
15210d94caffSDavid Greenman			 * swap-out of their pages.
15222fe6e4d7SDavid Greenman			 */
15238606d880SJohn Baldwin			if ((p->p_sflag & PS_INMEM) == 0)
15240d94caffSDavid Greenman				limit = 0;	/* XXX */
15259ed346baSBosko Milekic			mtx_unlock_spin(&sched_lock);
15262fe6e4d7SDavid Greenman
1527fe2144fdSLuoqi Chen			size = vmspace_resident_count(p->p_vmspace);
15282fe6e4d7SDavid Greenman			if (limit >= 0 && size >= limit) {
1529fe2144fdSLuoqi Chen				vm_pageout_map_deactivate_pages(
1530fe2144fdSLuoqi Chen				    &p->p_vmspace->vm_map, limit);
15312fe6e4d7SDavid Greenman			}
15322fe6e4d7SDavid Greenman		}
15331005a129SJohn Baldwin		sx_sunlock(&allproc_lock);
153424a1cce3SDavid Greenman	}
15352fe6e4d7SDavid Greenman }
1536a1287949SEivind Eklund #endif /* !defined(NO_SWAPPING) */
1537
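/*
 * Illustrative example for the RSS limit enforcement above (numbers
 * assumed, not from the source): a process with an RLIMIT_RSS rlim_cur of
 * 8MB on 4KB pages gets limit = OFF_TO_IDX(8388608) = 2048 pages; once
 * its resident count reaches 2048 pages, vm_daemon() calls
 * vm_pageout_map_deactivate_pages() to push the resident set back toward
 * that limit.
 */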