/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

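/*
 * The SYSCTL_INT() entries above hang these knobs off the "vm" sysctl
 * tree; the CTLFLAG_RW ones may be tuned at run time.  A sketch of
 * typical sysctl(8) usage (the names follow from the declarations
 * above):
 *
 *	sysctl vm.max_launder=64
 *	sysctl vm.defer_swapspace_pageouts=1
 */
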
int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'.  VM_OBJECT_TRYLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_FICTITIOUS | PG_MARKER;
	marker.oflags = VPO_BUSY;
	marker.queue = m->queue;
	marker.wire_count = 1;

	queue = m->queue;
	object = m->object;

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
	    m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_unlock(m);
	VM_OBJECT_LOCK(object);
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl,
	    &marker, pageq);
	return (unchanged);
}

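/*
 * A sketch of the expected calling pattern, mirroring the use in
 * vm_pageout_scan() below (this is illustration, not a second
 * interface):
 *
 *	if (!VM_OBJECT_TRYLOCK(object) &&
 *	    !vm_pageout_fallback_object_lock(m, &next)) {
 *		VM_OBJECT_UNLOCK(object);
 *		vm_page_unlock(m);
 *		m = next;
 *		continue;
 *	}
 *
 * On a FALSE return the queue changed while we slept on the object
 * lock, so the caller drops its locks and resumes the scan at *next.
 */
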
/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_NOTOWNED);
	vm_page_lock(m);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->oflags & VPO_BUSY))) {
		vm_page_unlock(m);
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if ((p->oflags & VPO_BUSY) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_lock_queues();
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			vm_page_unlock_queues();
			ib = 0;
			break;
		}
		vm_page_unlock_queues();
		vm_page_unlock(p);
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if ((p->oflags & VPO_BUSY) || p->busy) {
			break;
		}
		vm_page_lock(p);
		vm_page_lock_queues();
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock_queues();
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock_queues();
		vm_page_unlock(p);
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	vm_page_unlock(m);
	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
}

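/*
 * Shape of the cluster assembled above (an illustrative summary; with
 * the default vm_pageout_page_count of 16 the mc[] array holds 32
 * slots):
 *
 *	mc[0]  .. mc[15]	filled backward by the reverse scan
 *	mc[16]			the page passed in ("m")
 *	mc[17] .. mc[31]	filled forward by the forward scan
 *
 * Only the contiguous window mc[page_base .. page_base + pageout_count
 * - 1] is handed to vm_pageout_flush().
 */
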
/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e., busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_lock(mc[i]);
		vm_page_lock_queues();
		pmap_remove_write(mc[i]);
		vm_page_unlock(mc[i]);
		vm_page_unlock_queues();
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		vm_page_lock(mt);
		vm_page_lock_queues();
		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    (mt->flags & PG_WRITEABLE) == 0,
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe())
				vm_page_try_to_cache(mt);
		}
		vm_page_unlock_queues();
		vm_page_unlock(mt);
	}
	return numpagedout;
}

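/*
 * Note on the return value above: numpagedout counts only the pages
 * the pager accepted (VM_PAGER_OK or VM_PAGER_PEND).  vm_pageout_scan()
 * below treats a nonzero result from vm_pageout_clean() as one page of
 * progress, crediting page_shortage and charging maxlaunder.
 */
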
#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap, first_object, desired)
	pmap_t pmap;
	vm_object_t first_object;
	long desired;
{
	vm_object_t backing_object, object;
	vm_page_t p, next;
	int actcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE ||
	    first_object->type == OBJT_SG)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		if (object->type == OBJT_PHYS || object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		p = TAILQ_FIRST(&object->memq);
		while (p != NULL) {
			vm_page_lock(p);
			vm_page_lock_queues();
			if (pmap_resident_count(pmap) <= desired) {
				vm_page_unlock_queues();
				vm_page_unlock(p);
				goto unlock_return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->oflags & VPO_BUSY) ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock_queues();
				vm_page_unlock(p);
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_page_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			vm_page_unlock_queues();
			vm_page_unlock(p);
			p = next;
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

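/*
 * A summary of the aging arithmetic used above: a referenced active
 * page gains ACT_ADVANCE on act_count (clamped at ACT_MAX) and is
 * requeued, while an unreferenced one loses ACT_DECLINE.  Only when
 * act_count decays to zero (or vm_pageout_algorithm selects strict
 * LRU) is the page unmapped and deactivated, so a page must go
 * unreferenced for several consecutive scans before it is reclaimed.
 */
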
/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	long desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

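/*
 * An overview of the scan below: pass 0 limits laundering to
 * vm_max_launder dirty pages and gives dirty pages a free second trip
 * through the inactive queue (PG_WINATCFLS) before flushing them;
 * subsequent passes raise maxlaunder and, when both memory and swap
 * remain critically low, may invoke vm_pageout_oom().
 */
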
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_FICTITIOUS | PG_MARKER;
	marker.oflags = VPO_BUSY;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		if (!vm_page_trylock(m)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count || (object = m->object) == NULL) {
			vm_page_unlock(m);
			vm_page_requeue(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, as they are most likely being paged out.
		 */
		if (!VM_OBJECT_TRYLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		     m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}
		if (m->busy || (m->oflags & VPO_BUSY)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE);
			vm_page_unlock(m);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			vm_page_unlock(m);
			continue;
		}

		/*
		 * If the upper level VM system does not believe that the page
		 * is fully dirty, but it is mapped for write access, then we
		 * consult the pmap to see if the page's dirty status should
		 * be updated.
		 */
		if (m->dirty != VM_PAGE_BITS_ALL &&
		    (m->flags & PG_WRITEABLE) != 0) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.  In principle, we could eliminate just write
			 * access here rather than all access.  In the expected
			 * case, when there are no last instant modifications
			 * to the page, removing all access will be cheaper
			 * overall.
			 */
			if (pmap_is_modified(m))
				vm_page_dirty(m);
			else if (m->dirty == 0)
				pmap_remove_all(m);
		}

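		/*
		 * References and the dirty state are settled at this point;
		 * the ladder below disposes of the page: invalid pages are
		 * freed, clean pages are cached, and dirty pages either get
		 * a second queue cycle (PG_WINATCFLS) or are laundered via
		 * vm_pageout_clean().
		 */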
		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_page_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok, vfslocked = 0;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_page_unlock(m);
				VM_OBJECT_UNLOCK(object);
				vm_page_requeue(m);
				continue;
			}

			/*
			 * Following operations may unlock
			 * vm_page_queue_mtx, invalidating the 'next'
			 * pointer.  To prevent an inordinate number
			 * of restarts we use our marker to remember
			 * our place.
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
			    m, &marker, pageq);
			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vm_page_unlock_queues();
				vm_page_unlock(m);
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vm_page_lock_queues();
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock(m);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, pageq) != &marker) {
					vm_page_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->oflags & VPO_BUSY)) {
					vm_page_unlock(m);
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_page_unlock(m);
					vm_page_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}
			vm_page_unlock(m);

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_unlock_queues();
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			vm_page_lock_queues();
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_UNLOCK(object);
			if (mp != NULL) {
				vm_page_unlock_queues();
				if (vp != NULL)
					vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
			    &marker, pageq);
			vm_page_lock_assert(m, MA_NOTOWNED);
			continue;
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		object = m->object;
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		if (!vm_page_trylock(m) || (object = m->object) == NULL) {
			m = next;
			continue;
		}
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (object->ref_count != 0)) {
			vm_page_requeue(m);
Grimes } else { 117438efa82bSJohn Dyson m->act_count -= min(m->act_count, ACT_DECLINE); 11752b6b0df7SMatthew Dillon if (vm_pageout_algorithm || 1176b08abf6cSAlan Cox object->ref_count == 0 || 11772b6b0df7SMatthew Dillon m->act_count == 0) { 1178925a3a41SJohn Dyson page_shortage--; 1179b08abf6cSAlan Cox if (object->ref_count == 0) { 11805d4a7b79SAlan Cox KASSERT(!pmap_page_is_mapped(m), 11815d4a7b79SAlan Cox ("vm_pageout_scan: page %p is mapped", m)); 1182d4a272dbSJohn Dyson if (m->dirty == 0) 11830d94caffSDavid Greenman vm_page_cache(m); 1184d4a272dbSJohn Dyson else 1185d4a272dbSJohn Dyson vm_page_deactivate(m); 11860d94caffSDavid Greenman } else { 118726f9a767SRodney W. Grimes vm_page_deactivate(m); 1188df8bae1dSRodney W. Grimes } 118938efa82bSJohn Dyson } else { 1190e5b006ffSAlan Cox vm_page_requeue(m); 119138efa82bSJohn Dyson } 1192df8bae1dSRodney W. Grimes } 11932965a453SKip Macy vm_page_unlock(m); 1194b08abf6cSAlan Cox VM_OBJECT_UNLOCK(object); 119526f9a767SRodney W. Grimes m = next; 119626f9a767SRodney W. Grimes } 11978ffc1519SAlan Cox vm_page_unlock_queues(); 1198ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING) 1199ceb0cf87SJohn Dyson /* 1200ceb0cf87SJohn Dyson * Idle process swapout -- run once per second. 1201ceb0cf87SJohn Dyson */ 1202ceb0cf87SJohn Dyson if (vm_swap_idle_enabled) { 1203ceb0cf87SJohn Dyson static long lsec; 1204227ee8a1SPoul-Henning Kamp if (time_second != lsec) { 120597824da3SAlan Cox vm_req_vmdaemon(VM_SWAP_IDLE); 1206227ee8a1SPoul-Henning Kamp lsec = time_second; 1207ceb0cf87SJohn Dyson } 1208ceb0cf87SJohn Dyson } 1209ceb0cf87SJohn Dyson #endif 1210ceb0cf87SJohn Dyson 12115663e6deSDavid Greenman /* 1212f6b04d2bSDavid Greenman * If we didn't get enough free pages, and we have skipped a vnode 12134c1f8ee9SDavid Greenman * in a writeable object, wakeup the sync daemon. And kick swapout 12144c1f8ee9SDavid Greenman * if we did not get enough free pages. 1215f6b04d2bSDavid Greenman */ 121690ecac61SMatthew Dillon if (vm_paging_target() > 0) { 121790ecac61SMatthew Dillon if (vnodes_skipped && vm_page_count_min()) 1218d50c1994SPeter Wemm (void) speedup_syncer(); 121938efa82bSJohn Dyson #if !defined(NO_SWAPPING) 122097824da3SAlan Cox if (vm_swap_enabled && vm_page_count_target()) 122197824da3SAlan Cox vm_req_vmdaemon(VM_SWAP_NORMAL); 12225afce282SDavid Greenman #endif 12234c1f8ee9SDavid Greenman } 12244c1f8ee9SDavid Greenman 1225f6b04d2bSDavid Greenman /* 1226e92686d0SDavid Schultz * If we are critically low on one of RAM or swap and low on 1227e92686d0SDavid Schultz * the other, kill the largest process. However, we avoid 1228e92686d0SDavid Schultz * doing this on the first pass in order to give ourselves a 1229e92686d0SDavid Schultz * chance to flush out dirty vnode-backed pages and to allow 1230e92686d0SDavid Schultz * active pages to be moved to the inactive queue and reclaimed. 
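 *
 * (Illustration, assuming 4 KB pages: swap_pager_avail < 64 below
 * means under 256 KB of swap remains; combined with a free-page
 * count at or below the minimum, any pass after the first then
 * calls vm_pageout_oom(VM_OOM_MEM).)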
12312025d69bSKonstantin Belousov */
12322025d69bSKonstantin Belousov if (pass != 0 &&
12332025d69bSKonstantin Belousov ((swap_pager_avail < 64 && vm_page_count_min()) ||
12342025d69bSKonstantin Belousov (swap_pager_full && vm_paging_target() > 0)))
12352025d69bSKonstantin Belousov vm_pageout_oom(VM_OOM_MEM);
12362025d69bSKonstantin Belousov }
12372025d69bSKonstantin Belousov
12382025d69bSKonstantin Belousov
12392025d69bSKonstantin Belousov void
12402025d69bSKonstantin Belousov vm_pageout_oom(int shortage)
12412025d69bSKonstantin Belousov {
12422025d69bSKonstantin Belousov struct proc *p, *bigproc;
12432025d69bSKonstantin Belousov vm_offset_t size, bigsize;
12442025d69bSKonstantin Belousov struct thread *td;
12456bed074cSKonstantin Belousov struct vmspace *vm;
12462025d69bSKonstantin Belousov
12472025d69bSKonstantin Belousov /*
12481c58e4e5SJohn Baldwin * We keep the process bigproc locked once we find it to keep anyone
12491c58e4e5SJohn Baldwin * from messing with it; however, there is a possibility of
12501c58e4e5SJohn Baldwin * deadlock if process B is bigproc and one of its child processes
12511c58e4e5SJohn Baldwin * attempts to propagate a signal to B while we are waiting for A's
12521c58e4e5SJohn Baldwin * lock while walking this list. To avoid this, we don't block on
12531c58e4e5SJohn Baldwin * the process lock but just skip a process if it is already locked.
12545663e6deSDavid Greenman */
12555663e6deSDavid Greenman bigproc = NULL;
12565663e6deSDavid Greenman bigsize = 0;
12571005a129SJohn Baldwin sx_slock(&allproc_lock);
1258e602ba25SJulian Elischer FOREACH_PROC_IN_SYSTEM(p) {
1259e602ba25SJulian Elischer int breakout;
1260dcbcd518SBruce Evans
12611c58e4e5SJohn Baldwin if (PROC_TRYLOCK(p) == 0)
12621c58e4e5SJohn Baldwin continue;
12631c58e4e5SJohn Baldwin /*
12643f1c4c4fSKonstantin Belousov * If this is a system, protected or killed process, skip it.
12655663e6deSDavid Greenman */
12666bed074cSKonstantin Belousov if ((p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
12673f1c4c4fSKonstantin Belousov (p->p_pid == 1) || P_KILLED(p) ||
12688f60c087SPoul-Henning Kamp ((p->p_pid < 48) && (swap_pager_avail != 0))) {
12698606d880SJohn Baldwin PROC_UNLOCK(p);
12705663e6deSDavid Greenman continue;
12715663e6deSDavid Greenman }
12725663e6deSDavid Greenman /*
1273dcbcd518SBruce Evans * If the process is in a non-running type state,
1274e602ba25SJulian Elischer * don't touch it. Check all the threads individually.
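 *
 * (The per-thread test below requires every thread to be on a run
 * queue, running, or sleeping; processes caught in transient or
 * stopped states are skipped, presumably because killing them may
 * not release memory promptly and their state may be inconsistent.)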
12755663e6deSDavid Greenman */
1276e602ba25SJulian Elischer breakout = 0;
1277e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) {
1278982d11f8SJeff Roberson thread_lock(td);
127971fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) &&
128071fad9fdSJulian Elischer !TD_IS_RUNNING(td) &&
128171fad9fdSJulian Elischer !TD_IS_SLEEPING(td)) {
1282982d11f8SJeff Roberson thread_unlock(td);
1283e602ba25SJulian Elischer breakout = 1;
1284e602ba25SJulian Elischer break;
1285e602ba25SJulian Elischer }
1286982d11f8SJeff Roberson thread_unlock(td);
1287e602ba25SJulian Elischer }
1288e602ba25SJulian Elischer if (breakout) {
12891c58e4e5SJohn Baldwin PROC_UNLOCK(p);
12905663e6deSDavid Greenman continue;
12915663e6deSDavid Greenman }
12925663e6deSDavid Greenman /*
12935663e6deSDavid Greenman * get the process size
12945663e6deSDavid Greenman */
12956bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p);
12966bed074cSKonstantin Belousov if (vm == NULL) {
12976bed074cSKonstantin Belousov PROC_UNLOCK(p);
12986bed074cSKonstantin Belousov continue;
12996bed074cSKonstantin Belousov }
13006bed074cSKonstantin Belousov if (!vm_map_trylock_read(&vm->vm_map)) {
13016bed074cSKonstantin Belousov vmspace_free(vm);
130272d97679SDavid Schultz PROC_UNLOCK(p);
130372d97679SDavid Schultz continue;
130472d97679SDavid Schultz }
13057981aa24SKonstantin Belousov size = vmspace_swap_count(vm);
13066bed074cSKonstantin Belousov vm_map_unlock_read(&vm->vm_map);
13072025d69bSKonstantin Belousov if (shortage == VM_OOM_MEM)
13086bed074cSKonstantin Belousov size += vmspace_resident_count(vm);
13096bed074cSKonstantin Belousov vmspace_free(vm);
13105663e6deSDavid Greenman /*
13115663e6deSDavid Greenman * If this process is bigger than the biggest one so far,
13125663e6deSDavid Greenman * remember it.
13135663e6deSDavid Greenman */
13145663e6deSDavid Greenman if (size > bigsize) {
13151c58e4e5SJohn Baldwin if (bigproc != NULL)
13161c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc);
13175663e6deSDavid Greenman bigproc = p;
13185663e6deSDavid Greenman bigsize = size;
13191c58e4e5SJohn Baldwin } else
13201c58e4e5SJohn Baldwin PROC_UNLOCK(p);
13215663e6deSDavid Greenman }
13221005a129SJohn Baldwin sx_sunlock(&allproc_lock);
13235663e6deSDavid Greenman if (bigproc != NULL) {
1324729b1e51SDavid Greenman killproc(bigproc, "out of swap space");
1325fa885116SJulian Elischer sched_nice(bigproc, PRIO_MIN);
13261c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc);
13272feb50bfSAttilio Rao wakeup(&cnt.v_free_count);
13285663e6deSDavid Greenman }
13295663e6deSDavid Greenman }
133026f9a767SRodney W. Grimes
1331dc2efb27SJohn Dyson /*
1332dc2efb27SJohn Dyson * This routine tries to maintain the pseudo LRU active queue,
1333dc2efb27SJohn Dyson * so that during long periods of time when there is no paging,
1334956f3135SPhilippe Charnier * some statistic accumulation still occurs. This code
1335dc2efb27SJohn Dyson * helps the situation where paging just starts to occur.
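 *
 * Illustrative arithmetic for the early-exit test below (hypothetical
 * values): with v_inactive_target = 8192, v_cache_max = 4096, and
 * v_free_min = 1024, a system holding 16384 free + inactive + cached
 * pages computes (8192 + 4096 + 1024) - 16384 < 0 and returns early.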
1336dc2efb27SJohn Dyson */
1337dc2efb27SJohn Dyson static void
1338dc2efb27SJohn Dyson vm_pageout_page_stats()
1339dc2efb27SJohn Dyson {
1340b86e6ec0SAlan Cox vm_object_t object;
1341dc2efb27SJohn Dyson vm_page_t m,next;
1342dc2efb27SJohn Dyson int pcount,tpcount; /* Number of pages to check */
1343dc2efb27SJohn Dyson static int fullintervalcount = 0;
1344bef608bdSJohn Dyson int page_shortage;
1345bef608bdSJohn Dyson
13465e609009SAlan Cox mtx_assert(&vm_page_queue_mtx, MA_OWNED);
134790ecac61SMatthew Dillon page_shortage =
13482feb50bfSAttilio Rao (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
13492feb50bfSAttilio Rao (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
135090ecac61SMatthew Dillon
1351bef608bdSJohn Dyson if (page_shortage <= 0)
1352bef608bdSJohn Dyson return;
1353dc2efb27SJohn Dyson
13542feb50bfSAttilio Rao pcount = cnt.v_active_count;
1355dc2efb27SJohn Dyson fullintervalcount += vm_pageout_stats_interval;
1356dc2efb27SJohn Dyson if (fullintervalcount < vm_pageout_full_stats_interval) {
13578d28bf04SAlan Cox tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
13588d28bf04SAlan Cox cnt.v_page_count;
1359dc2efb27SJohn Dyson if (pcount > tpcount)
1360dc2efb27SJohn Dyson pcount = tpcount;
1361883f3caaSMatthew Dillon } else {
1362883f3caaSMatthew Dillon fullintervalcount = 0;
1363dc2efb27SJohn Dyson }
1364dc2efb27SJohn Dyson
1365be72f788SAlan Cox m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1366dc2efb27SJohn Dyson while ((m != NULL) && (pcount-- > 0)) {
13677e006499SJohn Dyson int actcount;
1368dc2efb27SJohn Dyson
1369ef39c05bSAlexander Leidinger KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
1370ab42316cSAlan Cox ("vm_pageout_page_stats: page %p isn't active", m));
1371dc2efb27SJohn Dyson
1372dc2efb27SJohn Dyson next = TAILQ_NEXT(m, pageq);
1373b86e6ec0SAlan Cox object = m->object;
13748dbca793STor Egge
13758dbca793STor Egge if ((m->flags & PG_MARKER) != 0) {
13768dbca793STor Egge m = next;
13778dbca793STor Egge continue;
13788dbca793STor Egge }
13792965a453SKip Macy vm_page_lock_assert(m, MA_NOTOWNED);
13802965a453SKip Macy if (vm_page_trylock(m) == 0 || (object = m->object) == NULL) {
13812965a453SKip Macy m = next;
13822965a453SKip Macy continue;
13832965a453SKip Macy }
13848dbca793STor Egge if (!VM_OBJECT_TRYLOCK(object) &&
13858dbca793STor Egge !vm_pageout_fallback_object_lock(m, &next)) {
13868dbca793STor Egge VM_OBJECT_UNLOCK(object);
13872965a453SKip Macy vm_page_unlock(m);
1388b86e6ec0SAlan Cox m = next;
1389b86e6ec0SAlan Cox continue;
1390b86e6ec0SAlan Cox }
1391b86e6ec0SAlan Cox
1392dc2efb27SJohn Dyson /*
1393dc2efb27SJohn Dyson * Don't deactivate pages that are busy.
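 *
 * (A busy page is in transient use by another thread, and a held
 * page may be pinned for pending I/O, so the loop below requeues
 * such pages untouched rather than sampling their activity.)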
1394dc2efb27SJohn Dyson */
1395dc2efb27SJohn Dyson if ((m->busy != 0) ||
13969af80719SAlan Cox (m->oflags & VPO_BUSY) ||
1397dc2efb27SJohn Dyson (m->hold_count != 0)) {
13982965a453SKip Macy vm_page_unlock(m);
1399b86e6ec0SAlan Cox VM_OBJECT_UNLOCK(object);
1400e5b006ffSAlan Cox vm_page_requeue(m);
1401dc2efb27SJohn Dyson m = next;
1402dc2efb27SJohn Dyson continue;
1403dc2efb27SJohn Dyson }
1404dc2efb27SJohn Dyson
14057e006499SJohn Dyson actcount = 0;
1406dc2efb27SJohn Dyson if (m->flags & PG_REFERENCED) {
1407e69763a3SDoug Rabson vm_page_flag_clear(m, PG_REFERENCED);
14087e006499SJohn Dyson actcount += 1;
1409dc2efb27SJohn Dyson }
1410dc2efb27SJohn Dyson
14110385347cSPeter Wemm actcount += pmap_ts_referenced(m);
14127e006499SJohn Dyson if (actcount) {
14137e006499SJohn Dyson m->act_count += ACT_ADVANCE + actcount;
1414dc2efb27SJohn Dyson if (m->act_count > ACT_MAX)
1415dc2efb27SJohn Dyson m->act_count = ACT_MAX;
1416e5b006ffSAlan Cox vm_page_requeue(m);
1417dc2efb27SJohn Dyson } else {
1418dc2efb27SJohn Dyson if (m->act_count == 0) {
14197e006499SJohn Dyson /*
14202b6b0df7SMatthew Dillon * We turn off page access, so that we have
14212b6b0df7SMatthew Dillon * more accurate RSS stats. We don't do this
14222b6b0df7SMatthew Dillon * in the normal page deactivation when the
14232b6b0df7SMatthew Dillon * system is loaded VM wise, because the
14242b6b0df7SMatthew Dillon * cost of the large number of page protect
14252b6b0df7SMatthew Dillon * operations would be higher than the value
14262b6b0df7SMatthew Dillon * of doing the operation.
14277e006499SJohn Dyson */
14284fec79beSAlan Cox pmap_remove_all(m);
1429dc2efb27SJohn Dyson vm_page_deactivate(m);
1430dc2efb27SJohn Dyson } else {
1431dc2efb27SJohn Dyson m->act_count -= min(m->act_count, ACT_DECLINE);
1432e5b006ffSAlan Cox vm_page_requeue(m);
1433dc2efb27SJohn Dyson }
1434dc2efb27SJohn Dyson }
14352965a453SKip Macy vm_page_unlock(m);
1436b86e6ec0SAlan Cox VM_OBJECT_UNLOCK(object);
1437dc2efb27SJohn Dyson m = next;
1438dc2efb27SJohn Dyson }
1439dc2efb27SJohn Dyson }
1440dc2efb27SJohn Dyson
1441df8bae1dSRodney W. Grimes /*
1442df8bae1dSRodney W. Grimes * vm_pageout is the high level pageout daemon.
1443df8bae1dSRodney W. Grimes */
14442b14f991SJulian Elischer static void
144526f9a767SRodney W. Grimes vm_pageout()
1446df8bae1dSRodney W. Grimes {
14471aab16a6SAlan Cox int error, pass;
14480384fff8SJason Evans
1449df8bae1dSRodney W. Grimes /*
1450df8bae1dSRodney W. Grimes * Initialize some paging parameters.
1451df8bae1dSRodney W. Grimes */
14522feb50bfSAttilio Rao cnt.v_interrupt_free_min = 2;
14532feb50bfSAttilio Rao if (cnt.v_page_count < 2000)
1454f35329acSJohn Dyson vm_pageout_page_count = 8;
1455f6b04d2bSDavid Greenman
145645ae1d91SAlan Cox /*
145745ae1d91SAlan Cox * v_free_reserved needs to include enough for the largest
145845ae1d91SAlan Cox * swap pager structures plus enough for any pv_entry structs
145945ae1d91SAlan Cox * when paging.
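 *
 * Illustrative sizing (assuming 4 KB pages and v_page_count of
 * roughly 262144, i.e. a 1 GB machine): the formula below yields
 * v_free_min = 4 + (262144 - 1024) / 200, about 1309 pages (~5 MB),
 * before v_free_reserved is added in.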
146045ae1d91SAlan Cox */
14612feb50bfSAttilio Rao if (cnt.v_page_count > 1024)
14622feb50bfSAttilio Rao cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
14632feb50bfSAttilio Rao else
14642feb50bfSAttilio Rao cnt.v_free_min = 4;
14652feb50bfSAttilio Rao cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
14662feb50bfSAttilio Rao cnt.v_interrupt_free_min;
14672feb50bfSAttilio Rao cnt.v_free_reserved = vm_pageout_page_count +
14682446e4f0SAlan Cox cnt.v_pageout_free_min + (cnt.v_page_count / 768);
14692feb50bfSAttilio Rao cnt.v_free_severe = cnt.v_free_min / 2;
14702feb50bfSAttilio Rao cnt.v_free_min += cnt.v_free_reserved;
14712feb50bfSAttilio Rao cnt.v_free_severe += cnt.v_free_reserved;
147245ae1d91SAlan Cox
1473ed74321bSDavid Greenman /*
14742b6b0df7SMatthew Dillon * v_free_target and v_cache_min control pageout hysteresis. Note
14752b6b0df7SMatthew Dillon * that these are more a measure of the VM cache queue hysteresis
14762b6b0df7SMatthew Dillon * than the VM free queue. Specifically, v_free_target is the
14772b6b0df7SMatthew Dillon * high water mark (free+cache pages).
14782b6b0df7SMatthew Dillon *
14792b6b0df7SMatthew Dillon * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
14802b6b0df7SMatthew Dillon * low water mark, while v_free_min is the stop. v_cache_min must
14812b6b0df7SMatthew Dillon * be big enough to handle memory needs while the pageout daemon
14822b6b0df7SMatthew Dillon * is signalled and run to free more pages.
1483ed74321bSDavid Greenman */
14842feb50bfSAttilio Rao if (cnt.v_free_count > 6144)
14852feb50bfSAttilio Rao cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
14862feb50bfSAttilio Rao else
14872feb50bfSAttilio Rao cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
14886f2b142eSDavid Greenman
14892feb50bfSAttilio Rao if (cnt.v_free_count > 2048) {
14902feb50bfSAttilio Rao cnt.v_cache_min = cnt.v_free_target;
14912feb50bfSAttilio Rao cnt.v_cache_max = 2 * cnt.v_cache_min;
14922feb50bfSAttilio Rao cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
14930d94caffSDavid Greenman } else {
14942feb50bfSAttilio Rao cnt.v_cache_min = 0;
14952feb50bfSAttilio Rao cnt.v_cache_max = 0;
14962feb50bfSAttilio Rao cnt.v_inactive_target = cnt.v_free_count / 4;
14970d94caffSDavid Greenman }
14982feb50bfSAttilio Rao if (cnt.v_inactive_target > cnt.v_free_count / 3)
14992feb50bfSAttilio Rao cnt.v_inactive_target = cnt.v_free_count / 3;
1500df8bae1dSRodney W. Grimes
1501df8bae1dSRodney W. Grimes /* XXX does not really belong here */
1502df8bae1dSRodney W. Grimes if (vm_page_max_wired == 0)
15032feb50bfSAttilio Rao vm_page_max_wired = cnt.v_free_count / 3;
1504df8bae1dSRodney W. Grimes
1505dc2efb27SJohn Dyson if (vm_pageout_stats_max == 0)
15062feb50bfSAttilio Rao vm_pageout_stats_max = cnt.v_free_target;
1507dc2efb27SJohn Dyson
1508dc2efb27SJohn Dyson /*
1509dc2efb27SJohn Dyson * Set interval in seconds for stats scan.
1510dc2efb27SJohn Dyson */
1511dc2efb27SJohn Dyson if (vm_pageout_stats_interval == 0)
1512bef608bdSJohn Dyson vm_pageout_stats_interval = 5;
1513dc2efb27SJohn Dyson if (vm_pageout_full_stats_interval == 0)
1514dc2efb27SJohn Dyson vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1515dc2efb27SJohn Dyson
151624a1cce3SDavid Greenman swap_pager_swap_init();
15172b6b0df7SMatthew Dillon pass = 0;
1518df8bae1dSRodney W. Grimes /*
15190d94caffSDavid Greenman * The pageout daemon is never done, so loop forever.
1520df8bae1dSRodney W. Grimes */
1521df8bae1dSRodney W. Grimes while (TRUE) {
1522936524aaSMatthew Dillon /*
1523936524aaSMatthew Dillon * If we have enough free memory, wakeup waiters. Do
1524936524aaSMatthew Dillon * not clear vm_pages_needed until we reach our target,
1525936524aaSMatthew Dillon * otherwise we may be woken up over and over again and
1526936524aaSMatthew Dillon * waste a lot of cpu.
1527936524aaSMatthew Dillon */
1528e9f995d8SAlan Cox mtx_lock(&vm_page_queue_free_mtx);
1529936524aaSMatthew Dillon if (vm_pages_needed && !vm_page_count_min()) {
1530a1c0a785SAlan Cox if (!vm_paging_needed())
1531936524aaSMatthew Dillon vm_pages_needed = 0;
15322feb50bfSAttilio Rao wakeup(&cnt.v_free_count);
1533936524aaSMatthew Dillon }
1534936524aaSMatthew Dillon if (vm_pages_needed) {
153590ecac61SMatthew Dillon /*
15362b6b0df7SMatthew Dillon * Still not done, take a second pass without waiting
15372b6b0df7SMatthew Dillon * (unlimited dirty cleaning), otherwise sleep a bit
15382b6b0df7SMatthew Dillon * and try again.
153990ecac61SMatthew Dillon */
15402b6b0df7SMatthew Dillon ++pass;
15412b6b0df7SMatthew Dillon if (pass > 1)
1542e9f995d8SAlan Cox msleep(&vm_pages_needed,
1543e9f995d8SAlan Cox &vm_page_queue_free_mtx, PVM, "psleep",
1544e9f995d8SAlan Cox hz / 2);
154590ecac61SMatthew Dillon } else {
154690ecac61SMatthew Dillon /*
15472b6b0df7SMatthew Dillon * Good enough, sleep & handle stats. Prime the pass
15482b6b0df7SMatthew Dillon * for the next run.
154990ecac61SMatthew Dillon */
15502b6b0df7SMatthew Dillon if (pass > 1)
15512b6b0df7SMatthew Dillon pass = 1;
15522b6b0df7SMatthew Dillon else
15532b6b0df7SMatthew Dillon pass = 0;
1554e9f995d8SAlan Cox error = msleep(&vm_pages_needed,
1555e9f995d8SAlan Cox &vm_page_queue_free_mtx, PVM, "psleep",
1556e9f995d8SAlan Cox vm_pageout_stats_interval * hz);
1557dc2efb27SJohn Dyson if (error && !vm_pages_needed) {
1558e9f995d8SAlan Cox mtx_unlock(&vm_page_queue_free_mtx);
15592b6b0df7SMatthew Dillon pass = 0;
1560e9f995d8SAlan Cox vm_page_lock_queues();
1561dc2efb27SJohn Dyson vm_pageout_page_stats();
15625e609009SAlan Cox vm_page_unlock_queues();
1563dc2efb27SJohn Dyson continue;
1564dc2efb27SJohn Dyson }
1565f919ebdeSDavid Greenman }
1566b18bfc3dSJohn Dyson if (vm_pages_needed)
1567393a081dSAttilio Rao cnt.v_pdwakeups++;
1568e9f995d8SAlan Cox mtx_unlock(&vm_page_queue_free_mtx);
15692b6b0df7SMatthew Dillon vm_pageout_scan(pass);
1570df8bae1dSRodney W. Grimes }
1571df8bae1dSRodney W. Grimes }
157226f9a767SRodney W. Grimes
15736b4b77adSAlan Cox /*
1574e9f995d8SAlan Cox * Unless the free page queue lock is held by the caller, this function
15756b4b77adSAlan Cox * should be regarded as advisory. Specifically, the caller should
15766b4b77adSAlan Cox * not msleep() on &cnt.v_free_count following this function unless
1577e9f995d8SAlan Cox * the free page queue lock is held until the msleep() is performed.
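 *
 * A caller-side sketch of the safe pattern (abridged and hypothetical;
 * see vm_wait() for the canonical version):
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	if (!vm_pages_needed) {
 *		vm_pages_needed = 1;
 *		wakeup(&vm_pages_needed);
 *	}
 *	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx,
 *	    PDROP | PVM, "vmwait", 0);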
15786b4b77adSAlan Cox */
1579e0c5a895SJohn Dyson void
1580e0c5a895SJohn Dyson pagedaemon_wakeup()
1581e0c5a895SJohn Dyson {
1582a1c0a785SAlan Cox
1583b40ce416SJulian Elischer if (!vm_pages_needed && curthread->td_proc != pageproc) {
1584a1c0a785SAlan Cox vm_pages_needed = 1;
1585e0c5a895SJohn Dyson wakeup(&vm_pages_needed);
1586e0c5a895SJohn Dyson }
1587e0c5a895SJohn Dyson }
1588e0c5a895SJohn Dyson
158938efa82bSJohn Dyson #if !defined(NO_SWAPPING)
15905afce282SDavid Greenman static void
159197824da3SAlan Cox vm_req_vmdaemon(int req)
15925afce282SDavid Greenman {
15935afce282SDavid Greenman static int lastrun = 0;
15945afce282SDavid Greenman
159597824da3SAlan Cox mtx_lock(&vm_daemon_mtx);
159697824da3SAlan Cox vm_pageout_req_swapout |= req;
1597b18bfc3dSJohn Dyson if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
15985afce282SDavid Greenman wakeup(&vm_daemon_needed);
15995afce282SDavid Greenman lastrun = ticks;
16005afce282SDavid Greenman }
160197824da3SAlan Cox mtx_unlock(&vm_daemon_mtx);
16025afce282SDavid Greenman }
16035afce282SDavid Greenman
16042b14f991SJulian Elischer static void
16054f9fb771SBruce Evans vm_daemon()
16060d94caffSDavid Greenman {
160791d5354aSJohn Baldwin struct rlimit rsslim;
1608dcbcd518SBruce Evans struct proc *p;
1609dcbcd518SBruce Evans struct thread *td;
16106bed074cSKonstantin Belousov struct vmspace *vm;
161197824da3SAlan Cox int breakout, swapout_flags;
16120d94caffSDavid Greenman
16132fe6e4d7SDavid Greenman while (TRUE) {
161497824da3SAlan Cox mtx_lock(&vm_daemon_mtx);
161597824da3SAlan Cox msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
161697824da3SAlan Cox swapout_flags = vm_pageout_req_swapout;
16174c1f8ee9SDavid Greenman vm_pageout_req_swapout = 0;
161897824da3SAlan Cox mtx_unlock(&vm_daemon_mtx);
161997824da3SAlan Cox if (swapout_flags)
162097824da3SAlan Cox swapout_procs(swapout_flags);
162197824da3SAlan Cox
16222fe6e4d7SDavid Greenman /*
16230d94caffSDavid Greenman * scan the processes for exceeding their rlimits or if a
16240d94caffSDavid Greenman * process is swapped out -- deactivate pages
16252fe6e4d7SDavid Greenman */
16261005a129SJohn Baldwin sx_slock(&allproc_lock);
1627f67af5c9SXin LI FOREACH_PROC_IN_SYSTEM(p) {
1628fe2144fdSLuoqi Chen vm_pindex_t limit, size;
16292fe6e4d7SDavid Greenman
16302fe6e4d7SDavid Greenman /*
16312fe6e4d7SDavid Greenman * if this is a system process or if we have already
16322fe6e4d7SDavid Greenman * looked at this process, skip it.
16332fe6e4d7SDavid Greenman */
1634897ecacdSJohn Baldwin PROC_LOCK(p);
16356bed074cSKonstantin Belousov if (p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
1636897ecacdSJohn Baldwin PROC_UNLOCK(p);
16372fe6e4d7SDavid Greenman continue;
16382fe6e4d7SDavid Greenman }
16392fe6e4d7SDavid Greenman /*
16402fe6e4d7SDavid Greenman * if the process is in a non-running type state,
16412fe6e4d7SDavid Greenman * don't touch it.
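 *
 * (This is the same per-thread state test used in vm_pageout_oom()
 * above; see that loop for the TD_ON_RUNQ/TD_IS_RUNNING/
 * TD_IS_SLEEPING checks.)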
16422fe6e4d7SDavid Greenman */
1643e602ba25SJulian Elischer breakout = 0;
1644e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) {
1645982d11f8SJeff Roberson thread_lock(td);
164671fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) &&
164771fad9fdSJulian Elischer !TD_IS_RUNNING(td) &&
164871fad9fdSJulian Elischer !TD_IS_SLEEPING(td)) {
1649982d11f8SJeff Roberson thread_unlock(td);
1650e602ba25SJulian Elischer breakout = 1;
1651e602ba25SJulian Elischer break;
1652e602ba25SJulian Elischer }
1653982d11f8SJeff Roberson thread_unlock(td);
1654e602ba25SJulian Elischer }
1655897ecacdSJohn Baldwin if (breakout) {
1656897ecacdSJohn Baldwin PROC_UNLOCK(p);
16572fe6e4d7SDavid Greenman continue;
16582fe6e4d7SDavid Greenman }
16592fe6e4d7SDavid Greenman /*
16602fe6e4d7SDavid Greenman * get a limit
16612fe6e4d7SDavid Greenman */
1662dcbcd518SBruce Evans lim_rlimit(p, RLIMIT_RSS, &rsslim);
1663fe2144fdSLuoqi Chen limit = OFF_TO_IDX(
166491d5354aSJohn Baldwin qmin(rsslim.rlim_cur, rsslim.rlim_max));
16652fe6e4d7SDavid Greenman
16662fe6e4d7SDavid Greenman /*
16670d94caffSDavid Greenman * let processes that are swapped out really be
16680d94caffSDavid Greenman * swapped out; set the limit to nothing (will force a
16690d94caffSDavid Greenman * swap-out).
16702fe6e4d7SDavid Greenman */
1671b61ce5b0SJeff Roberson if ((p->p_flag & P_INMEM) == 0)
16720d94caffSDavid Greenman limit = 0; /* XXX */
16736bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p);
1674897ecacdSJohn Baldwin PROC_UNLOCK(p);
16756bed074cSKonstantin Belousov if (vm == NULL)
16766bed074cSKonstantin Belousov continue;
16772fe6e4d7SDavid Greenman
16786bed074cSKonstantin Belousov size = vmspace_resident_count(vm);
16792fe6e4d7SDavid Greenman if (limit >= 0 && size >= limit) {
1680fe2144fdSLuoqi Chen vm_pageout_map_deactivate_pages(
16816bed074cSKonstantin Belousov &vm->vm_map, limit);
16822fe6e4d7SDavid Greenman }
16836bed074cSKonstantin Belousov vmspace_free(vm);
16842fe6e4d7SDavid Greenman }
16851005a129SJohn Baldwin sx_sunlock(&allproc_lock);
168624a1cce3SDavid Greenman }
16872fe6e4d7SDavid Greenman }
1688a1287949SEivind Eklund #endif /* !defined(NO_SWAPPING) */
1689
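
/*
 * Illustrative note on the RSS clamp above (hypothetical values,
 * assuming 4 KB pages): an RLIMIT_RSS of 64 MB gives
 * limit = OFF_TO_IDX(67108864) = 16384 pages, so a process with
 * 20000 resident pages would have vm_pageout_map_deactivate_pages()
 * trim its map back toward 16384 pages.
 */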