/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
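/*
 * Reader's note: vm_pageout_page_count bounds the size of the clusters
 * built by vm_pageout_clean() below; at most this many dirty pages are
 * gathered around the victim page and handed to the pager in one write.
 */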
int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);

/*
 * Initialize a dummy page for marking the caller's place in the specified
 * paging queue.  In principle, this function only needs to set the flag
 * PG_MARKER.  Nonetheless, it sets the flag VPO_BUSY and initializes the hold
 * count to one as safety precautions.
 */
static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->oflags = VPO_BUSY;
	marker->queue = queue;
	marker->hold_count = 1;
}

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'.  VM_OBJECT_TRYLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	object = m->object;

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
	    m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_unlock(m);
	VM_OBJECT_LOCK(object);
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl,
	    &marker, pageq);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use marker page
 * to detect page queue changes and maintain notion of next page on
 * page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
	return (unchanged);
}
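/*
 * Both helpers above follow the same marker discipline.  A minimal
 * sketch of the expected caller pattern (illustrative only; the real
 * callers are in vm_pageout_scan() below):
 *
 *	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL;
 *	    m = next) {
 *		next = TAILQ_NEXT(m, pageq);
 *		if (!vm_pageout_page_lock(m, &next)) {
 *			vm_page_unlock(m);
 *			continue;
 *		}
 *		...operate on the locked page...
 *	}
 *
 * The page is locked on return even when FALSE is returned, and "next"
 * has been refreshed from the marker, so the scan can resume from a
 * valid queue position.
 */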
/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count], pb, ps;
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
	    ("vm_pageout_clean: page %p is busy", m));
	KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
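	/*
	 * Worked example of the alignment check in the reverse scan
	 * below, assuming the default vm_pageout_page_count of 16: for a
	 * victim page at pindex 35, the scan gathers pages 34, 33 and 32
	 * and then stops, because (35 - (4 - 1)) % 16 == 0, i.e. the
	 * cluster now begins on a 16-page boundary.  The forward scan
	 * then fills out the remainder of the cluster.
	 */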
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_prev(pb)) == NULL ||
		    (p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_next(ps)) == NULL ||
		    (p->oflags & VPO_BUSY) != 0 || p->busy != 0)
			break;
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
	    NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 *	for any page in runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
		    mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe()) {
				vm_page_lock(mt);
				vm_page_try_to_cache(mt);
				vm_page_unlock(mt);
			}
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}
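/*
 * Usage note for vm_pageout_flush(): per the assertions at the top of
 * the function, callers must hold the object lock and must not hold the
 * page queue lock.  Within this file it is reached through
 * vm_pageout_clean() above, which passes the aligned cluster it built.
 */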
#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int actcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE ||
	    first_object->type == OBJT_SG)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		if (object->type == OBJT_PHYS || object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
				continue;
			PCPU_INC(cnt.v_pdpages);
			vm_page_lock(p);
			if (p->wire_count != 0 || p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if ((p->aflags & PGA_REFERENCED) != 0) {
				if (actcount == 0)
					actcount = 1;
				vm_page_aflag_clear(p, PGA_REFERENCED);
			}
			if (p->queue != PQ_ACTIVE && actcount != 0) {
				vm_page_activate(p);
				p->act_count += actcount;
			} else if (p->queue == PQ_ACTIVE) {
				if (actcount == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode &&
					    (vm_pageout_algorithm ||
					    p->act_count == 0)) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_page_lock_queues();
						vm_page_requeue(p);
						vm_page_unlock_queues();
					}
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_lock_queues();
					vm_page_requeue(p);
					vm_page_unlock_queues();
				}
			} else if (p->queue == PQ_INACTIVE)
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}
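/*
 * Note on the loop above: it descends the shadow chain through
 * backing_object, locking each backing object before unlocking the
 * current one (first_object stays locked throughout), and it bails out
 * as soon as the pmap's resident page count drops to "desired".
 */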
/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	long desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	boolean_t queues_locked;

	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	vm_pageout_init_marker(&marker, PQ_INACTIVE);

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
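	/*
	 * With the default vm_max_launder of 32, for example, the first
	 * pass (pass == 0) flushes at most 32 dirty pages per scan, while
	 * any subsequent pass raises the cap to 10000, which is
	 * effectively unlimited.
	 */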
	vm_page_lock_queues();
	queues_locked = TRUE;
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {
		KASSERT(queues_locked, ("unlocked queues"));
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE)
			goto rescan0;

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("Fictitious page %p cannot be in inactive queue", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("Unmanaged page %p cannot be in inactive queue", m));

		/*
		 * Lock the page.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_page_unlock(m);
			vm_page_requeue(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		    m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}
		if (m->busy || (m->oflags & VPO_BUSY)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * We unlock vm_page_queue_mtx, invalidating the
		 * 'next' pointer.  Use our marker to remember our
		 * place.
		 */
		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
		    m, &marker, pageq);
		vm_page_unlock_queues();
		queues_locked = FALSE;

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (object->ref_count == 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if ((m->aflags & PGA_REFERENCED) == 0 &&
		    (actcount = pmap_ts_referenced(m)) != 0) {
			vm_page_activate(m);
			vm_page_unlock(m);
			m->act_count += actcount + ACT_ADVANCE;
			VM_OBJECT_UNLOCK(object);
			goto relock_queues;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->aflags & PGA_REFERENCED) != 0) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			vm_page_unlock(m);
			m->act_count += actcount + ACT_ADVANCE + 1;
			VM_OBJECT_UNLOCK(object);
			goto relock_queues;
		}

		/*
		 * If the upper level VM system does not believe that the page
		 * is fully dirty, but it is mapped for write access, then we
		 * consult the pmap to see if the page's dirty status should
		 * be updated.
		 */
		if (m->dirty != VM_PAGE_BITS_ALL &&
		    pmap_page_is_write_mapped(m)) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.  In principle, we could eliminate just write
			 * access here rather than all access.  In the expected
			 * case, when there are no last instant modifications
			 * to the page, removing all access will be cheaper
			 * overall.
			 */
			if (pmap_is_modified(m))
				vm_page_dirty(m);
			else if (m->dirty == 0)
				pmap_remove_all(m);
		}
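		/*
		 * Disposition of the page from here on: invalid pages are
		 * freed outright, clean pages are moved to the cache queue,
		 * and dirty pages are laundered, subject to the two-pass
		 * PG_WINATCFLS policy and the maxlaunder cap below.
		 */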
		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			PCPU_INC(cnt.v_dfree);
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			m->flags |= PG_WINATCFLS;
			vm_page_lock_queues();
			queues_locked = TRUE;
			vm_page_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok, vfslocked = 0;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}
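			/*
			 * In short: pageouts to non-swap-backed objects are
			 * always permitted; pageouts to swap-backed objects
			 * are permitted unless administratively deferred or
			 * disabled, and a deferred (but not disabled)
			 * pageout is still allowed once free memory falls to
			 * the minimum threshold.
			 */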
This results in 10092b6b0df7SMatthew Dillon * completely non-deterministic operation and, on a 10102b6b0df7SMatthew Dillon * busy system, can lead to extremely non-optimal 10112b6b0df7SMatthew Dillon * pageouts. For example, it can cause clean pages 10122b6b0df7SMatthew Dillon * to be freed and dirty pages to be moved to the end 10132b6b0df7SMatthew Dillon * of the queue. Since dirty pages are also moved to 10142b6b0df7SMatthew Dillon * the end of the queue once-cleaned, this gives 10152b6b0df7SMatthew Dillon * way too large a weighting to defering the freeing 10162b6b0df7SMatthew Dillon * of dirty pages. 10171c7c3c6aSMatthew Dillon * 101823b59018SMatthew Dillon * We can't wait forever for the vnode lock, we might 101923b59018SMatthew Dillon * deadlock due to a vn_read() getting stuck in 102023b59018SMatthew Dillon * vm_wait while holding this vnode. We skip the 102123b59018SMatthew Dillon * vnode if we can't get it in a reasonable amount 102223b59018SMatthew Dillon * of time. 10231c7c3c6aSMatthew Dillon */ 10241c7c3c6aSMatthew Dillon if (object->type == OBJT_VNODE) { 10252965a453SKip Macy vm_page_unlock(m); 102624a1cce3SDavid Greenman vp = object->handle; 1027db27dcc0STor Egge if (vp->v_type == VREG && 1028db27dcc0STor Egge vn_start_write(vp, &mp, V_NOWAIT) != 0) { 10296129343dSKonstantin Belousov mp = NULL; 1030db27dcc0STor Egge ++pageout_lock_miss; 1031db27dcc0STor Egge if (object->flags & OBJ_MIGHTBEDIRTY) 1032db27dcc0STor Egge vnodes_skipped++; 10332965a453SKip Macy vm_page_lock_queues(); 1034625e6c0aSTor Egge goto unlock_and_continue; 1035db27dcc0STor Egge } 1036b9f180d1SKonstantin Belousov KASSERT(mp != NULL, 1037b9f180d1SKonstantin Belousov ("vp %p with NULL v_mount", vp)); 103814137dc0SAlan Cox vm_object_reference_locked(object); 10393562af12SAlan Cox VM_OBJECT_UNLOCK(object); 104097824da3SAlan Cox vfslocked = VFS_LOCK_GIANT(vp->v_mount); 104197824da3SAlan Cox if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK, 104297824da3SAlan Cox curthread)) { 10433562af12SAlan Cox VM_OBJECT_LOCK(object); 10443e1b578aSAlan Cox vm_page_lock_queues(); 104523b59018SMatthew Dillon ++pageout_lock_miss; 1046aef922f5SJohn Dyson if (object->flags & OBJ_MIGHTBEDIRTY) 1047925a3a41SJohn Dyson vnodes_skipped++; 1048625e6c0aSTor Egge vp = NULL; 1049625e6c0aSTor Egge goto unlock_and_continue; 105085a376ebSJohn Dyson } 10513562af12SAlan Cox VM_OBJECT_LOCK(object); 10522965a453SKip Macy vm_page_lock(m); 10533e1b578aSAlan Cox vm_page_lock_queues(); 1054*48cc2fc7SKonstantin Belousov queues_locked = TRUE; 1055f35329acSJohn Dyson /* 1056936524aaSMatthew Dillon * The page might have been moved to another 1057936524aaSMatthew Dillon * queue during potential blocking in vget() 1058936524aaSMatthew Dillon * above. The page might have been freed and 105914137dc0SAlan Cox * reused for another vnode. 1060f35329acSJohn Dyson */ 10619cf51988SAlan Cox if (m->queue != PQ_INACTIVE || 1062936524aaSMatthew Dillon m->object != object || 1063625e6c0aSTor Egge TAILQ_NEXT(m, pageq) != &marker) { 10642965a453SKip Macy vm_page_unlock(m); 1065b182ec9eSJohn Dyson if (object->flags & OBJ_MIGHTBEDIRTY) 1066925a3a41SJohn Dyson vnodes_skipped++; 10673562af12SAlan Cox goto unlock_and_continue; 1068b182ec9eSJohn Dyson } 1069b182ec9eSJohn Dyson 1070f35329acSJohn Dyson /* 1071936524aaSMatthew Dillon * The page may have been busied during the 107214137dc0SAlan Cox * blocking in vget(). We don't move the 1073936524aaSMatthew Dillon * page back onto the end of the queue so that 1074936524aaSMatthew Dillon * statistics are more correct if we don't. 
				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue so
				 * that statistics are more correct.
				 */
				if (m->busy || (m->oflags & VPO_BUSY)) {
					vm_page_unlock(m);
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it.
				 */
				if (m->hold_count) {
					vm_page_lock_queues();
					queues_locked = TRUE;
					vm_page_unlock(m);
					vm_page_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * Decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_UNLOCK(object);
			if (mp != NULL) {
				if (queues_locked) {
					vm_page_unlock_queues();
					queues_locked = FALSE;
				}
				if (vp != NULL)
					vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
				vm_object_deallocate(object);
				vn_finished_write(mp);
			}
			vm_page_lock_assert(m, MA_NOTOWNED);
			goto relock_queues;
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
relock_queues:
		if (!queues_locked) {
			vm_page_lock_queues();
			queues_locked = TRUE;
		}
		next = TAILQ_NEXT(&marker, pageq);
		TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
		    &marker, pageq);
	}
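	/*
	 * An illustrative expansion of the computation below, assuming
	 * vm_paging_target() has its usual vmmeter.h definition of
	 * (v_free_target + v_cache_min) - (v_free_count + v_cache_count):
	 * the active scan is asked to produce enough inactive pages to
	 * cover the remaining free/cache deficit, refill the inactive
	 * queue up to v_inactive_target, and make up for the pages the
	 * inactive scan just failed to reclaim (addl_page_shortage).
	 */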
	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("Fictitious page %p cannot be in active queue", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("Unmanaged page %p cannot be in active queue", m));
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;
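		/*
		 * What follows is, in outline, the pseudo-LRU aging used
		 * throughout this file: a referenced page gains ACT_ADVANCE
		 * plus the number of mappings that referenced it (capped at
		 * ACT_MAX), an unreferenced page loses ACT_DECLINE, and a
		 * page whose act_count has decayed to zero becomes a
		 * candidate for deactivation.
		 */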
		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (object->ref_count != 0) {
			if (m->aflags & PGA_REFERENCED)
				actcount += 1;
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_aflag_clear(m, PGA_REFERENCED);

		/*
		 * Only if the object is currently in use do we trust the
		 * page activation count stats.
		 */
		if (actcount && (object->ref_count != 0)) {
			vm_page_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (object->ref_count == 0) {
					KASSERT(!pmap_page_is_mapped(m),
					    ("vm_pageout_scan: page %p is mapped", m));
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;

		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif
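	/*
	 * (The time_second/lsec pair above is just a one-second rate
	 * limit: at most one VM_SWAP_IDLE request is posted per
	 * wall-clock second, no matter how often the scan runs.)
	 */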
	/*
	 * If we didn't reach the paging target and we have skipped a
	 * vnode in a writeable object, wake up the sync daemon.  Also
	 * kick off swapout if it is enabled and we are still below the
	 * free page target.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target())
			vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)))
		vm_pageout_oom(VM_OOM_MEM);
}

void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if (p->p_state != PRS_NORMAL ||
		    (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
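		/*
		 * The size computed below is the victim score: the
		 * process' swap usage, plus its resident set size when
		 * the shortage is VM_OOM_MEM.  The process with the
		 * largest score ends up as bigproc.
		 */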
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td) &&
			    !TD_IS_SUSPENDED(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Get the process size.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * If this process is bigger than the biggest one seen so
		 * far, remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&cnt.v_free_count);
	}
}
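/*
 * Note: killproc() above logs the event and delivers SIGKILL; renicing
 * the victim to PRIO_MIN is meant to get it scheduled promptly so that it
 * exits and gives back its pages as quickly as possible.
 */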
/*
 * This routine tries to maintain the pseudo LRU active queue, so that
 * during long periods with no paging, some statistics accumulation still
 * occurs.  This code helps the situation where paging just starts to
 * occur.
 */
static void
vm_pageout_page_stats()
{
	vm_object_t object;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
		    cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}
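	/*
	 * In effect, a partial scan visits a slice of the active queue
	 * sized in proportion to its share of all pages: with
	 * vm_pageout_stats_max at its default of v_free_target, a machine
	 * whose active queue holds half of memory gets a tpcount of about
	 * half of vm_pageout_stats_max.  Only every fourth interval (the
	 * default vm_pageout_full_stats_interval) walks the whole queue.
	 */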
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_page_stats: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		vm_page_lock_assert(m, MA_NOTOWNED);
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->aflags & PGA_REFERENCED) {
			vm_page_aflag_clear(m, PGA_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int error, pass;

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;
	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2 * MAXBSIZE) / PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than of the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and runs to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;
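	/*
	 * To make the formulas above concrete, a rough worked example
	 * (assuming 4 KiB pages, a 64 KiB MAXBSIZE and the default
	 * vm_pageout_page_count of 16) for a machine with 262144 pages
	 * (1 GiB) that boots with most of its memory free:
	 *
	 *	v_free_min	   = 4 + (262144 - 1024) / 200	= 1309
	 *	v_pageout_free_min = (2 * 64K) / 4K + 2		= 34
	 *	v_free_reserved	   = 16 + 34 + 262144 / 768	= 391
	 *	v_free_min	  += v_free_reserved		= 1700 (~6.6 MB)
	 *	v_free_target	   = 4 * 1700 + 391		= 7191 (~28 MB)
	 *
	 * The exact values matter less than the shape: the thresholds
	 * grow roughly linearly with the size of physical memory.
	 */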
	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wake up waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed,
			    &vm_page_queue_free_mtx, PVM, "psleep",
			    vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				mtx_unlock(&vm_page_queue_free_mtx);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(pass);
	}
}
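/*
 * To summarize the loop above: "pass" encodes urgency, roughly.  It is 0
 * when the daemon last went to sleep satisfied, becomes 1 on the first
 * scan after a wakeup, and climbs past 1 only while successive scans keep
 * failing to reach the target, at which point the daemon merely naps for
 * half a second between scans and vm_pageout_scan() is expected to launder
 * dirty pages much more aggressively (the "unlimited dirty cleaning"
 * mentioned above).
 */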
/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup()
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}
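/*
 * Note on vm_req_vmdaemon() above: requests are OR-ed into
 * vm_pageout_req_swapout under vm_daemon_mtx and consumed in one batch by
 * vm_daemon(), while the wakeup itself is rate-limited to about once per
 * second; the (ticks < lastrun) clause just handles the ticks counter
 * wrapping around.
 */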
static void
vm_daemon()
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
#ifdef RACCT
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
		    hz);
#else
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
		    0);
#endif
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * Scan the processes; deactivate pages of any process that
		 * has exceeded its RSS rlimit or that is swapped out.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * Get a limit.
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
#ifdef RACCT
			rsize = IDX_TO_OFF(size);
			PROC_LOCK(p);
			racct_set(p, RACCT_RSS, rsize);
			ravailable = racct_get_available(p, RACCT_RSS);
			PROC_UNLOCK(p);
			if (rsize > ravailable) {
				/*
				 * Don't be overly aggressive; this might be
				 * an innocent process, and the limit could've
				 * been exceeded by some memory hog.  Don't
				 * try to deactivate more than 1/4th of the
				 * process' resident set size.
				 */
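				/*
				 * Numerically: on the first eight attempts
				 * the target below is clamped to at least
				 * 3/4 of the current RSS, so e.g. with
				 * rsize = 400 MB and ravailable = 100 MB,
				 * the effective target becomes 300 MB.
				 */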
				if (attempts <= 8) {
					if (ravailable < rsize - (rsize / 4))
						ravailable = rsize -
						    (rsize / 4);
				}
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, OFF_TO_IDX(ravailable));
				/* Update RSS usage after paging out. */
				size = vmspace_resident_count(vm);
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				racct_set(p, RACCT_RSS, rsize);
				PROC_UNLOCK(p);
				if (rsize > ravailable)
					tryagain = 1;
			}
#endif
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10)
			goto again;
	}
}
#endif /* !defined(NO_SWAPPING) */