/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_scan(struct vm_domain *vmd, int pass);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
int vm_pageout_wakeup_thresh;

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_update_period;
static int defer_swap_pageouts;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static time_t lowmem_uptime;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

static int vm_panic_on_oom = 0;

SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
	"panic on out of memory instead of killing the largest process");

SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
	CTLFLAG_RW, &vm_pageout_wakeup_thresh, 0,
	"free page threshold for waking up the pageout daemon");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
	CTLFLAG_RW, &vm_pageout_update_period, 0,
	"Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
	"Low memory callback period");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
static boolean_t vm_pageout_launder(struct vm_pagequeue *pq, int, vm_paddr_t,
    vm_paddr_t);
#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);

/*
 * Initialize a dummy page for marking the caller's place in the specified
 * paging queue.  In principle, this function only needs to set the flag
 * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
 * to one as safety precautions.
 */
static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
	marker->queue = queue;
	marker->hold_count = 1;
}

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'.  VM_OBJECT_TRYWLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
static boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	struct vm_pagequeue *pq;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	pq = vm_page_pagequeue(m);
	object = m->object;

	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
	vm_pagequeue_unlock(pq);
	vm_page_unlock(m);
	VM_OBJECT_WLOCK(object);
	vm_page_lock(m);
	vm_pagequeue_lock(pq);

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, plinks.q);
	unchanged = (m->queue == queue &&
		     m->object == object &&
		     &marker == TAILQ_NEXT(m, plinks.q));
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use marker page
 * to detect page queue changes and maintain notion of next page on
 * page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
static boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	struct vm_pagequeue *pq;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	pq = vm_page_pagequeue(m);

	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
	vm_pagequeue_unlock(pq);
	vm_page_lock(m);
	vm_pagequeue_lock(pq);

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, plinks.q);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, plinks.q));
	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
	return (unchanged);
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however; the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count], pb, ps;
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	vm_page_assert_unbusied(m);
	KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
			break;
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
	    NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 *	Returned runlen is the count of pages between mreq and first
 *	page after mreq with status VM_PAGER_AGAIN.
 *	*eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
 *	for any page in runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_sbusy(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	if (eio != NULL)
		*eio = FALSE;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    !pmap_page_is_write_mapped(mt),
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			if (eio != NULL && i >= mreq && i - mreq < runlen)
				*eio = TRUE;
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(mt);
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}

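/*
 * vm_pageout_launder:
 *
 *	Scan the given page queue for pages within the physical address
 *	range [low, high).  Clean pages are moved to the cache.  A dirty
 *	page is laundered synchronously, through vm_object_page_clean()
 *	for vnode-backed objects or vm_pageout_flush() for swap/default
 *	objects, but only when "tries" is nonzero and the object is not
 *	dead.  Return TRUE as soon as one dirty page has been laundered,
 *	so the caller can rescan; return FALSE when the queue scan
 *	completes without laundering anything.
 */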
static boolean_t
vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low,
    vm_paddr_t high)
{
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_paddr_t pa;
	vm_page_t m, m_tmp, next;
	int lockmode;

	vm_pagequeue_lock(pq);
	TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
		if ((m->flags & PG_MARKER) != 0)
			continue;
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + PAGE_SIZE > high)
			continue;
		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
			vm_page_unlock(m);
			continue;
		}
		object = m->object;
		if ((!VM_OBJECT_TRYWLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		    m->hold_count != 0)) || vm_page_busied(m)) {
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			continue;
		}
		vm_page_test_dirty(m);
		if (m->dirty == 0 && object->ref_count != 0)
			pmap_remove_all(m);
		if (m->dirty != 0) {
			vm_page_unlock(m);
			if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
				VM_OBJECT_WUNLOCK(object);
				continue;
			}
			if (object->type == OBJT_VNODE) {
				vm_pagequeue_unlock(pq);
				vp = object->handle;
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				(void)vn_start_write(vp, &mp, V_WAIT);
				lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
				    LK_SHARED : LK_EXCLUSIVE;
				vn_lock(vp, lockmode | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				VM_OBJECT_WUNLOCK(object);
				VOP_UNLOCK(vp, 0);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				return (TRUE);
			} else if (object->type == OBJT_SWAP ||
			    object->type == OBJT_DEFAULT) {
				vm_pagequeue_unlock(pq);
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
				    0, NULL, NULL);
				VM_OBJECT_WUNLOCK(object);
				return (TRUE);
			}
		} else {
			/*
			 * Dequeue here to prevent lock recursion in
			 * vm_page_cache().
			 */
			vm_page_dequeue_locked(m);
			vm_page_cache(m);
			vm_page_unlock(m);
		}
		VM_OBJECT_WUNLOCK(object);
	}
	vm_pagequeue_unlock(pq);
	return (FALSE);
}

/*
 * Increase the number of cached pages.  The specified value, "tries",
 * determines which categories of pages are cached:
 *
 *  0: All clean, inactive pages within the specified physical address range
 *     are cached.  Will not sleep.
 *  1: The vm_lowmem handlers are called.  All inactive pages within
 *     the specified physical address range are cached.  May sleep.
 *  2: The vm_lowmem handlers are called.  All inactive and active pages
 *     within the specified physical address range are cached.  May sleep.
 */
void
vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
{
	int actl, actmax, inactl, inactmax, dom, initial_dom;
	static int start_dom = 0;

	if (tries > 0) {
		/*
		 * Decrease registered cache sizes.  The vm_lowmem handlers
		 * may acquire locks and/or sleep, so they can only be invoked
		 * when "tries" is greater than zero.
		 */
		SDT_PROBE0(vm, , , vm__lowmem_cache);
		EVENTHANDLER_INVOKE(vm_lowmem, 0);

		/*
		 * We do this explicitly after the caches have been drained
		 * above.
		 */
		uma_reclaim();
	}

	/*
	 * Make the next scan start on the next domain.
	 */
	initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;

	inactl = 0;
	inactmax = vm_cnt.v_inactive_count;
	actl = 0;
	actmax = tries < 2 ? 0 : vm_cnt.v_active_count;
	dom = initial_dom;

	/*
	 * Scan domains in round-robin order, first inactive queues,
	 * then active.  Since a domain usually owns a large physically
	 * contiguous chunk of memory, it makes sense to completely
	 * exhaust one domain before switching to the next, while growing
	 * the pool of contiguous physical pages.
	 *
	 * Do not even start laundering a domain which cannot contain
	 * the specified address range, as indicated by the segments
	 * constituting the domain.
	 */
again:
	if (inactl < inactmax) {
		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
		    low, high) &&
		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_INACTIVE],
		    tries, low, high)) {
			inactl++;
			goto again;
		}
		if (++dom == vm_ndomains)
			dom = 0;
		if (dom != initial_dom)
			goto again;
	}
	if (actl < actmax) {
		if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
		    low, high) &&
		    vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_ACTIVE],
		    tries, low, high)) {
			actl++;
			goto again;
		}
		if (++dom == vm_ndomains)
			dom = 0;
		if (dom != initial_dom)
			goto again;
	}
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * Deactivate enough pages to satisfy the inactive target
 * requirements.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int act_delta, remove_mode;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    object->paging_in_progress != 0)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (vm_page_busied(p))
				continue;
			PCPU_INC(cnt.v_pdpages);
			vm_page_lock(p);
			if (p->wire_count != 0 || p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			act_delta = pmap_ts_referenced(p);
			if ((p->aflags & PGA_REFERENCED) != 0) {
				if (act_delta == 0)
					act_delta = 1;
				vm_page_aflag_clear(p, PGA_REFERENCED);
			}
			if (p->queue != PQ_ACTIVE && act_delta != 0) {
				vm_page_activate(p);
				p->act_count += act_delta;
			} else if (p->queue == PQ_ACTIVE) {
				if (act_delta == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode && p->act_count == 0) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else
						vm_page_requeue(p);
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE)
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	struct vnode *vp;
	struct mount *mp;
	vm_object_t object;
	vm_pindex_t pindex;
	int error, lockmode;

	vm_page_assert_locked(m);
	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;
	vp = NULL;
	mp = NULL;

	/*
	 * The object is already known NOT to be dead.  It
	 * is possible for the vget() to block the whole
	 * pageout daemon, but the new low-memory handling
	 * code should prevent it.
	 *
	 * We can't wait forever for the vnode lock, we might
	 * deadlock due to a vn_read() getting stuck in
	 * vm_wait while holding this vnode.  We skip the
	 * vnode if we can't get it in a reasonable amount
	 * of time.
	 */
	if (object->type == OBJT_VNODE) {
		vm_page_unlock(m);
		vp = object->handle;
		if (vp->v_type == VREG &&
		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			mp = NULL;
			error = EDEADLK;
			goto unlock_all;
		}
		KASSERT(mp != NULL,
		    ("vp %p with NULL v_mount", vp));
		vm_object_reference_locked(object);
		pindex = m->pindex;
		VM_OBJECT_WUNLOCK(object);
		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
		    LK_SHARED : LK_EXCLUSIVE;
		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
			vp = NULL;
			error = EDEADLK;
			goto unlock_mp;
		}
		VM_OBJECT_WLOCK(object);
		vm_page_lock(m);
		/*
		 * While the object and page were unlocked, the page
		 * may have been:
		 * (1) moved to a different queue,
		 * (2) reallocated to a different object,
		 * (3) reallocated to a different offset, or
		 * (4) cleaned.
		 */
		if (m->queue != PQ_INACTIVE || m->object != object ||
		    m->pindex != pindex || m->dirty == 0) {
			vm_page_unlock(m);
			error = ENXIO;
			goto unlock_all;
		}

		/*
		 * The page may have been busied or held while the object
		 * and page locks were released.
		 */
		if (vm_page_busied(m) || m->hold_count != 0) {
			vm_page_unlock(m);
			error = EBUSY;
			goto unlock_all;
		}
	}

	/*
	 * If a page is dirty, then it is either being washed
	 * (but not yet cleaned) or it is still in the
	 * laundry.  If it is still in the laundry, then we
	 * start the cleaning operation.
	 */
	if (vm_pageout_cluster(m) == 0)
		error = EIO;

unlock_all:
	VM_OBJECT_WUNLOCK(object);

unlock_mp:
	vm_page_lock_assert(m, MA_NOTOWNED);
	if (mp != NULL) {
		if (vp != NULL)
			vput(vp);
		vm_object_deallocate(object);
		vn_finished_write(mp);
	}

	return (error);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	pass 0 - Update active LRU/deactivate pages
 *	pass 1 - Move inactive to cache or free
 *	pass 2 - Launder dirty pages
 */
static void
vm_pageout_scan(struct vm_domain *vmd, int pass)
{
	vm_page_t m, next;
	struct vm_pagequeue *pq;
	vm_object_t object;
	long min_scan;
	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
	int vnodes_skipped = 0;
	int maxlaunder, scan_tick, scanned;
	boolean_t queues_locked;

	/*
	 * If we need to reclaim memory ask kernel caches to return
	 * some.  We rate limit to avoid thrashing.
	 */
	if (vmd == &vm_dom[0] && pass > 0 &&
	    (time_uptime - lowmem_uptime) >= lowmem_period) {
		/*
		 * Decrease registered cache sizes.
		 */
		SDT_PROBE0(vm, , , vm__lowmem_scan);
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
		/*
		 * We do this explicitly after the caches have been
		 * drained above.
		 */
		uma_reclaim();
		lowmem_uptime = time_uptime;
	}

	/*
	 * The addl_page_shortage is the number of temporarily
	 * stuck pages in the inactive queue.  In other words, the
	 * number of pages from the inactive count that should be
	 * discounted in setting the target for the active queue scan.
	 */
	addl_page_shortage = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	if (pass > 0) {
		deficit = atomic_readandclear_int(&vm_pageout_deficit);
		page_shortage = vm_paging_target() + deficit;
	} else
		page_shortage = deficit = 0;

	/*
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass > 1)
		maxlaunder = 10000;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 */
	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
	maxscan = pq->pq_cnt;
	vm_pagequeue_lock(pq);
	queues_locked = TRUE;
	for (m = TAILQ_FIRST(&pq->pq_pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {
		vm_pagequeue_assert_locked(pq);
		KASSERT(queues_locked, ("unlocked queues"));
		KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));

		PCPU_INC(cnt.v_pdpages);
		next = TAILQ_NEXT(m, plinks.q);
Grimes 1106936524aaSMatthew Dillon /* 1107936524aaSMatthew Dillon * skip marker pages 1108936524aaSMatthew Dillon */ 1109936524aaSMatthew Dillon if (m->flags & PG_MARKER) 1110936524aaSMatthew Dillon continue; 1111936524aaSMatthew Dillon 11127900f95dSKonstantin Belousov KASSERT((m->flags & PG_FICTITIOUS) == 0, 11137900f95dSKonstantin Belousov ("Fictitious page %p cannot be in inactive queue", m)); 11147900f95dSKonstantin Belousov KASSERT((m->oflags & VPO_UNMANAGED) == 0, 11157900f95dSKonstantin Belousov ("Unmanaged page %p cannot be in inactive queue", m)); 11167900f95dSKonstantin Belousov 11178c616246SKonstantin Belousov /* 1118311e34e2SKonstantin Belousov * The page or object lock acquisitions fail if the 1119311e34e2SKonstantin Belousov * page was removed from the queue or moved to a 1120311e34e2SKonstantin Belousov * different position within the queue. In either 1121311e34e2SKonstantin Belousov * case, addl_page_shortage should not be incremented. 11228c616246SKonstantin Belousov */ 11238c616246SKonstantin Belousov if (!vm_pageout_page_lock(m, &next)) { 11248c616246SKonstantin Belousov vm_page_unlock(m); 1125b182ec9eSJohn Dyson continue; 1126df8bae1dSRodney W. Grimes } 11279ee2165fSAlan Cox object = m->object; 112889f6b863SAttilio Rao if (!VM_OBJECT_TRYWLOCK(object) && 1129311e34e2SKonstantin Belousov !vm_pageout_fallback_object_lock(m, &next)) { 11302965a453SKip Macy vm_page_unlock(m); 113189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 113234d9e6fdSAlan Cox continue; 113334d9e6fdSAlan Cox } 1134311e34e2SKonstantin Belousov 1135311e34e2SKonstantin Belousov /* 1136311e34e2SKonstantin Belousov * Don't mess with busy pages, keep them at the 1137311e34e2SKonstantin Belousov * front of the queue, most likely they are being 1138311e34e2SKonstantin Belousov * paged out. Increment addl_page_shortage for busy 1139311e34e2SKonstantin Belousov * pages, because they may leave the inactive queue 1140311e34e2SKonstantin Belousov * shortly after page scan is finished. 1141311e34e2SKonstantin Belousov */ 1142c7aebda8SAttilio Rao if (vm_page_busied(m)) { 11432965a453SKip Macy vm_page_unlock(m); 114489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1145b182ec9eSJohn Dyson addl_page_shortage++; 114626f9a767SRodney W. Grimes continue; 114726f9a767SRodney W. Grimes } 1148bd7e5f99SJohn Dyson 11497e006499SJohn Dyson /* 11508d220203SAlan Cox * We unlock the inactive page queue, invalidating the 115148cc2fc7SKonstantin Belousov * 'next' pointer. Use our marker to remember our 115248cc2fc7SKonstantin Belousov * place. 115348cc2fc7SKonstantin Belousov */ 1154c325e866SKonstantin Belousov TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q); 11558d220203SAlan Cox vm_pagequeue_unlock(pq); 115648cc2fc7SKonstantin Belousov queues_locked = FALSE; 115748cc2fc7SKonstantin Belousov 115848cc2fc7SKonstantin Belousov /* 1159776f729cSKonstantin Belousov * Invalid pages can be easily freed. They cannot be 1160776f729cSKonstantin Belousov * mapped, vm_page_free() asserts this.
1161776f729cSKonstantin Belousov */ 1162776f729cSKonstantin Belousov if (m->valid == 0 && m->hold_count == 0) { 1163776f729cSKonstantin Belousov vm_page_free(m); 1164776f729cSKonstantin Belousov PCPU_INC(cnt.v_dfree); 1165776f729cSKonstantin Belousov --page_shortage; 1166776f729cSKonstantin Belousov goto drop_page; 1167776f729cSKonstantin Belousov } 1168776f729cSKonstantin Belousov 1169776f729cSKonstantin Belousov /* 1170bb7858eaSJeff Roberson * We bump the activation count if the page has been 1171bb7858eaSJeff Roberson * referenced while in the inactive queue. This makes 1172bb7858eaSJeff Roberson * it less likely that the page will be added back to the 1173bb7858eaSJeff Roberson * inactive queue prematurely again. Here we check the 11741c7c3c6aSMatthew Dillon * page tables (or emulated bits, if any), given the upper 11751c7c3c6aSMatthew Dillon * level VM system not knowing anything about existing 11761c7c3c6aSMatthew Dillon * references. 11777e006499SJohn Dyson */ 1178bb7858eaSJeff Roberson if ((m->aflags & PGA_REFERENCED) != 0) { 1179bb7858eaSJeff Roberson vm_page_aflag_clear(m, PGA_REFERENCED); 1180bb7858eaSJeff Roberson act_delta = 1; 118186fa2471SAlan Cox } else 118286fa2471SAlan Cox act_delta = 0; 1183bb7858eaSJeff Roberson if (object->ref_count != 0) { 1184bb7858eaSJeff Roberson act_delta += pmap_ts_referenced(m); 1185bb7858eaSJeff Roberson } else { 1186bb7858eaSJeff Roberson KASSERT(!pmap_page_is_mapped(m), 1187bb7858eaSJeff Roberson ("vm_pageout_scan: page %p is mapped", m)); 11882fe6e4d7SDavid Greenman } 1189ef743ce6SJohn Dyson 11907e006499SJohn Dyson /* 11911c7c3c6aSMatthew Dillon * If the upper level VM system knows about any page 1192bb7858eaSJeff Roberson * references, we reactivate the page or requeue it. 11937e006499SJohn Dyson */ 1194bb7858eaSJeff Roberson if (act_delta != 0) { 119586fa2471SAlan Cox if (object->ref_count != 0) { 119626f9a767SRodney W. Grimes vm_page_activate(m); 1197bb7858eaSJeff Roberson m->act_count += act_delta + ACT_ADVANCE; 1198bb7858eaSJeff Roberson } else { 1199bb7858eaSJeff Roberson vm_pagequeue_lock(pq); 1200bb7858eaSJeff Roberson queues_locked = TRUE; 1201bb7858eaSJeff Roberson vm_page_requeue_locked(m); 1202bb7858eaSJeff Roberson } 1203776f729cSKonstantin Belousov goto drop_page; 12040d94caffSDavid Greenman } 120567bf6868SJohn Dyson 1206311e34e2SKonstantin Belousov if (m->hold_count != 0) { 1207311e34e2SKonstantin Belousov /* 1208311e34e2SKonstantin Belousov * Held pages are essentially stuck in the 1209311e34e2SKonstantin Belousov * queue. So, they ought to be discounted 1210449c2e92SKonstantin Belousov * from the inactive count. See the 1211311e34e2SKonstantin Belousov * calculation of the page_shortage for the 1212311e34e2SKonstantin Belousov * loop over the active queue below. 1213311e34e2SKonstantin Belousov */ 1214311e34e2SKonstantin Belousov addl_page_shortage++; 1215776f729cSKonstantin Belousov goto drop_page; 1216311e34e2SKonstantin Belousov } 1217311e34e2SKonstantin Belousov 12187e006499SJohn Dyson /* 12199fc4739dSAlan Cox * If the page appears to be clean at the machine-independent 12209fc4739dSAlan Cox * layer, then remove all of its mappings from the pmap in 12219fc4739dSAlan Cox * anticipation of placing it onto the cache queue. If, 12229fc4739dSAlan Cox * however, any of the page's mappings allow write access, 12239fc4739dSAlan Cox * then the page may still be modified until the last of those 12249fc4739dSAlan Cox * mappings are removed. 
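 * (Hence the order below: the dirty test consults the pmap's
 * modified bit first, and only a page that still looks clean
 * afterwards has its remaining mappings removed.)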
12257e006499SJohn Dyson */ 1226aa044135SAlan Cox if (object->ref_count != 0) { 12279fc4739dSAlan Cox vm_page_test_dirty(m); 1228aa044135SAlan Cox if (m->dirty == 0) 1229b78ddb0bSAlan Cox pmap_remove_all(m); 1230aa044135SAlan Cox } 1231dcbcd518SBruce Evans 1232776f729cSKonstantin Belousov if (m->dirty == 0) { 12336989c456SAlan Cox /* 123478afdce6SAlan Cox * Clean pages can be freed. 12356989c456SAlan Cox */ 123678afdce6SAlan Cox vm_page_free(m); 123778afdce6SAlan Cox PCPU_INC(cnt.v_dfree); 12381c7c3c6aSMatthew Dillon --page_shortage; 1239d9e23210SJeff Roberson } else if ((m->flags & PG_WINATCFLS) == 0 && pass < 2) { 12407e006499SJohn Dyson /* 12412b6b0df7SMatthew Dillon * Dirty pages need to be paged out, but flushing 1242ab46f63eSJohn Baldwin * a page is extremely expensive versus freeing 12432b6b0df7SMatthew Dillon * a clean page. Rather than artificially limiting 12442b6b0df7SMatthew Dillon * the number of pages we can flush, we instead give 12452b6b0df7SMatthew Dillon * dirty pages extra priority on the inactive queue 12462b6b0df7SMatthew Dillon * by forcing them to be cycled through the queue 12472b6b0df7SMatthew Dillon * twice before being flushed, after which the 12482b6b0df7SMatthew Dillon * (now clean) page will cycle through once more 12492b6b0df7SMatthew Dillon * before being freed. This significantly extends 12502b6b0df7SMatthew Dillon * the thrash point for a heavily loaded machine. 12517e006499SJohn Dyson */ 12523407fefeSKonstantin Belousov m->flags |= PG_WINATCFLS; 12538d220203SAlan Cox vm_pagequeue_lock(pq); 125448cc2fc7SKonstantin Belousov queues_locked = TRUE; 12558d220203SAlan Cox vm_page_requeue_locked(m); 12560d94caffSDavid Greenman } else if (maxlaunder > 0) { 12572b6b0df7SMatthew Dillon /* 12582b6b0df7SMatthew Dillon * We always want to try to flush some dirty pages if 12592b6b0df7SMatthew Dillon * we encounter them, to keep the system stable. 12602b6b0df7SMatthew Dillon * Normally this number is small, but under extreme 12612b6b0df7SMatthew Dillon * pressure where there are insufficient clean pages 12622b6b0df7SMatthew Dillon * on the inactive queue, we may have to go all out. 12632b6b0df7SMatthew Dillon */ 12645050aa86SKonstantin Belousov int swap_pageouts_ok; 126534d8b7eaSJeff Roberson int error; 12660d94caffSDavid Greenman 126712ac6a1dSJohn Dyson if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) { 126812ac6a1dSJohn Dyson swap_pageouts_ok = 1; 126912ac6a1dSJohn Dyson } else { 127012ac6a1dSJohn Dyson swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts); 127112ac6a1dSJohn Dyson swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts && 127290ecac61SMatthew Dillon vm_page_count_min()); 127312ac6a1dSJohn Dyson 127412ac6a1dSJohn Dyson } 127570111b90SJohn Dyson 127670111b90SJohn Dyson /* 12771c7c3c6aSMatthew Dillon * We don't bother paging objects that are "dead". 12781c7c3c6aSMatthew Dillon * Those objects are in a "rundown" state.
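 * Similarly, when swap_pageouts_ok (computed above) is false, the
 * page is only requeued here: laundering anonymous pages to swap is
 * skipped entirely while it is disabled and, while merely deferred,
 * is attempted only once vm_page_count_min() reports a critical
 * shortage.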
127970111b90SJohn Dyson */ 128070111b90SJohn Dyson if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) { 12818d220203SAlan Cox vm_pagequeue_lock(pq); 12822965a453SKip Macy vm_page_unlock(m); 128389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 128448cc2fc7SKonstantin Belousov queues_locked = TRUE; 12858d220203SAlan Cox vm_page_requeue_locked(m); 128648cc2fc7SKonstantin Belousov goto relock_queues; 128712ac6a1dSJohn Dyson } 128834d8b7eaSJeff Roberson error = vm_pageout_clean(m); 12891c7c3c6aSMatthew Dillon /* 129034d8b7eaSJeff Roberson * Decrement page_shortage on success to account for 12912b6b0df7SMatthew Dillon * the (future) cleaned page. Otherwise we could wind 12922b6b0df7SMatthew Dillon * up laundering or cleaning too many pages. 12930d94caffSDavid Greenman */ 129434d8b7eaSJeff Roberson if (error == 0) { 129534d8b7eaSJeff Roberson page_shortage--; 129634d8b7eaSJeff Roberson maxlaunder--; 129734d8b7eaSJeff Roberson } else if (error == EDEADLK) { 129834d8b7eaSJeff Roberson pageout_lock_miss++; 129934d8b7eaSJeff Roberson vnodes_skipped++; 130034d8b7eaSJeff Roberson } else if (error == EBUSY) { 130134d8b7eaSJeff Roberson addl_page_shortage++; 130248cc2fc7SKonstantin Belousov } 130348cc2fc7SKonstantin Belousov vm_page_lock_assert(m, MA_NOTOWNED); 130448cc2fc7SKonstantin Belousov goto relock_queues; 130548cc2fc7SKonstantin Belousov } 1306776f729cSKonstantin Belousov drop_page: 130748cc2fc7SKonstantin Belousov vm_page_unlock(m); 130889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 130948cc2fc7SKonstantin Belousov relock_queues: 131048cc2fc7SKonstantin Belousov if (!queues_locked) { 13118d220203SAlan Cox vm_pagequeue_lock(pq); 131248cc2fc7SKonstantin Belousov queues_locked = TRUE; 13136989c456SAlan Cox } 1314c325e866SKonstantin Belousov next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q); 1315c325e866SKonstantin Belousov TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q); 13160d94caffSDavid Greenman } 13178d220203SAlan Cox vm_pagequeue_unlock(pq); 131826f9a767SRodney W. Grimes 13199452b5edSAlan Cox #if !defined(NO_SWAPPING) 13209452b5edSAlan Cox /* 13219452b5edSAlan Cox * Wakeup the swapout daemon if we didn't cache or free the targeted 13229452b5edSAlan Cox * number of pages. 13239452b5edSAlan Cox */ 13249452b5edSAlan Cox if (vm_swap_enabled && page_shortage > 0) 13259452b5edSAlan Cox vm_req_vmdaemon(VM_SWAP_NORMAL); 13269452b5edSAlan Cox #endif 13279452b5edSAlan Cox 13289452b5edSAlan Cox /* 13299452b5edSAlan Cox * Wakeup the sync daemon if we skipped a vnode in a writeable object 13309452b5edSAlan Cox * and we didn't cache or free enough pages. 13319452b5edSAlan Cox */ 13329452b5edSAlan Cox if (vnodes_skipped > 0 && page_shortage > vm_cnt.v_free_target - 13339452b5edSAlan Cox vm_cnt.v_free_min) 13349452b5edSAlan Cox (void)speedup_syncer(); 13359452b5edSAlan Cox 1336df8bae1dSRodney W. Grimes /* 1337936524aaSMatthew Dillon * Compute the number of pages we want to try to move from the 1338936524aaSMatthew Dillon * active queue to the inactive queue. 
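 * Roughly, this is the inactive queue's shortfall against its target
 * plus the current free-page deficit, with addl_page_shortage added
 * so that the pages found stuck in the inactive queue above are not
 * counted on to cover it.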
13391c7c3c6aSMatthew Dillon */ 134044f1c916SBryan Drewery page_shortage = vm_cnt.v_inactive_target - vm_cnt.v_inactive_count + 13419099545aSAlan Cox vm_paging_target() + deficit + addl_page_shortage; 13429099545aSAlan Cox 1343114f62c6SJeff Roberson pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; 1344114f62c6SJeff Roberson vm_pagequeue_lock(pq); 13459099545aSAlan Cox maxscan = pq->pq_cnt; 13469099545aSAlan Cox 1347d9e23210SJeff Roberson /* 1348d9e23210SJeff Roberson * If we're just idle polling attempt to visit every 1349d9e23210SJeff Roberson * active page within 'update_period' seconds. 1350d9e23210SJeff Roberson */ 135122cf98d1SAlan Cox scan_tick = ticks; 135222cf98d1SAlan Cox if (vm_pageout_update_period != 0) { 135322cf98d1SAlan Cox min_scan = pq->pq_cnt; 135422cf98d1SAlan Cox min_scan *= scan_tick - vmd->vmd_last_active_scan; 135522cf98d1SAlan Cox min_scan /= hz * vm_pageout_update_period; 135622cf98d1SAlan Cox } else 135722cf98d1SAlan Cox min_scan = 0; 135822cf98d1SAlan Cox if (min_scan > 0 || (page_shortage > 0 && maxscan > 0)) 135922cf98d1SAlan Cox vmd->vmd_last_active_scan = scan_tick; 13601c7c3c6aSMatthew Dillon 13611c7c3c6aSMatthew Dillon /* 136222cf98d1SAlan Cox * Scan the active queue for pages that can be deactivated. Update 136322cf98d1SAlan Cox * the per-page activity counter and use it to identify deactivation 136422cf98d1SAlan Cox * candidates. 13651c7c3c6aSMatthew Dillon */ 136622cf98d1SAlan Cox for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned < 136722cf98d1SAlan Cox min_scan || (page_shortage > 0 && scanned < maxscan)); m = next, 136822cf98d1SAlan Cox scanned++) { 1369f35329acSJohn Dyson 13709cf51988SAlan Cox KASSERT(m->queue == PQ_ACTIVE, 1371d3c09dd7SAlan Cox ("vm_pageout_scan: page %p isn't active", m)); 1372f35329acSJohn Dyson 1373c325e866SKonstantin Belousov next = TAILQ_NEXT(m, plinks.q); 137422cf98d1SAlan Cox if ((m->flags & PG_MARKER) != 0) 13758dbca793STor Egge continue; 13767900f95dSKonstantin Belousov KASSERT((m->flags & PG_FICTITIOUS) == 0, 13777900f95dSKonstantin Belousov ("Fictitious page %p cannot be in active queue", m)); 13787900f95dSKonstantin Belousov KASSERT((m->oflags & VPO_UNMANAGED) == 0, 13797900f95dSKonstantin Belousov ("Unmanaged page %p cannot be in active queue", m)); 13809ee2165fSAlan Cox if (!vm_pageout_page_lock(m, &next)) { 13818c616246SKonstantin Belousov vm_page_unlock(m); 13822965a453SKip Macy continue; 13832965a453SKip Macy } 1384b18bfc3dSJohn Dyson 1385b18bfc3dSJohn Dyson /* 1386b18bfc3dSJohn Dyson * The count for pagedaemon pages is done after checking the 1387956f3135SPhilippe Charnier * page for eligibility... 1388b18bfc3dSJohn Dyson */ 13898d220203SAlan Cox PCPU_INC(cnt.v_pdpages); 1390ef743ce6SJohn Dyson 13917e006499SJohn Dyson /* 13927e006499SJohn Dyson * Check to see "how much" the page has been used. 13937e006499SJohn Dyson */ 139486fa2471SAlan Cox if ((m->aflags & PGA_REFERENCED) != 0) { 1395bb7858eaSJeff Roberson vm_page_aflag_clear(m, PGA_REFERENCED); 139686fa2471SAlan Cox act_delta = 1; 139786fa2471SAlan Cox } else 139886fa2471SAlan Cox act_delta = 0; 139986fa2471SAlan Cox 1400274132acSJeff Roberson /* 1401274132acSJeff Roberson * Unlocked object ref count check. Two races are possible. 1402274132acSJeff Roberson * 1) The ref was transitioning to zero and we saw non-zero, 1403274132acSJeff Roberson * the pmap bits will be checked unnecessarily. 1404274132acSJeff Roberson * 2) The ref was transitioning to one and we saw zero. 
1405274132acSJeff Roberson * The page lock prevents a new reference to this page so 1406274132acSJeff Roberson * we need not check the reference bits. 1407274132acSJeff Roberson */ 1408274132acSJeff Roberson if (m->object->ref_count != 0) 1409bb7858eaSJeff Roberson act_delta += pmap_ts_referenced(m); 1410bb7858eaSJeff Roberson 1411bb7858eaSJeff Roberson /* 1412bb7858eaSJeff Roberson * Advance or decay the act_count based on recent usage. 1413bb7858eaSJeff Roberson */ 141486fa2471SAlan Cox if (act_delta != 0) { 1415bb7858eaSJeff Roberson m->act_count += ACT_ADVANCE + act_delta; 141638efa82bSJohn Dyson if (m->act_count > ACT_MAX) 141738efa82bSJohn Dyson m->act_count = ACT_MAX; 141886fa2471SAlan Cox } else 141938efa82bSJohn Dyson m->act_count -= min(m->act_count, ACT_DECLINE); 1420bb7858eaSJeff Roberson 1421bb7858eaSJeff Roberson /* 1422bb7858eaSJeff Roberson * Move this page to the tail of the active or inactive 1423bb7858eaSJeff Roberson * queue depending on usage. 1424bb7858eaSJeff Roberson */ 142586fa2471SAlan Cox if (m->act_count == 0) { 14268d220203SAlan Cox /* Dequeue to avoid later lock recursion. */ 14278d220203SAlan Cox vm_page_dequeue_locked(m); 1428d4a272dbSJohn Dyson vm_page_deactivate(m); 1429bb7858eaSJeff Roberson page_shortage--; 14308d220203SAlan Cox } else 14318d220203SAlan Cox vm_page_requeue_locked(m); 14322965a453SKip Macy vm_page_unlock(m); 143326f9a767SRodney W. Grimes } 14348d220203SAlan Cox vm_pagequeue_unlock(pq); 1435ceb0cf87SJohn Dyson #if !defined(NO_SWAPPING) 1436ceb0cf87SJohn Dyson /* 1437ceb0cf87SJohn Dyson * Idle process swapout -- run once per second. 1438ceb0cf87SJohn Dyson */ 1439ceb0cf87SJohn Dyson if (vm_swap_idle_enabled) { 1440ceb0cf87SJohn Dyson static long lsec; 1441227ee8a1SPoul-Henning Kamp if (time_second != lsec) { 144297824da3SAlan Cox vm_req_vmdaemon(VM_SWAP_IDLE); 1443227ee8a1SPoul-Henning Kamp lsec = time_second; 1444ceb0cf87SJohn Dyson } 1445ceb0cf87SJohn Dyson } 1446ceb0cf87SJohn Dyson #endif 1447ceb0cf87SJohn Dyson 14485663e6deSDavid Greenman /* 1449e92686d0SDavid Schultz * If we are critically low on one of RAM or swap and low on 1450e92686d0SDavid Schultz * the other, kill the largest process. However, we avoid 1451e92686d0SDavid Schultz * doing this on the first pass in order to give ourselves a 1452e92686d0SDavid Schultz * chance to flush out dirty vnode-backed pages and to allow 1453e92686d0SDavid Schultz * active pages to be moved to the inactive queue and reclaimed. 14542025d69bSKonstantin Belousov */ 1455449c2e92SKonstantin Belousov vm_pageout_mightbe_oom(vmd, pass); 14562025d69bSKonstantin Belousov } 14572025d69bSKonstantin Belousov 1458449c2e92SKonstantin Belousov static int vm_pageout_oom_vote; 1459449c2e92SKonstantin Belousov 1460449c2e92SKonstantin Belousov /* 1461449c2e92SKonstantin Belousov * The pagedaemon threads randomly select one to perform the 1462449c2e92SKonstantin Belousov * OOM. Trying to kill processes before all pagedaemons 1463449c2e92SKonstantin Belousov * have failed to reach the free target is premature.
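 * For example, with vm_ndomains == 4, only the thread whose vote
 * brings vm_pageout_oom_vote to 4 goes on to call vm_pageout_oom();
 * the other three record their vote below and return.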
1464449c2e92SKonstantin Belousov */ 1465449c2e92SKonstantin Belousov static void 1466449c2e92SKonstantin Belousov vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass) 1467449c2e92SKonstantin Belousov { 1468449c2e92SKonstantin Belousov int old_vote; 1469449c2e92SKonstantin Belousov 1470d9e23210SJeff Roberson if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) || 1471449c2e92SKonstantin Belousov (swap_pager_full && vm_paging_target() > 0))) { 1472449c2e92SKonstantin Belousov if (vmd->vmd_oom) { 1473449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1474449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1475449c2e92SKonstantin Belousov } 1476449c2e92SKonstantin Belousov return; 1477449c2e92SKonstantin Belousov } 1478449c2e92SKonstantin Belousov 1479449c2e92SKonstantin Belousov if (vmd->vmd_oom) 1480449c2e92SKonstantin Belousov return; 1481449c2e92SKonstantin Belousov 1482449c2e92SKonstantin Belousov vmd->vmd_oom = TRUE; 1483449c2e92SKonstantin Belousov old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); 1484449c2e92SKonstantin Belousov if (old_vote != vm_ndomains - 1) 1485449c2e92SKonstantin Belousov return; 1486449c2e92SKonstantin Belousov 1487449c2e92SKonstantin Belousov /* 1488449c2e92SKonstantin Belousov * The current pagedaemon thread is the last in the quorum to 1489449c2e92SKonstantin Belousov * start OOM. Initiate the selection and signaling of the 1490449c2e92SKonstantin Belousov * victim. 1491449c2e92SKonstantin Belousov */ 1492449c2e92SKonstantin Belousov vm_pageout_oom(VM_OOM_MEM); 1493449c2e92SKonstantin Belousov 1494449c2e92SKonstantin Belousov /* 1495449c2e92SKonstantin Belousov * After one round of OOM terror, recall our vote. On the 1496449c2e92SKonstantin Belousov * next pass, the current pagedaemon would vote again if the low 1497449c2e92SKonstantin Belousov * memory condition is still there, due to vmd_oom being 1498449c2e92SKonstantin Belousov * false. 1499449c2e92SKonstantin Belousov */ 1500449c2e92SKonstantin Belousov vmd->vmd_oom = FALSE; 1501449c2e92SKonstantin Belousov atomic_subtract_int(&vm_pageout_oom_vote, 1); 1502449c2e92SKonstantin Belousov } 15032025d69bSKonstantin Belousov 15042025d69bSKonstantin Belousov void 15052025d69bSKonstantin Belousov vm_pageout_oom(int shortage) 15062025d69bSKonstantin Belousov { 15072025d69bSKonstantin Belousov struct proc *p, *bigproc; 15082025d69bSKonstantin Belousov vm_offset_t size, bigsize; 15092025d69bSKonstantin Belousov struct thread *td; 15106bed074cSKonstantin Belousov struct vmspace *vm; 15112025d69bSKonstantin Belousov 15122025d69bSKonstantin Belousov /* 15131c58e4e5SJohn Baldwin * We keep the process bigproc locked once we find it to keep anyone 15141c58e4e5SJohn Baldwin * from messing with it; however, there is a possibility of 15151c58e4e5SJohn Baldwin * deadlock if process B is bigproc and one of its child processes 15161c58e4e5SJohn Baldwin * attempts to propagate a signal to B while we are waiting for A's 15171c58e4e5SJohn Baldwin * lock while walking this list. To avoid this, we don't block on 15181c58e4e5SJohn Baldwin * the process lock but just skip a process if it is already locked.
15195663e6deSDavid Greenman */ 15205663e6deSDavid Greenman bigproc = NULL; 15215663e6deSDavid Greenman bigsize = 0; 15221005a129SJohn Baldwin sx_slock(&allproc_lock); 1523e602ba25SJulian Elischer FOREACH_PROC_IN_SYSTEM(p) { 1524e602ba25SJulian Elischer int breakout; 1525dcbcd518SBruce Evans 152671943c3dSKonstantin Belousov PROC_LOCK(p); 152771943c3dSKonstantin Belousov 15281c58e4e5SJohn Baldwin /* 15293f1c4c4fSKonstantin Belousov * If this is a system, protected or killed process, skip it. 15305663e6deSDavid Greenman */ 153171943c3dSKonstantin Belousov if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 153271943c3dSKonstantin Belousov P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 || 153371943c3dSKonstantin Belousov p->p_pid == 1 || P_KILLED(p) || 153471943c3dSKonstantin Belousov (p->p_pid < 48 && swap_pager_avail != 0)) { 15358606d880SJohn Baldwin PROC_UNLOCK(p); 15365663e6deSDavid Greenman continue; 15375663e6deSDavid Greenman } 15385663e6deSDavid Greenman /* 1539dcbcd518SBruce Evans * If the process is in a non-running type state, 1540e602ba25SJulian Elischer * don't touch it. Check all the threads individually. 15415663e6deSDavid Greenman */ 1542e602ba25SJulian Elischer breakout = 0; 1543e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1544982d11f8SJeff Roberson thread_lock(td); 154571fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) && 154671fad9fdSJulian Elischer !TD_IS_RUNNING(td) && 1547f497cda2SEdward Tomasz Napierala !TD_IS_SLEEPING(td) && 1548f497cda2SEdward Tomasz Napierala !TD_IS_SUSPENDED(td)) { 1549982d11f8SJeff Roberson thread_unlock(td); 1550e602ba25SJulian Elischer breakout = 1; 1551e602ba25SJulian Elischer break; 1552e602ba25SJulian Elischer } 1553982d11f8SJeff Roberson thread_unlock(td); 1554e602ba25SJulian Elischer } 1555e602ba25SJulian Elischer if (breakout) { 15561c58e4e5SJohn Baldwin PROC_UNLOCK(p); 15575663e6deSDavid Greenman continue; 15585663e6deSDavid Greenman } 15595663e6deSDavid Greenman /* 15605663e6deSDavid Greenman * get the process size 15615663e6deSDavid Greenman */ 15626bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p); 15636bed074cSKonstantin Belousov if (vm == NULL) { 15646bed074cSKonstantin Belousov PROC_UNLOCK(p); 15656bed074cSKonstantin Belousov continue; 15666bed074cSKonstantin Belousov } 156771943c3dSKonstantin Belousov _PHOLD(p); 15686bed074cSKonstantin Belousov if (!vm_map_trylock_read(&vm->vm_map)) { 156971943c3dSKonstantin Belousov _PRELE(p); 157072d97679SDavid Schultz PROC_UNLOCK(p); 157171943c3dSKonstantin Belousov vmspace_free(vm); 157272d97679SDavid Schultz continue; 157372d97679SDavid Schultz } 157471943c3dSKonstantin Belousov PROC_UNLOCK(p); 15757981aa24SKonstantin Belousov size = vmspace_swap_count(vm); 15766bed074cSKonstantin Belousov vm_map_unlock_read(&vm->vm_map); 15772025d69bSKonstantin Belousov if (shortage == VM_OOM_MEM) 15786bed074cSKonstantin Belousov size += vmspace_resident_count(vm); 15796bed074cSKonstantin Belousov vmspace_free(vm); 15805663e6deSDavid Greenman /* 15815663e6deSDavid Greenman * if this process is bigger than the biggest one, 15825663e6deSDavid Greenman * remember it.
15835663e6deSDavid Greenman */ 15845663e6deSDavid Greenman if (size > bigsize) { 15851c58e4e5SJohn Baldwin if (bigproc != NULL) 158671943c3dSKonstantin Belousov PRELE(bigproc); 15875663e6deSDavid Greenman bigproc = p; 15885663e6deSDavid Greenman bigsize = size; 158971943c3dSKonstantin Belousov } else { 159071943c3dSKonstantin Belousov PRELE(p); 159171943c3dSKonstantin Belousov } 15925663e6deSDavid Greenman } 15931005a129SJohn Baldwin sx_sunlock(&allproc_lock); 15945663e6deSDavid Greenman if (bigproc != NULL) { 15958311a2b8SWill Andrews if (vm_panic_on_oom != 0) 15968311a2b8SWill Andrews panic("out of swap space"); 159771943c3dSKonstantin Belousov PROC_LOCK(bigproc); 1598729b1e51SDavid Greenman killproc(bigproc, "out of swap space"); 1599fa885116SJulian Elischer sched_nice(bigproc, PRIO_MIN); 160071943c3dSKonstantin Belousov _PRELE(bigproc); 16011c58e4e5SJohn Baldwin PROC_UNLOCK(bigproc); 160244f1c916SBryan Drewery wakeup(&vm_cnt.v_free_count); 16035663e6deSDavid Greenman } 16045663e6deSDavid Greenman } 160526f9a767SRodney W. Grimes 1606449c2e92SKonstantin Belousov static void 1607449c2e92SKonstantin Belousov vm_pageout_worker(void *arg) 1608449c2e92SKonstantin Belousov { 1609449c2e92SKonstantin Belousov struct vm_domain *domain; 1610949c9186SKonstantin Belousov int domidx; 1611449c2e92SKonstantin Belousov 1612449c2e92SKonstantin Belousov domidx = (uintptr_t)arg; 1613449c2e92SKonstantin Belousov domain = &vm_dom[domidx]; 1614449c2e92SKonstantin Belousov 1615449c2e92SKonstantin Belousov /* 1616949c9186SKonstantin Belousov * XXXKIB It could be useful to bind pageout daemon threads to 1617949c9186SKonstantin Belousov * the cores belonging to the domain, from which vm_page_array 1618949c9186SKonstantin Belousov * is allocated. 1619449c2e92SKonstantin Belousov */ 1620449c2e92SKonstantin Belousov 1621449c2e92SKonstantin Belousov KASSERT(domain->vmd_segs != 0, ("domain without segments")); 162222cf98d1SAlan Cox domain->vmd_last_active_scan = ticks; 1623449c2e92SKonstantin Belousov vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE); 1624449c2e92SKonstantin Belousov 1625449c2e92SKonstantin Belousov /* 1626449c2e92SKonstantin Belousov * The pageout daemon worker is never done, so loop forever. 1627449c2e92SKonstantin Belousov */ 1628449c2e92SKonstantin Belousov while (TRUE) { 1629449c2e92SKonstantin Belousov /* 1630449c2e92SKonstantin Belousov * If we have enough free memory, wakeup waiters. Do 1631449c2e92SKonstantin Belousov * not clear vm_pages_needed until we reach our target, 1632449c2e92SKonstantin Belousov * otherwise we may be woken up over and over again and 1633449c2e92SKonstantin Belousov * waste a lot of cpu. 1634449c2e92SKonstantin Belousov */ 1635449c2e92SKonstantin Belousov mtx_lock(&vm_page_queue_free_mtx); 1636449c2e92SKonstantin Belousov if (vm_pages_needed && !vm_page_count_min()) { 1637449c2e92SKonstantin Belousov if (!vm_paging_needed()) 1638449c2e92SKonstantin Belousov vm_pages_needed = 0; 163944f1c916SBryan Drewery wakeup(&vm_cnt.v_free_count); 1640449c2e92SKonstantin Belousov } 1641449c2e92SKonstantin Belousov if (vm_pages_needed) { 1642449c2e92SKonstantin Belousov /* 1643449c2e92SKonstantin Belousov * Still not done, take a second pass without waiting 1644449c2e92SKonstantin Belousov * (unlimited dirty cleaning), otherwise sleep a bit 1645449c2e92SKonstantin Belousov * and try again. 
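 * (vmd_pass is handed to vm_pageout_scan() below: pass 0 only
 * refreshes the active LRU, pass 1 reclaims with limited laundering,
 * and pass 2 and above launder dirty pages without limit.)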
1646449c2e92SKonstantin Belousov */ 1647449c2e92SKonstantin Belousov if (domain->vmd_pass > 1) 1648449c2e92SKonstantin Belousov msleep(&vm_pages_needed, 1649449c2e92SKonstantin Belousov &vm_page_queue_free_mtx, PVM, "psleep", 1650449c2e92SKonstantin Belousov hz / 2); 1651449c2e92SKonstantin Belousov } else { 1652449c2e92SKonstantin Belousov /* 1653d9e23210SJeff Roberson * Good enough, sleep until required to refresh 1654d9e23210SJeff Roberson * stats. 1655449c2e92SKonstantin Belousov */ 1656449c2e92SKonstantin Belousov domain->vmd_pass = 0; 1657d9e23210SJeff Roberson msleep(&vm_pages_needed, &vm_page_queue_free_mtx, 1658d9e23210SJeff Roberson PVM, "psleep", hz); 1659d9e23210SJeff Roberson 1660449c2e92SKonstantin Belousov } 1661d9e23210SJeff Roberson if (vm_pages_needed) { 166244f1c916SBryan Drewery vm_cnt.v_pdwakeups++; 1663d9e23210SJeff Roberson domain->vmd_pass++; 1664d9e23210SJeff Roberson } 1665449c2e92SKonstantin Belousov mtx_unlock(&vm_page_queue_free_mtx); 1666449c2e92SKonstantin Belousov vm_pageout_scan(domain, domain->vmd_pass); 1667449c2e92SKonstantin Belousov } 1668449c2e92SKonstantin Belousov } 1669449c2e92SKonstantin Belousov 1670df8bae1dSRodney W. Grimes /* 16714d19f4adSSteven Hartland * vm_pageout_init initialises basic pageout daemon settings. 1672df8bae1dSRodney W. Grimes */ 16732b14f991SJulian Elischer static void 16744d19f4adSSteven Hartland vm_pageout_init(void) 1675df8bae1dSRodney W. Grimes { 1676df8bae1dSRodney W. Grimes /* 1677df8bae1dSRodney W. Grimes * Initialize some paging parameters. 1678df8bae1dSRodney W. Grimes */ 167944f1c916SBryan Drewery vm_cnt.v_interrupt_free_min = 2; 168044f1c916SBryan Drewery if (vm_cnt.v_page_count < 2000) 1681f35329acSJohn Dyson vm_pageout_page_count = 8; 1682f6b04d2bSDavid Greenman 168345ae1d91SAlan Cox /* 168445ae1d91SAlan Cox * v_free_reserved needs to include enough for the largest 168545ae1d91SAlan Cox * swap pager structures plus enough for any pv_entry structs 168645ae1d91SAlan Cox * when paging. 168745ae1d91SAlan Cox */ 168844f1c916SBryan Drewery if (vm_cnt.v_page_count > 1024) 168944f1c916SBryan Drewery vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200; 16902feb50bfSAttilio Rao else 169144f1c916SBryan Drewery vm_cnt.v_free_min = 4; 169244f1c916SBryan Drewery vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 169344f1c916SBryan Drewery vm_cnt.v_interrupt_free_min; 169444f1c916SBryan Drewery vm_cnt.v_free_reserved = vm_pageout_page_count + 169544f1c916SBryan Drewery vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768); 169644f1c916SBryan Drewery vm_cnt.v_free_severe = vm_cnt.v_free_min / 2; 169744f1c916SBryan Drewery vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved; 169844f1c916SBryan Drewery vm_cnt.v_free_min += vm_cnt.v_free_reserved; 169944f1c916SBryan Drewery vm_cnt.v_free_severe += vm_cnt.v_free_reserved; 170044f1c916SBryan Drewery vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2; 170144f1c916SBryan Drewery if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3) 170244f1c916SBryan Drewery vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3; 1703df8bae1dSRodney W. Grimes 1704d9e23210SJeff Roberson /* 1705d9e23210SJeff Roberson * Set the default wakeup threshold to be 10% above the minimum 1706d9e23210SJeff Roberson * page limit. This keeps the steady state out of shortfall. 
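 * For example, a v_free_min of 10000 pages yields a wakeup threshold
 * of 11000 pages with the formula below.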
1707d9e23210SJeff Roberson */ 170844f1c916SBryan Drewery vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11; 1709d9e23210SJeff Roberson 1710d9e23210SJeff Roberson /* 1711d9e23210SJeff Roberson * Set interval in seconds for active scan. We want to visit each 1712c9612b2dSJeff Roberson * page at least once every ten minutes. This is to prevent worst 1713c9612b2dSJeff Roberson * case paging behaviors with stale active LRU. 1714d9e23210SJeff Roberson */ 1715d9e23210SJeff Roberson if (vm_pageout_update_period == 0) 1716c9612b2dSJeff Roberson vm_pageout_update_period = 600; 1717d9e23210SJeff Roberson 1718df8bae1dSRodney W. Grimes /* XXX does not really belong here */ 1719df8bae1dSRodney W. Grimes if (vm_page_max_wired == 0) 172044f1c916SBryan Drewery vm_page_max_wired = vm_cnt.v_free_count / 3; 17214d19f4adSSteven Hartland } 17224d19f4adSSteven Hartland 17234d19f4adSSteven Hartland /* 17244d19f4adSSteven Hartland * vm_pageout is the high level pageout daemon. 17254d19f4adSSteven Hartland */ 17264d19f4adSSteven Hartland static void 17274d19f4adSSteven Hartland vm_pageout(void) 17284d19f4adSSteven Hartland { 172944ec2b63SKonstantin Belousov int error; 17304d19f4adSSteven Hartland #if MAXMEMDOM > 1 173144ec2b63SKonstantin Belousov int i; 17324d19f4adSSteven Hartland #endif 1733df8bae1dSRodney W. Grimes 173424a1cce3SDavid Greenman swap_pager_swap_init(); 1735449c2e92SKonstantin Belousov #if MAXMEMDOM > 1 1736449c2e92SKonstantin Belousov for (i = 1; i < vm_ndomains; i++) { 1737449c2e92SKonstantin Belousov error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i, 1738449c2e92SKonstantin Belousov curproc, NULL, 0, 0, "dom%d", i); 1739449c2e92SKonstantin Belousov if (error != 0) { 1740449c2e92SKonstantin Belousov panic("starting pageout for domain %d, error %d\n", 1741449c2e92SKonstantin Belousov i, error); 1742dc2efb27SJohn Dyson } 1743f919ebdeSDavid Greenman } 1744449c2e92SKonstantin Belousov #endif 174544ec2b63SKonstantin Belousov error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL, 174644ec2b63SKonstantin Belousov 0, 0, "uma"); 174744ec2b63SKonstantin Belousov if (error != 0) 174844ec2b63SKonstantin Belousov panic("starting uma_reclaim helper, error %d\n", error); 1749d395270dSDimitry Andric vm_pageout_worker((void *)(uintptr_t)0); 1750df8bae1dSRodney W. Grimes } 175126f9a767SRodney W. Grimes 17526b4b77adSAlan Cox /* 1753e9f995d8SAlan Cox * Unless the free page queue lock is held by the caller, this function 17546b4b77adSAlan Cox * should be regarded as advisory. Specifically, the caller should 175544f1c916SBryan Drewery * not msleep() on &vm_cnt.v_free_count following this function unless 1756e9f995d8SAlan Cox * the free page queue lock is held until the msleep() is performed. 
17576b4b77adSAlan Cox */ 1758e0c5a895SJohn Dyson void 17594a365329SAndrey Zonov pagedaemon_wakeup(void) 1760e0c5a895SJohn Dyson { 1761a1c0a785SAlan Cox 1762b40ce416SJulian Elischer if (!vm_pages_needed && curthread->td_proc != pageproc) { 1763a1c0a785SAlan Cox vm_pages_needed = 1; 1764e0c5a895SJohn Dyson wakeup(&vm_pages_needed); 1765e0c5a895SJohn Dyson } 1766e0c5a895SJohn Dyson } 1767e0c5a895SJohn Dyson 176838efa82bSJohn Dyson #if !defined(NO_SWAPPING) 17695afce282SDavid Greenman static void 177097824da3SAlan Cox vm_req_vmdaemon(int req) 17715afce282SDavid Greenman { 17725afce282SDavid Greenman static int lastrun = 0; 17735afce282SDavid Greenman 177497824da3SAlan Cox mtx_lock(&vm_daemon_mtx); 177597824da3SAlan Cox vm_pageout_req_swapout |= req; 1776b18bfc3dSJohn Dyson if ((ticks > (lastrun + hz)) || (ticks < lastrun)) { 17775afce282SDavid Greenman wakeup(&vm_daemon_needed); 17785afce282SDavid Greenman lastrun = ticks; 17795afce282SDavid Greenman } 178097824da3SAlan Cox mtx_unlock(&vm_daemon_mtx); 17815afce282SDavid Greenman } 17825afce282SDavid Greenman 17832b14f991SJulian Elischer static void 17844a365329SAndrey Zonov vm_daemon(void) 17850d94caffSDavid Greenman { 178691d5354aSJohn Baldwin struct rlimit rsslim; 1787dcbcd518SBruce Evans struct proc *p; 1788dcbcd518SBruce Evans struct thread *td; 17896bed074cSKonstantin Belousov struct vmspace *vm; 1790099e7e95SEdward Tomasz Napierala int breakout, swapout_flags, tryagain, attempts; 1791afcc55f3SEdward Tomasz Napierala #ifdef RACCT 1792099e7e95SEdward Tomasz Napierala uint64_t rsize, ravailable; 1793afcc55f3SEdward Tomasz Napierala #endif 17940d94caffSDavid Greenman 17952fe6e4d7SDavid Greenman while (TRUE) { 179697824da3SAlan Cox mtx_lock(&vm_daemon_mtx); 17974b5c9cf6SEdward Tomasz Napierala msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 1798099e7e95SEdward Tomasz Napierala #ifdef RACCT 17994b5c9cf6SEdward Tomasz Napierala racct_enable ? hz : 0 1800099e7e95SEdward Tomasz Napierala #else 18014b5c9cf6SEdward Tomasz Napierala 0 1802099e7e95SEdward Tomasz Napierala #endif 18034b5c9cf6SEdward Tomasz Napierala ); 180497824da3SAlan Cox swapout_flags = vm_pageout_req_swapout; 18054c1f8ee9SDavid Greenman vm_pageout_req_swapout = 0; 180697824da3SAlan Cox mtx_unlock(&vm_daemon_mtx); 180797824da3SAlan Cox if (swapout_flags) 180897824da3SAlan Cox swapout_procs(swapout_flags); 180997824da3SAlan Cox 18102fe6e4d7SDavid Greenman /* 18110d94caffSDavid Greenman * scan the processes for exceeding their rlimits or if 18120d94caffSDavid Greenman * process is swapped out -- deactivate pages 18132fe6e4d7SDavid Greenman */ 1814099e7e95SEdward Tomasz Napierala tryagain = 0; 1815099e7e95SEdward Tomasz Napierala attempts = 0; 1816099e7e95SEdward Tomasz Napierala again: 1817099e7e95SEdward Tomasz Napierala attempts++; 18181005a129SJohn Baldwin sx_slock(&allproc_lock); 1819f67af5c9SXin LI FOREACH_PROC_IN_SYSTEM(p) { 1820fe2144fdSLuoqi Chen vm_pindex_t limit, size; 18212fe6e4d7SDavid Greenman 18222fe6e4d7SDavid Greenman /* 18232fe6e4d7SDavid Greenman * if this is a system process or if we have already 18242fe6e4d7SDavid Greenman * looked at this process, skip it. 
18252fe6e4d7SDavid Greenman */ 1826897ecacdSJohn Baldwin PROC_LOCK(p); 18278e6fa660SJohn Baldwin if (p->p_state != PRS_NORMAL || 18288e6fa660SJohn Baldwin p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) { 1829897ecacdSJohn Baldwin PROC_UNLOCK(p); 18302fe6e4d7SDavid Greenman continue; 18312fe6e4d7SDavid Greenman } 18322fe6e4d7SDavid Greenman /* 18332fe6e4d7SDavid Greenman * if the process is in a non-running type state, 18342fe6e4d7SDavid Greenman * don't touch it. 18352fe6e4d7SDavid Greenman */ 1836e602ba25SJulian Elischer breakout = 0; 1837e602ba25SJulian Elischer FOREACH_THREAD_IN_PROC(p, td) { 1838982d11f8SJeff Roberson thread_lock(td); 183971fad9fdSJulian Elischer if (!TD_ON_RUNQ(td) && 184071fad9fdSJulian Elischer !TD_IS_RUNNING(td) && 1841f497cda2SEdward Tomasz Napierala !TD_IS_SLEEPING(td) && 1842f497cda2SEdward Tomasz Napierala !TD_IS_SUSPENDED(td)) { 1843982d11f8SJeff Roberson thread_unlock(td); 1844e602ba25SJulian Elischer breakout = 1; 1845e602ba25SJulian Elischer break; 1846e602ba25SJulian Elischer } 1847982d11f8SJeff Roberson thread_unlock(td); 1848e602ba25SJulian Elischer } 1849897ecacdSJohn Baldwin if (breakout) { 1850897ecacdSJohn Baldwin PROC_UNLOCK(p); 18512fe6e4d7SDavid Greenman continue; 18522fe6e4d7SDavid Greenman } 18532fe6e4d7SDavid Greenman /* 18542fe6e4d7SDavid Greenman * get a limit 18552fe6e4d7SDavid Greenman */ 1856f6f6d240SMateusz Guzik lim_rlimit_proc(p, RLIMIT_RSS, &rsslim); 1857fe2144fdSLuoqi Chen limit = OFF_TO_IDX( 185891d5354aSJohn Baldwin qmin(rsslim.rlim_cur, rsslim.rlim_max)); 18592fe6e4d7SDavid Greenman 18602fe6e4d7SDavid Greenman /* 18610d94caffSDavid Greenman * let processes that are swapped out really be 18620d94caffSDavid Greenman * swapped out set the limit to nothing (will force a 18630d94caffSDavid Greenman * swap-out.) 18642fe6e4d7SDavid Greenman */ 1865b61ce5b0SJeff Roberson if ((p->p_flag & P_INMEM) == 0) 18660d94caffSDavid Greenman limit = 0; /* XXX */ 18676bed074cSKonstantin Belousov vm = vmspace_acquire_ref(p); 1868897ecacdSJohn Baldwin PROC_UNLOCK(p); 18696bed074cSKonstantin Belousov if (vm == NULL) 18706bed074cSKonstantin Belousov continue; 18712fe6e4d7SDavid Greenman 18726bed074cSKonstantin Belousov size = vmspace_resident_count(vm); 1873a406d8c3SEdward Tomasz Napierala if (size >= limit) { 1874fe2144fdSLuoqi Chen vm_pageout_map_deactivate_pages( 18756bed074cSKonstantin Belousov &vm->vm_map, limit); 18762fe6e4d7SDavid Greenman } 1877afcc55f3SEdward Tomasz Napierala #ifdef RACCT 18784b5c9cf6SEdward Tomasz Napierala if (racct_enable) { 1879099e7e95SEdward Tomasz Napierala rsize = IDX_TO_OFF(size); 1880099e7e95SEdward Tomasz Napierala PROC_LOCK(p); 1881099e7e95SEdward Tomasz Napierala racct_set(p, RACCT_RSS, rsize); 1882099e7e95SEdward Tomasz Napierala ravailable = racct_get_available(p, RACCT_RSS); 1883099e7e95SEdward Tomasz Napierala PROC_UNLOCK(p); 1884099e7e95SEdward Tomasz Napierala if (rsize > ravailable) { 1885099e7e95SEdward Tomasz Napierala /* 18864b5c9cf6SEdward Tomasz Napierala * Don't be overly aggressive; this 18874b5c9cf6SEdward Tomasz Napierala * might be an innocent process, 18884b5c9cf6SEdward Tomasz Napierala * and the limit could've been exceeded 18894b5c9cf6SEdward Tomasz Napierala * by some memory hog. Don't try 18904b5c9cf6SEdward Tomasz Napierala * to deactivate more than 1/4th 18914b5c9cf6SEdward Tomasz Napierala * of process' resident set size. 
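 * For example, a process holding 400 MB resident against a 100 MB
 * RSS limit is first trimmed toward 300 MB; only if later attempts
 * (beyond the eighth) still find it over the limit is the full
 * target enforced.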
1892099e7e95SEdward Tomasz Napierala */ 1893099e7e95SEdward Tomasz Napierala if (attempts <= 8) { 18944b5c9cf6SEdward Tomasz Napierala if (ravailable < rsize - 18954b5c9cf6SEdward Tomasz Napierala (rsize / 4)) { 18964b5c9cf6SEdward Tomasz Napierala ravailable = rsize - 18974b5c9cf6SEdward Tomasz Napierala (rsize / 4); 18984b5c9cf6SEdward Tomasz Napierala } 1899099e7e95SEdward Tomasz Napierala } 1900099e7e95SEdward Tomasz Napierala vm_pageout_map_deactivate_pages( 19014b5c9cf6SEdward Tomasz Napierala &vm->vm_map, 19024b5c9cf6SEdward Tomasz Napierala OFF_TO_IDX(ravailable)); 1903099e7e95SEdward Tomasz Napierala /* Update RSS usage after paging out. */ 1904099e7e95SEdward Tomasz Napierala size = vmspace_resident_count(vm); 1905099e7e95SEdward Tomasz Napierala rsize = IDX_TO_OFF(size); 1906099e7e95SEdward Tomasz Napierala PROC_LOCK(p); 1907099e7e95SEdward Tomasz Napierala racct_set(p, RACCT_RSS, rsize); 1908099e7e95SEdward Tomasz Napierala PROC_UNLOCK(p); 1909099e7e95SEdward Tomasz Napierala if (rsize > ravailable) 1910099e7e95SEdward Tomasz Napierala tryagain = 1; 1911099e7e95SEdward Tomasz Napierala } 19124b5c9cf6SEdward Tomasz Napierala } 1913afcc55f3SEdward Tomasz Napierala #endif 19146bed074cSKonstantin Belousov vmspace_free(vm); 19152fe6e4d7SDavid Greenman } 19161005a129SJohn Baldwin sx_sunlock(&allproc_lock); 1917099e7e95SEdward Tomasz Napierala if (tryagain != 0 && attempts <= 10) 1918099e7e95SEdward Tomasz Napierala goto again; 191924a1cce3SDavid Greenman } 19202fe6e4d7SDavid Greenman } 1921a1287949SEivind Eklund #endif /* !defined(NO_SWAPPING) */ 1922