/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR	8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags, CTLFLAG_RW, &msync_flush_flags, 0,
    "Enable sequential iteration optimization");

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;
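
/*
 * Illustrative sketch (added for exposition; not part of this module):
 * the reference-count discipline described above, as a hypothetical
 * consumer might exercise it.  The object type and size here are made
 * up for the example.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, atop(16 * PAGE_SIZE));
 *	vm_object_reference(obj);	(take a second reference)
 *	...
 *	vm_object_deallocate(obj);	(drop the extra reference)
 *	vm_object_deallocate(obj);	(last reference goes away; the
 *					 object's storage may be relinquished)
 */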

SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->cache == NULL,
	    ("object %p has cached pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object, "standard object");

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	return (0);
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->flags = 0;
	object->uip = NULL;
	object->charge = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	object->pg_color = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	object->cache = NULL;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_PHYS:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}
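
/*
 * Illustrative sketch (hypothetical call site): the memory attribute
 * must be set while the object lock is held and before any page is
 * allocated to the object.  VM_MEMATTR_UNCACHEABLE is only an example
 * value here; the available attributes are machine-dependent.
 *
 *	VM_OBJECT_LOCK(obj);
 *	if (vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE) !=
 *	    KERN_SUCCESS)
 *		... the object is dead or already has resident pages ...
 *	VM_OBJECT_UNLOCK(obj);
 */
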
void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}
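
/*
 * Illustrative sketch (hypothetical pager-style consumer): the
 * paging-in-progress counter manipulated above brackets an I/O
 * operation so that waiters such as vm_object_terminate(), which
 * sleeps in vm_object_pip_wait(), do not tear the object down while
 * the I/O is still in flight.
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_pip_add(obj, 1);
 *	VM_OBJECT_UNLOCK(obj);
 *	... issue and complete the page-out ...
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_pip_wakeup(obj);	(decrement and wake any waiters)
 *	VM_OBJECT_UNLOCK(obj);
 */
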
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VFS_ASSERT_GIANT(vp->v_mount);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		int vfslocked;

		vfslocked = 0;
	restart:
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			struct vnode *vp = (struct vnode *) object->handle;

			/*
			 * Conditionally acquire Giant for a vnode-backed
			 * object.  We have to be careful since the type of
			 * a vnode object can change while the object is
			 * unlocked.
			 */
			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
				vfslocked = 1;
				if (!mtx_trylock(&Giant)) {
					VM_OBJECT_UNLOCK(object);
					mtx_lock(&Giant);
					goto restart;
				}
			}
			vm_object_vndeallocate(object);
			VFS_UNLOCK_GIANT(vfslocked);
			return;
		} else
			/*
			 * This is to handle the case that the object
			 * changed type while we dropped its lock to
			 * obtain Giant.
			 */
			VFS_UNLOCK_GIANT(vfslocked);

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_UNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy removes the object from the global object list
 *	and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	/*
	 * Release the allocation charge.
	 */
	if (object->uip != NULL) {
		KASSERT(object->type == OBJT_DEFAULT ||
		    object->type == OBJT_SWAP,
		    ("vm_object_terminate: non-swap obj %p has uip",
		    object));
		swap_release_by_uid(object->charge, object->uip);
		object->charge = 0;
		uifree(object->uip);
		object->uip = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags));
		if (p->wire_count == 0) {
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, 0, 0);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	vm_object_destroy(object);
}
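
/*
 * Illustrative locking sketch (not a real call site): a caller enters
 * vm_object_terminate() with the object lock held and must not touch
 * the object afterwards, since the routine drops the lock and frees
 * the object itself through vm_object_destroy():
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_terminate(obj);	(returns with obj freed and the
 *					 lock released)
 */
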
/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if (p->dirty == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		p->oflags |= VPO_CLEANCHK;
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
			clearobjflags = 0;
		else
			pmap_remove_write(p);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if ((p->oflags & VPO_CLEANCHK) == 0 ||
		    (pi < tstart) || (pi >= tend) ||
		    p->valid == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
		    curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}
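
/*
 * Illustrative sketch (mirrors the call made by vm_object_terminate()
 * above): a synchronous flush of every dirty page of a vnode-backed
 * object uses the "clean everything" start == end convention.
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
 *	VM_OBJECT_UNLOCK(obj);
 */
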
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			return (0);
		}
	}
	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->oflags & VPO_BUSY) ||
			    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
			    (tp->oflags & VPO_CLEANCHK) == 0) ||
			    (tp->busy != 0))
				break;
			vm_page_test_dirty(tp);
			if (tp->dirty == 0) {
				tp->oflags &= ~VPO_CLEANCHK;
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for (i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->oflags & VPO_BUSY) ||
				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				    (tp->oflags & VPO_CLEANCHK) == 0) ||
				    (tp->busy != 0))
					break;
				vm_page_test_dirty(tp);
				if (tp->dirty == 0) {
					tp->oflags &= ~VPO_CLEANCHK;
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	p->oflags &= ~VPO_CLEANCHK;
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->dirty) {
			pmap_remove_write(ma[i]);
			ma[i]->oflags |= VPO_CLEANCHK;

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return (maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		int vfslocked;
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		vn_finished_write(mp);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		boolean_t purge;
		purge = old_msync || (object->type == OBJT_DEVICE);
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    purge ? FALSE : TRUE);
	}
	VM_OBJECT_UNLOCK(object);
}
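
/*
 * Illustrative sketch (hypothetical msync(2)-style caller; in the tree
 * this interface is driven by the vm_map layer): synchronously flush a
 * byte range and invalidate the flushed pages.  The offset and length
 * names here are arbitrary for the example.
 *
 *	vm_object_sync(obj, foff, len, TRUE, TRUE);
 */
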
/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			    tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		}
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL && advise == MADV_WILLNEED) {
			/*
			 * If the page is cached, reactivate it.
			 */
			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
			    VM_ALLOC_NOBUSY);
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object)
				VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			goto shadowlookup;
		}
		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
1207867a482dSJohn Dyson */ 120832585dd6SAlan Cox vm_page_lock_queues(); 120932585dd6SAlan Cox if (m->hold_count || 12101c7c3c6aSMatthew Dillon m->wire_count || 12118b03c8edSMatthew Dillon (m->flags & PG_UNMANAGED) || 121232585dd6SAlan Cox m->valid != VM_PAGE_BITS_ALL) { 121332585dd6SAlan Cox vm_page_unlock_queues(); 121434567de7SAlan Cox goto unlock_tobject; 12156e20a165SJohn Dyson } 12169af80719SAlan Cox if ((m->oflags & VPO_BUSY) || m->busy) { 12175786be7cSAlan Cox vm_page_flag_set(m, PG_REFERENCED); 121891449ce9SAlan Cox vm_page_unlock_queues(); 12199b98b796SAlan Cox if (object != tobject) 12209b98b796SAlan Cox VM_OBJECT_UNLOCK(object); 12215786be7cSAlan Cox m->oflags |= VPO_WANTED; 122291449ce9SAlan Cox msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0); 12239b98b796SAlan Cox VM_OBJECT_LOCK(object); 12246e20a165SJohn Dyson goto relookup; 122534567de7SAlan Cox } 1226867a482dSJohn Dyson if (advise == MADV_WILLNEED) { 1227867a482dSJohn Dyson vm_page_activate(m); 12286e20a165SJohn Dyson } else if (advise == MADV_DONTNEED) { 1229479112dfSMatthew Dillon vm_page_dontneed(m); 12300a47b48bSJohn Dyson } else if (advise == MADV_FREE) { 12311c7c3c6aSMatthew Dillon /* 12322aaeadf8SMatthew Dillon * Mark the page clean. This will allow the page 12332aaeadf8SMatthew Dillon * to be freed up by the system. However, such pages 12342aaeadf8SMatthew Dillon * are often reused quickly by malloc()/free() 12352aaeadf8SMatthew Dillon * so we do not do anything that would cause 12362aaeadf8SMatthew Dillon * a page fault if we can help it. 12372aaeadf8SMatthew Dillon * 12382aaeadf8SMatthew Dillon * Specifically, we do not try to actually free 12392aaeadf8SMatthew Dillon * the page now nor do we try to put it in the 12402aaeadf8SMatthew Dillon * cache (which would cause a page fault on reuse). 124141c67e12SMatthew Dillon * 124241c67e12SMatthew Dillon * But we do make the page is freeable as we 124341c67e12SMatthew Dillon * can without actually taking the step of unmapping 124441c67e12SMatthew Dillon * it. 12451c7c3c6aSMatthew Dillon */ 12460385347cSPeter Wemm pmap_clear_modify(m); 12476e20a165SJohn Dyson m->dirty = 0; 124841c67e12SMatthew Dillon m->act_count = 0; 1249479112dfSMatthew Dillon vm_page_dontneed(m); 1250867a482dSJohn Dyson } 12512999e9faSAlan Cox vm_page_unlock_queues(); 12522999e9faSAlan Cox if (advise == MADV_FREE && tobject->type == OBJT_SWAP) 12532999e9faSAlan Cox swap_pager_freespace(tobject, tpindex, 1); 125434567de7SAlan Cox unlock_tobject: 12559b98b796SAlan Cox if (tobject != object) 125634567de7SAlan Cox VM_OBJECT_UNLOCK(tobject); 1257867a482dSJohn Dyson } 12589b98b796SAlan Cox VM_OBJECT_UNLOCK(object); 1259867a482dSJohn Dyson } 1260867a482dSJohn Dyson 1261867a482dSJohn Dyson /* 1262df8bae1dSRodney W. Grimes * vm_object_shadow: 1263df8bae1dSRodney W. Grimes * 1264df8bae1dSRodney W. Grimes * Create a new object which is backed by the 1265df8bae1dSRodney W. Grimes * specified existing object range. The source 1266df8bae1dSRodney W. Grimes * object reference is deallocated. 1267df8bae1dSRodney W. Grimes * 1268df8bae1dSRodney W. Grimes * The new object and offset into that object 1269df8bae1dSRodney W. Grimes * are returned in the source parameters. 1270df8bae1dSRodney W. Grimes */ 127126f9a767SRodney W. Grimes void 12721b40f8c0SMatthew Dillon vm_object_shadow( 12731b40f8c0SMatthew Dillon vm_object_t *object, /* IN/OUT */ 12741b40f8c0SMatthew Dillon vm_ooffset_t *offset, /* IN/OUT */ 12751b40f8c0SMatthew Dillon vm_size_t length) 1276df8bae1dSRodney W. 
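/*
 * Illustrative sketch (editor's addition): how a hypothetical in-kernel
 * caller could apply advice to the object pages backing a map entry.  The
 * helper name and the entry-based framing are assumptions for illustration;
 * the real caller in the tree is the vm_map layer.
 */
#if 0
static void
example_apply_advice(vm_map_entry_t entry, int advise)
{
	vm_pindex_t pstart;
	int count;

	pstart = OFF_TO_IDX(entry->offset);
	count = (int)atop(entry->end - entry->start);
	/* A NULL object is tolerated; vm_object_madvise() just returns. */
	vm_object_madvise(entry->object.vm_object, pstart, count, advise);
}
#endif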
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			VM_OBJECT_UNLOCK(source);
			return;
		}
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes its reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
#if VM_NRESERVLEVEL > 0
		result->flags |= source->flags & OBJ_COLORED;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
		    ((1 << (VM_NFREEORDER - 1)) - 1);
#endif
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Return the new object and the offset into it.
	 */
	*offset = 0;
	*object = result;
}
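/*
 * Illustrative sketch (editor's addition): typical copy-on-write style use,
 * where a map entry's object reference is replaced by a fresh shadow.  The
 * entry-based framing is an assumption for illustration.
 */
#if 0
static void
example_shadow_entry(vm_map_entry_t entry)
{
	/*
	 * On return, entry->object.vm_object points at the new shadow
	 * object and entry->offset has been reset relative to it; the
	 * old reference was consumed by vm_object_shadow().
	 */
	vm_object_shadow(&entry->object.vm_object, &entry->offset,
	    atop(entry->end - entry->start));
}
#endif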
/*
 * vm_object_split:
 *
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_UNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_LOCK(new_object);
	VM_OBJECT_LOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if ((source->flags & OBJ_DEAD) != 0) {
			VM_OBJECT_UNLOCK(source);
			VM_OBJECT_UNLOCK(orig_object);
			VM_OBJECT_UNLOCK(new_object);
			vm_object_deallocate(new_object);
			VM_OBJECT_LOCK(orig_object);
			return;
		}
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		source->shadow_count++;
		source->generation++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(source);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	if (orig_object->uip != NULL) {
		new_object->uip = orig_object->uip;
		uihold(orig_object->uip);
		new_object->charge = ptoa(size);
		KASSERT(orig_object->charge >= ptoa(size),
		    ("orig_object->charge < ptoa(size)"));
		orig_object->charge -= ptoa(size);
	}
retry:
	if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
		if (m->pindex < offidxstart) {
			m = vm_page_splay(offidxstart, orig_object->root);
			if ((orig_object->root = m)->pindex < offidxstart)
				m = TAILQ_NEXT(m, listq);
		}
	}
	vm_page_lock_queues();
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if ((m->oflags & VPO_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_REFERENCED);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(new_object);
			m->oflags |= VPO_WANTED;
			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
			VM_OBJECT_LOCK(new_object);
			goto retry;
		}
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
	}
	vm_page_unlock_queues();
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);

		/*
		 * Transfer any cached pages from orig_object to new_object.
		 */
		if (__predict_false(orig_object->cache != NULL))
			vm_page_cache_transfer(orig_object, offidxstart,
			    new_object);
	}
	VM_OBJECT_UNLOCK(orig_object);
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	VM_OBJECT_UNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_LOCK(new_object);
}
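/*
 * Illustrative sketch (editor's addition): vm_object_split() is entered
 * with the entry's object locked and, as the tail of the function shows,
 * returns with the (possibly replaced) entry object locked.  The caller
 * framing below is an assumption based on that lock/unlock pattern.
 */
#if 0
static void
example_split(vm_map_entry_t entry)
{
	VM_OBJECT_LOCK(entry->object.vm_object);
	/* May install a brand-new object in the entry. */
	vm_object_split(entry);
	VM_OBJECT_UNLOCK(entry->object.vm_object);
}
#endif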
#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int
vm_object_backing_scan(vm_object_t object, int op)
{
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of cache
		 * or swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed!  XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * Note that we do not busy the backing object's
			 * page.
			 */
			if (p->pindex < backing_offset_index ||
			    new_pindex >= object->size) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */
			pp = vm_page_lookup(object, new_pindex);
			if ((pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->oflags & VPO_BUSY) ||
				    !p->valid ||
				    p->busy) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if ((p->oflags & VPO_BUSY) || p->busy) {
					vm_page_lock_queues();
					vm_page_flag_set(p, PG_REFERENCED);
					vm_page_unlock_queues();
					VM_OBJECT_UNLOCK(object);
					p->oflags |= VPO_WANTED;
					msleep(p, VM_OBJECT_MTX(backing_object),
					    PDROP | PVM, "vmocol", 0);
					VM_OBJECT_LOCK(object);
					VM_OBJECT_LOCK(backing_object);
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			KASSERT(p->object == backing_object,
			    ("vm_object_backing_scan: object mismatch"));

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(backing_object,
				    p->pindex, 1);
			}

			if (p->pindex < backing_offset_index ||
			    new_pindex >= object->size) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_lock_queues();
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)) {
				/*
				 * Page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone.
				 */
				vm_page_lock_queues();
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

#if VM_NRESERVLEVEL > 0
			/*
			 * Rename the reservation.
			 */
			vm_reserv_rename(p, object, backing_object,
			    backing_offset_index);
#endif

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_lock_queues();
			vm_page_rename(p, object, new_pindex);
			vm_page_unlock_queues();
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	return (r);
}
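/*
 * Illustrative sketch (editor's addition): the three modes in which the
 * collapse code below drives vm_object_backing_scan(), with both object
 * locks held.  The wrapper function itself is invented for illustration.
 */
#if 0
static void
example_backing_scan_modes(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	/* Non-destructive test: does the parent shadow every page? */
	(void)vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED);

	/* Opportunistic collapse that skips busy pages (qcollapse path). */
	(void)vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	/* Full collapse that sleeps on busy pages (ref_count == 1 case). */
	(void)vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
}
#endif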
/*
 * This version of collapse allows the operation to occur earlier, even
 * while paging_in_progress is true for an object.  It is not a complete
 * operation, but it should plug 99.9% of the remaining leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);

	if (backing_object->ref_count != 1)
		return;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * We check the backing object first, because it is most
		 * likely not collapsible.
		 */
		VM_OBJECT_LOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This ignores pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

#if VM_NRESERVLEVEL > 0
			/*
			 * Break any reservations from backing_object.
			 */
			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
				vm_reserv_break_all(backing_object);
#endif

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset),
				    TRUE);

				/*
				 * Free any cached pages from backing_object.
				 */
				if (__predict_false(backing_object->cache != NULL))
					vm_page_cache_free(backing_object, 0, 0);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;
			if (backing_object->backing_object) {
				VM_OBJECT_LOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				backing_object->backing_object->generation++;
				VM_OBJECT_UNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, (
			    "backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			VM_OBJECT_UNLOCK(backing_object);
			vm_object_destroy(backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (object->resident_page_count != object->size &&
			    vm_object_backing_scan(object,
			    OBSC_TEST_ALL_SHADOWED) == 0) {
				VM_OBJECT_UNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_LOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object, shadow_list);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_UNLOCK(new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_UNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}
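/*
 * Illustrative sketch (editor's addition): vm_object_collapse() is simply
 * invoked with the object locked; it loops internally until no further
 * collapse or bypass is possible.  vm_object_coalesce() later in this file
 * uses exactly this pattern.
 */
#if 0
static void
example_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK(object);
	vm_object_collapse(object);
	VM_OBJECT_UNLOCK(object);
}
#endif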
/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.  However, if a
 *	page is wired for any reason other than the existence of a
 *	managed, wired mapping, then it may be invalidated but not
 *	removed from the object.  Pages are specified by the given
 *	range ["start", "end") and Boolean "clean_only".  As a
 *	special case, if "end" is zero, then the range extends from
 *	"start" to the end of the object.  If "clean_only" is TRUE,
 *	then only the non-dirty pages within the specified range are
 *	affected.
 *
 *	In general, this operation should only be performed on objects
 *	that contain managed pages.  There are two exceptions.  First,
 *	it may be performed on the kernel and kmem objects.  Second,
 *	it may be used by msync(..., MS_INVALIDATE) to invalidate
 *	device-backed pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    boolean_t clean_only)
{
	vm_page_t p, next;
	int wirings;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->resident_page_count == 0)
		goto skipmemq;

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
	    object == kmem_object,
	    ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < start) {
			p = vm_page_splay(start, object->root);
			if ((object->root = p)->pindex < start)
				p = TAILQ_NEXT(p, listq);
		}
	}
	vm_page_lock_queues();
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	    p != NULL && (p->pindex < end || end == 0);
	    p = next) {
		next = TAILQ_NEXT(p, listq);

		/*
		 * If the page is wired for any reason besides the
		 * existence of managed, wired mappings, then it cannot
		 * be freed.  For example, fictitious pages, which
		 * represent device memory, are inherently wired and
		 * cannot be freed.  They can, however, be invalidated
		 * if "clean_only" is FALSE.
		 */
		if ((wirings = p->wire_count) != 0 &&
		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
			/* Fictitious pages do not have managed mappings. */
			if ((p->flags & PG_FICTITIOUS) == 0)
				pmap_remove_all(p);
			/* Account for removal of managed, wired mappings. */
			p->wire_count -= wirings;
			if (!clean_only) {
				p->valid = 0;
				vm_page_undirty(p);
			}
			continue;
		}
		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
			goto again;
		KASSERT((p->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_page_remove: page %p is fictitious", p));
		if (clean_only && p->valid) {
			pmap_remove_write(p);
			if (p->dirty)
				continue;
		}
		pmap_remove_all(p);
		/* Account for removal of managed, wired mappings. */
		if (wirings != 0)
			p->wire_count -= wirings;
		vm_page_free(p);
	}
	vm_page_unlock_queues();
	vm_object_pip_wakeup(object);
skipmemq:
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, start, end);
}
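/*
 * Illustrative sketch (editor's addition): freeing every page of a byte
 * range, mirroring the calls made from vm_object_sync() and
 * vm_object_coalesce() in this file.  The wrapper is invented for
 * illustration.
 */
#if 0
static void
example_page_remove(vm_object_t object, vm_ooffset_t offset, vm_size_t size)
{
	VM_OBJECT_LOCK(object);
	/* clean_only == FALSE: dirty pages in the range are freed too. */
	vm_object_page_remove(object, OFF_TO_IDX(offset),
	    OFF_TO_IDX(offset + size + PAGE_MASK), FALSE);
	VM_OBJECT_UNLOCK(object);
}
#endif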
/*
 * Populate the specified range of the object with valid pages.  Returns
 * TRUE if the range is successfully populated and FALSE otherwise.
 *
 * Note: This function should be optimized to pass a larger array of
 * pages to vm_pager_get_pages() before it is applied to a non-
 * OBJT_DEVICE object.
 *
 * The object must be locked.
 */
boolean_t
vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	for (pindex = start; pindex < end; pindex++) {
		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
		    VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			ma[0] = m;
			rv = vm_pager_get_pages(object, ma, 1, 0);
			m = vm_page_lookup(object, pindex);
			if (m == NULL)
				break;
			if (rv != VM_PAGER_OK) {
				vm_page_lock_queues();
				vm_page_free(m);
				vm_page_unlock_queues();
				break;
			}
		}
		/*
		 * Keep "m" busy because a subsequent iteration may unlock
		 * the object.
		 */
	}
	if (pindex > start) {
		m = vm_page_lookup(object, start);
		while (m != NULL && m->pindex < pindex) {
			vm_page_wakeup(m);
			m = TAILQ_NEXT(m, listq);
		}
	}
	return (pindex == end);
}
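/*
 * Illustrative sketch (editor's addition): faulting in an entire object,
 * e.g. before wiring it.  Deriving the end index from object->size is an
 * assumption made for this example.
 */
#if 0
static boolean_t
example_populate_all(vm_object_t object)
{
	boolean_t rv;

	VM_OBJECT_LOCK(object);
	rv = vm_object_populate(object, 0, object->size);
	VM_OBJECT_UNLOCK(object);
	return (rv);
}
#endif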
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to the second object
 *		reserved	Indicator that extension region has
 *				swap accounted for
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	VM_OBJECT_LOCK(prev_object);
	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Account for the charge.
	 */
	if (prev_object->uip != NULL) {

		/*
		 * If prev_object was charged, then this mapping,
		 * although not charged now, may become writable
		 * later.  A non-NULL uip in the object would prevent
		 * swap reservation during enabling of the write
		 * access, so reserve swap now.  A failed reservation
		 * causes allocation of a separate object for the map
		 * entry, and swap reservation for that entry is
		 * managed at the appropriate time.
		 */
		if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
		    prev_object->uip)) {
			/* Don't leak the object lock on failure. */
			VM_OBJECT_UNLOCK(prev_object);
			return (FALSE);
		}
		prev_object->charge += ptoa(next_size);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
		    next_pindex,
		    next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
#if 0
		if (prev_object->uip != NULL) {
			KASSERT(prev_object->charge >=
			    ptoa(prev_object->size - next_pindex),
			    ("object %p overcharged 1 %jx %jx", prev_object,
			    (uintmax_t)next_pindex, (uintmax_t)next_size));
			prev_object->charge -= ptoa(prev_object->size -
			    next_pindex);
		}
#endif
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	VM_OBJECT_UNLOCK(prev_object);
	return (TRUE);
}
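/*
 * Illustrative sketch (editor's addition): extending the previous mapping
 * by an adjoining region, in the style of a map-insertion path.  The
 * surrounding map-entry bookkeeping is omitted and the helper is invented
 * for illustration.
 */
#if 0
static boolean_t
example_coalesce(vm_map_entry_t prev, vm_size_t grow)
{
	/*
	 * prev_offset/prev_size describe the existing reference; grow is
	 * the size of the adjoining region being appended.  reserved ==
	 * FALSE asks vm_object_coalesce() to reserve swap itself.
	 */
	return (vm_object_coalesce(prev->object.vm_object, prev->offset,
	    (vm_size_t)(prev->end - prev->start), grow, FALSE));
}
#endif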
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
		return;
	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		VI_LOCK(vp);
		vp->v_iflag |= VI_OBJDIRTY;
		VI_UNLOCK(vp);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
				    "vmochk: internal obj is not in a map: "
				    "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}
/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x uip %d charge %jx\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags,
	    object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	vm_pindex_t fidx;
	vm_paddr_t pa;
	vm_page_t m, prev_m;
	int rcount, nl, c;

	nl = 0;
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		pa = -1;
		TAILQ_FOREACH(m, &object->memq, listq) {
			if (m->pindex > 128)
				break;
			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
			    prev_m->pindex + 1 != m->pindex) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
			}
			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				    (long)fidx, rcount, (long)pa);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = m->pindex;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */
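/*
 * Editor's usage note on the DDB commands above (an addition, not from the
 * original file): from the in-kernel debugger prompt one would typically
 * run "show vmochk" to sanity-check internal objects, "show object <addr>"
 * to dump one vm_object (supplying an address also enables the per-page
 * listing, since full = have_addr above), and "show vmopag" to print
 * physically contiguous page runs for every object.
 */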