/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/pctrie.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *clearobjflags,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *clearobjflags);
static void	vm_object_qcollapse(vm_object_t object);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;
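
/*
 * Illustrative sketch only (kept out of compilation): a typical lifecycle
 * for an anonymous object as seen by callers of this module.  The object
 * is created with one reference by vm_object_allocate(), an additional
 * reference is taken for a second consumer, and each consumer drops its
 * reference with vm_object_deallocate(); dropping the last reference
 * terminates the object.  The function name is hypothetical.
 */
#if 0
static void
vm_object_lifecycle_example(void)
{
	vm_object_t obj;

	/* Create a four-page anonymous object; ref_count starts at 1. */
	obj = vm_object_allocate(OBJT_DEFAULT, atop(4 * PAGE_SIZE));

	/* A second consumer takes its own reference. */
	vm_object_reference(obj);

	/* Each consumer releases its reference when done. */
	vm_object_deallocate(obj);
	vm_object_deallocate(obj);	/* last reference; object is freed */
}
#endif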

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
    "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	object->ref_count = 0;
	vm_radix_init(&object->rtree);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->flags = OBJ_DEAD;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}

static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	if (type == OBJT_SWAP)
		pctrie_init(&object->un_pager.swp.swp_blks);

	/*
	 * Ensure that swap_pager_swapoff() iteration over object_list
	 * sees up to date type and pctrie head if it observed
	 * non-dead object.
	 */
	atomic_thread_fence_rel();

	switch (type) {
	case OBJT_DEAD:
		panic("_vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_DEFAULT:
	case OBJT_SWAP:
		object->flags = OBJ_ONEMAPPING;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		object->flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		object->flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		object->flags = 0;
		break;
	default:
		panic("_vm_object_allocate: type %d is undefined", type);
	}
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->cred = NULL;
	object->charge = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	rw_init(&kmem_object->lock, "kmem vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	vm_radix_zinit();
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}

/*
 * Sets the default memory attribute for the specified object.  Pages
 * that are allocated to this object are by default assigned this memory
 * attribute.
 *
 * Presently, this function must be called before any pages are allocated
 * to the object.  In the future, this requirement may be relaxed for
 * "default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_MGTDEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	default:
		panic("vm_object_set_memattr: object %p is of undefined type",
		    object);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
	}
}

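/*
 * Illustrative sketch only (kept out of compilation): how a pager-style
 * consumer might use the paging-in-progress counter above to keep an object
 * from being terminated while an asynchronous operation on its pages is
 * outstanding.  The function name is hypothetical.
 */
#if 0
static void
vm_object_pip_example(vm_object_t object)
{

	VM_OBJECT_WLOCK(object);
	vm_object_pip_add(object, 1);		/* operation begins */
	VM_OBJECT_WUNLOCK(object);

	/* ... issue asynchronous I/O against the object's pages ... */

	VM_OBJECT_WLOCK(object);
	vm_object_pip_wakeup(object);		/* operation completes */
	VM_OBJECT_WUNLOCK(object);
}
#endif
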
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vn_printf(vp, "vm_object_vndeallocate ");
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
		umtx_shm_object_terminated(object);

	/*
	 * The test for text of vp vnode does not need a bypass to
	 * reach right VV_TEXT there, since it is obtained from
	 * object->handle.
	 */
	if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) {
		object->ref_count--;
		VM_OBJECT_WUNLOCK(object);
		/* vrele may need the vnode lock. */
		vrele(vp);
	} else {
		vhold(vp);
		VM_OBJECT_WUNLOCK(object);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vdrop(vp);
		VM_OBJECT_WLOCK(object);
		object->ref_count--;
		if (object->type == OBJT_DEAD) {
			VM_OBJECT_WUNLOCK(object);
			VOP_UNLOCK(vp, 0);
		} else {
			if (object->ref_count == 0)
				VOP_UNSET_TEXT(vp);
			VM_OBJECT_WUNLOCK(object);
			vput(vp);
		}
	}
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	struct vnode *vp;

	while (object != NULL) {
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_WUNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS) != 0) {
				vp = object->un_pager.swp.swp_tmpfs;
				vhold(vp);
				VM_OBJECT_WUNLOCK(object);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				if (object->type == OBJT_DEAD ||
				    object->ref_count != 1) {
					VM_OBJECT_WUNLOCK(object);
					VOP_UNLOCK(vp, 0);
					vdrop(vp);
					return;
				}
				if ((object->flags & OBJ_TMPFS) != 0)
					VOP_UNSET_TEXT(vp);
				VOP_UNLOCK(vp, 0);
				vdrop(vp);
			}
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS_NODE) == 0))) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
				    ("shadowed tmpfs v_object %p", object));
				if (!VM_OBJECT_TRYWLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_WUNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_WUNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_WUNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						VM_OBJECT_SLEEP(object, object,
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_WLOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_WUNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_WUNLOCK(object);
					continue;
				}
				VM_OBJECT_WUNLOCK(robject);
			}
			VM_OBJECT_WUNLOCK(object);
			return;
		}
doterm:
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			VM_OBJECT_WLOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			VM_OBJECT_WUNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_WUNLOCK(object);
		object = temp;
	}
}

/*
 * vm_object_destroy removes the object from the global object list
 * and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 * vm_object_terminate_pages removes any remaining pageable pages
 * from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	vm_page_t p, p_next;
	struct mtx *mtx, *mtx1;
	struct vm_pagequeue *pq, *pq1;
	int dequeued;

	VM_OBJECT_ASSERT_WLOCKED(object);

	mtx = NULL;
	pq = NULL;

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to an empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		if ((object->flags & OBJ_UNMANAGED) == 0) {
			/*
			 * vm_page_free_prep() only needs the page
			 * lock for managed pages.
			 */
			mtx1 = vm_page_lockptr(p);
			if (mtx1 != mtx) {
				if (mtx != NULL)
					mtx_unlock(mtx);
				if (pq != NULL) {
					vm_pagequeue_cnt_add(pq, dequeued);
					vm_pagequeue_unlock(pq);
					pq = NULL;
				}
				mtx = mtx1;
				mtx_lock(mtx);
			}
		}
		p->object = NULL;
		if (p->wire_count != 0)
			goto unlist;
		VM_CNT_INC(v_pfree);
		p->flags &= ~PG_ZERO;
		if (p->queue != PQ_NONE) {
			KASSERT(p->queue < PQ_COUNT, ("vm_object_terminate: "
			    "page %p is not queued", p));
			pq1 = vm_page_pagequeue(p);
			if (pq != pq1) {
				if (pq != NULL) {
					vm_pagequeue_cnt_add(pq, dequeued);
					vm_pagequeue_unlock(pq);
				}
				pq = pq1;
				vm_pagequeue_lock(pq);
				dequeued = 0;
			}
			p->queue = PQ_NONE;
			TAILQ_REMOVE(&pq->pq_pl, p, plinks.q);
			dequeued--;
		}
		if (vm_page_free_prep(p, true))
			continue;
unlist:
		TAILQ_REMOVE(&object->memq, p, listq);
	}
	if (pq != NULL) {
		vm_pagequeue_cnt_add(pq, dequeued);
		vm_pagequeue_unlock(pq);
	}
	if (mtx != NULL)
		mtx_unlock(mtx);

	vm_page_free_phys_pglist(&object->memq);

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
{

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
		*clearobjflags = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t clearobjflags, eio, res;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
	 * objects.  The check below prevents the function from
	 * operating on non-vnode objects.
	 */
	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
	    object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
	clearobjflags = tstart == 0 && tend >= object->size;
	res = TRUE;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (p->valid == 0)
			continue;
		if (vm_page_sleep_if_busy(p, "vpcwai")) {
			if (object->generation != curgeneration) {
				if ((flags & OBJPC_SYNC) != 0)
					goto rescan;
				else
					clearobjflags = FALSE;
			}
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
			continue;

		n = vm_object_page_collect_flush(object, p, pagerflags,
		    flags, &clearobjflags, &eio);
		if (eio) {
			res = FALSE;
			clearobjflags = FALSE;
		}
		if (object->generation != curgeneration) {
			if ((flags & OBJPC_SYNC) != 0)
				goto rescan;
			else
				clearobjflags = FALSE;
		}

		/*
		 * If the VOP_PUTPAGES() did a truncated write, so
		 * that even the first page of the run is not fully
		 * written, vm_pageout_flush() returns 0 as the run
		 * length.  Since the condition that caused truncated
		 * write may be permanent, e.g. exhausted free space,
		 * accepting n == 0 would cause an infinite loop.
		 *
		 * Forwarding the iterator leaves the unwritten page
		 * behind, but there is not much we can do there if
		 * filesystem refuses to write it.
		 */
		if (n == 0) {
			n = 1;
			clearobjflags = FALSE;
		}
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	if (clearobjflags)
		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
	return (res);
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *clearobjflags, boolean_t *eio)
{
	vm_page_t ma[vm_pageout_page_count], p_first, tp;
	int count, i, mreq, runlen;

	vm_page_lock_assert(p, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	count = 1;
	mreq = 0;

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
	}

	for (p_first = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(p_first);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
		p_first = tp;
		mreq++;
	}

	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
		ma[i] = tp;

	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
	return (runlen);
}

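/*
 * Illustrative sketch only (kept out of compilation): flushing every dirty
 * page of a vnode-backed object synchronously, as vm_object_terminate()
 * does above, and propagating the pager status described in the comment
 * over vm_object_page_clean().  The function name is hypothetical.
 */
#if 0
static boolean_t
vm_object_flush_example(vm_object_t object)
{
	boolean_t res;

	VM_OBJECT_WLOCK(object);
	/* start == end == 0 means "clean the entire object". */
	res = vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_WUNLOCK(object);
	return (res);		/* FALSE if the pager reported a write error */
}
#endif
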
/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int error, flags, fsync_after;
	boolean_t res;

	if (object == NULL)
		return (TRUE);
	res = TRUE;
	error = 0;
	VM_OBJECT_WLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_WLOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_WUNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0 &&
	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
		VM_OBJECT_WUNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (syncio && !invalidate && offset == 0 &&
		    atop(size) == object->size) {
			/*
			 * If syncing the whole mapping of the file,
			 * it is faster to schedule all the writes in
			 * async mode, also allowing the clustering,
			 * and then wait for i/o to complete.
			 */
			flags = 0;
			fsync_after = TRUE;
		} else {
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
			fsync_after = FALSE;
		}
		VM_OBJECT_WLOCK(object);
		res = vm_object_page_clean(object, offset, offset + size,
		    flags);
		VM_OBJECT_WUNLOCK(object);
		if (fsync_after)
			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error != 0)
			res = FALSE;
		VM_OBJECT_WLOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_WUNLOCK(object);
	return (res);
}
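
/*
 * Illustrative sketch only (kept out of compilation): an msync(2)-style
 * caller flushing and invalidating a range of an object through
 * vm_object_sync(), which performs its own object locking.  The function
 * name and its arguments are hypothetical.
 */
#if 0
static boolean_t
vm_object_sync_example(vm_object_t object, vm_ooffset_t offset, vm_size_t size)
{

	/* syncio = TRUE: wait for the writes; invalidate = TRUE: drop pages. */
	return (vm_object_sync(object, offset, size, TRUE, TRUE));
}
#endif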
(OBJPC_SYNC | OBJPC_INVAL) : 0; 111475ff604aSKonstantin Belousov fsync_after = FALSE; 111575ff604aSKonstantin Belousov } 111689f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 1117126d6082SKonstantin Belousov res = vm_object_page_clean(object, offset, offset + size, 1118126d6082SKonstantin Belousov flags); 111989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 112075ff604aSKonstantin Belousov if (fsync_after) 1121126d6082SKonstantin Belousov error = VOP_FSYNC(vp, MNT_WAIT, curthread); 112222db15c0SAttilio Rao VOP_UNLOCK(vp, 0); 11233b582b4eSTor Egge vn_finished_write(mp); 1124126d6082SKonstantin Belousov if (error != 0) 1125126d6082SKonstantin Belousov res = FALSE; 112689f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 1127950f8459SAlan Cox } 1128950f8459SAlan Cox if ((object->type == OBJT_VNODE || 1129950f8459SAlan Cox object->type == OBJT_DEVICE) && invalidate) { 11306bbee8e2SAlan Cox if (object->type == OBJT_DEVICE) 11316bbee8e2SAlan Cox /* 11326bbee8e2SAlan Cox * The option OBJPR_NOTMAPPED must be passed here 11336bbee8e2SAlan Cox * because vm_object_page_remove() cannot remove 11346bbee8e2SAlan Cox * unmanaged mappings. 11356bbee8e2SAlan Cox */ 11366bbee8e2SAlan Cox flags = OBJPR_NOTMAPPED; 11376bbee8e2SAlan Cox else if (old_msync) 11386195b24aSKonstantin Belousov flags = 0; 11396bbee8e2SAlan Cox else 11406195b24aSKonstantin Belousov flags = OBJPR_CLEANONLY; 11416bbee8e2SAlan Cox vm_object_page_remove(object, OFF_TO_IDX(offset), 11426bbee8e2SAlan Cox OFF_TO_IDX(offset + size + PAGE_MASK), flags); 1143950f8459SAlan Cox } 114489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1145126d6082SKonstantin Belousov return (res); 1146950f8459SAlan Cox } 1147950f8459SAlan Cox 1148950f8459SAlan Cox /* 1149aa3650eaSMark Johnston * Determine whether the given advice can be applied to the object. Advice is 1150aa3650eaSMark Johnston * not applied to unmanaged pages since they never belong to page queues, and 1151aa3650eaSMark Johnston * since MADV_FREE is destructive, it can apply only to anonymous pages that 1152aa3650eaSMark Johnston * have been mapped at most once. 1153aa3650eaSMark Johnston */ 1154aa3650eaSMark Johnston static bool 1155aa3650eaSMark Johnston vm_object_advice_applies(vm_object_t object, int advice) 1156aa3650eaSMark Johnston { 1157aa3650eaSMark Johnston 1158aa3650eaSMark Johnston if ((object->flags & OBJ_UNMANAGED) != 0) 1159aa3650eaSMark Johnston return (false); 1160aa3650eaSMark Johnston if (advice != MADV_FREE) 1161aa3650eaSMark Johnston return (true); 1162aa3650eaSMark Johnston return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) && 1163aa3650eaSMark Johnston (object->flags & OBJ_ONEMAPPING) != 0); 1164aa3650eaSMark Johnston } 1165aa3650eaSMark Johnston 1166aa3650eaSMark Johnston static void 1167aa3650eaSMark Johnston vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, 1168aa3650eaSMark Johnston vm_size_t size) 1169aa3650eaSMark Johnston { 1170aa3650eaSMark Johnston 1171aa3650eaSMark Johnston if (advice == MADV_FREE && object->type == OBJT_SWAP) 1172aa3650eaSMark Johnston swap_pager_freespace(object, pindex, size); 1173aa3650eaSMark Johnston } 1174aa3650eaSMark Johnston 1175aa3650eaSMark Johnston /* 1176867a482dSJohn Dyson * vm_object_madvise: 1177867a482dSJohn Dyson * 1178867a482dSJohn Dyson * Implements the madvise function at the object/page level. 11791c7c3c6aSMatthew Dillon * 1180193b9358SAlan Cox * MADV_WILLNEED (any object) 1181193b9358SAlan Cox * 1182193b9358SAlan Cox * Activate the specified pages if they are resident. 
1183193b9358SAlan Cox * 1184193b9358SAlan Cox * MADV_DONTNEED (any object) 1185193b9358SAlan Cox * 1186193b9358SAlan Cox * Deactivate the specified pages if they are resident. 1187193b9358SAlan Cox * 1188193b9358SAlan Cox * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 1189193b9358SAlan Cox * OBJ_ONEMAPPING only) 1190193b9358SAlan Cox * 1191193b9358SAlan Cox * Deactivate and clean the specified pages if they are 1192193b9358SAlan Cox * resident. This permits the process to reuse the pages 1193193b9358SAlan Cox * without faulting or the kernel to reclaim the pages 1194193b9358SAlan Cox * without I/O. 1195867a482dSJohn Dyson */ 1196867a482dSJohn Dyson void 119792a59946SJohn Baldwin vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, 1198c2655a40SMark Johnston int advice) 1199867a482dSJohn Dyson { 120092a59946SJohn Baldwin vm_pindex_t tpindex; 120134567de7SAlan Cox vm_object_t backing_object, tobject; 1202aa3650eaSMark Johnston vm_page_t m, tm; 1203867a482dSJohn Dyson 1204867a482dSJohn Dyson if (object == NULL) 1205867a482dSJohn Dyson return; 1206c2655a40SMark Johnston 12076e20a165SJohn Dyson relookup: 1208aa3650eaSMark Johnston VM_OBJECT_WLOCK(object); 1209aa3650eaSMark Johnston if (!vm_object_advice_applies(object, advice)) { 1210aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1211aa3650eaSMark Johnston return; 12126e20a165SJohn Dyson } 1213aa3650eaSMark Johnston for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { 1214aa3650eaSMark Johnston tobject = object; 1215c2655a40SMark Johnston 12161ce137beSMatthew Dillon /* 1217aa3650eaSMark Johnston * If the next page isn't resident in the top-level object, we 1218aa3650eaSMark Johnston * need to search the shadow chain. When applying MADV_FREE, we 1219aa3650eaSMark Johnston * take care to release any swap space used to store 1220aa3650eaSMark Johnston * non-resident pages. 1221aa3650eaSMark Johnston */ 1222aa3650eaSMark Johnston if (m == NULL || pindex < m->pindex) { 1223aa3650eaSMark Johnston /* 1224aa3650eaSMark Johnston * Optimize a common case: if the top-level object has 1225aa3650eaSMark Johnston * no backing object, we can skip over the non-resident 1226aa3650eaSMark Johnston * range in constant time. 12271ce137beSMatthew Dillon */ 1228c2655a40SMark Johnston if (object->backing_object == NULL) { 1229c2655a40SMark Johnston tpindex = (m != NULL && m->pindex < end) ? 1230c2655a40SMark Johnston m->pindex : end; 1231aa3650eaSMark Johnston vm_object_madvise_freespace(object, advice, 1232aa3650eaSMark Johnston pindex, tpindex - pindex); 1233c2655a40SMark Johnston if ((pindex = tpindex) == end) 1234c2655a40SMark Johnston break; 1235aa3650eaSMark Johnston goto next_page; 1236aa3650eaSMark Johnston } 1237aa3650eaSMark Johnston 1238aa3650eaSMark Johnston tpindex = pindex; 1239aa3650eaSMark Johnston do { 1240aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, 1241aa3650eaSMark Johnston tpindex, 1); 12421ce137beSMatthew Dillon /* 1243aa3650eaSMark Johnston * Prepare to search the next object in the 1244aa3650eaSMark Johnston * chain. 
12451ce137beSMatthew Dillon */ 124634567de7SAlan Cox backing_object = tobject->backing_object; 124734567de7SAlan Cox if (backing_object == NULL) 1248aa3650eaSMark Johnston goto next_pindex; 124989f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 1250aa3650eaSMark Johnston tpindex += 1251aa3650eaSMark Johnston OFF_TO_IDX(tobject->backing_object_offset); 12529b98b796SAlan Cox if (tobject != object) 125389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 125434567de7SAlan Cox tobject = backing_object; 1255aa3650eaSMark Johnston if (!vm_object_advice_applies(tobject, advice)) 1256aa3650eaSMark Johnston goto next_pindex; 1257aa3650eaSMark Johnston } while ((tm = vm_page_lookup(tobject, tpindex)) == 1258aa3650eaSMark Johnston NULL); 1259aa3650eaSMark Johnston } else { 1260aa3650eaSMark Johnston next_page: 1261aa3650eaSMark Johnston tm = m; 1262aa3650eaSMark Johnston m = TAILQ_NEXT(m, listq); 1263c2655a40SMark Johnston } 1264c2655a40SMark Johnston 1265867a482dSJohn Dyson /* 12666a2a3d73SAlan Cox * If the page is not in a normal state, skip it. 1267867a482dSJohn Dyson */ 1268aa3650eaSMark Johnston if (tm->valid != VM_PAGE_BITS_ALL) 1269aa3650eaSMark Johnston goto next_pindex; 1270aa3650eaSMark Johnston vm_page_lock(tm); 1271aa3650eaSMark Johnston if (tm->hold_count != 0 || tm->wire_count != 0) { 1272aa3650eaSMark Johnston vm_page_unlock(tm); 1273aa3650eaSMark Johnston goto next_pindex; 12746e20a165SJohn Dyson } 1275aa3650eaSMark Johnston KASSERT((tm->flags & PG_FICTITIOUS) == 0, 1276aa3650eaSMark Johnston ("vm_object_madvise: page %p is fictitious", tm)); 1277aa3650eaSMark Johnston KASSERT((tm->oflags & VPO_UNMANAGED) == 0, 1278aa3650eaSMark Johnston ("vm_object_madvise: page %p is not managed", tm)); 1279aa3650eaSMark Johnston if (vm_page_busied(tm)) { 1280aa3650eaSMark Johnston if (object != tobject) 1281aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(tobject); 1282aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1283c2655a40SMark Johnston if (advice == MADV_WILLNEED) { 1284b11b56b5SAlan Cox /* 1285b11b56b5SAlan Cox * Reference the page before unlocking and 1286b11b56b5SAlan Cox * sleeping so that the page daemon is less 1287b11b56b5SAlan Cox * likely to reclaim it. 1288b11b56b5SAlan Cox */ 1289aa3650eaSMark Johnston vm_page_aflag_set(tm, PGA_REFERENCED); 1290567e51e1SAlan Cox } 1291aa3650eaSMark Johnston vm_page_busy_sleep(tm, "madvpo", false); 12926e20a165SJohn Dyson goto relookup; 129334567de7SAlan Cox } 1294aa3650eaSMark Johnston vm_page_advise(tm, advice); 1295aa3650eaSMark Johnston vm_page_unlock(tm); 1296aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); 1297aa3650eaSMark Johnston next_pindex: 12989b98b796SAlan Cox if (tobject != object) 129989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 1300867a482dSJohn Dyson } 130189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1302867a482dSJohn Dyson } 1303867a482dSJohn Dyson 1304867a482dSJohn Dyson /* 1305df8bae1dSRodney W. Grimes * vm_object_shadow: 1306df8bae1dSRodney W. Grimes * 1307df8bae1dSRodney W. Grimes * Create a new object which is backed by the 1308df8bae1dSRodney W. Grimes * specified existing object range. The source 1309df8bae1dSRodney W. Grimes * object reference is deallocated. 1310df8bae1dSRodney W. Grimes * 1311df8bae1dSRodney W. Grimes * The new object and offset into that object 1312df8bae1dSRodney W. Grimes * are returned in the source parameters. 1313df8bae1dSRodney W. Grimes */ 131426f9a767SRodney W. 
Grimes void 13151b40f8c0SMatthew Dillon vm_object_shadow( 13161b40f8c0SMatthew Dillon vm_object_t *object, /* IN/OUT */ 13171b40f8c0SMatthew Dillon vm_ooffset_t *offset, /* IN/OUT */ 13181b40f8c0SMatthew Dillon vm_size_t length) 1319df8bae1dSRodney W. Grimes { 1320d031cff1SMatthew Dillon vm_object_t source; 1321d031cff1SMatthew Dillon vm_object_t result; 1322df8bae1dSRodney W. Grimes 1323df8bae1dSRodney W. Grimes source = *object; 1324df8bae1dSRodney W. Grimes 1325df8bae1dSRodney W. Grimes /* 13269a2f6362SAlan Cox * Don't create the new object if the old object isn't shared. 13279a2f6362SAlan Cox */ 1328570a2f4aSAlan Cox if (source != NULL) { 132989f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 1330570a2f4aSAlan Cox if (source->ref_count == 1 && 13319a2f6362SAlan Cox source->handle == NULL && 13329a2f6362SAlan Cox (source->type == OBJT_DEFAULT || 13339917e010SAlan Cox source->type == OBJT_SWAP)) { 133489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 13359a2f6362SAlan Cox return; 13369917e010SAlan Cox } 133789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1338570a2f4aSAlan Cox } 13399a2f6362SAlan Cox 13409a2f6362SAlan Cox /* 1341570a2f4aSAlan Cox * Allocate a new object with the given length. 1342df8bae1dSRodney W. Grimes */ 13430cc74f14SAlan Cox result = vm_object_allocate(OBJT_DEFAULT, atop(length)); 1344df8bae1dSRodney W. Grimes 1345df8bae1dSRodney W. Grimes /* 13460d94caffSDavid Greenman * The new object shadows the source object, adding a reference to it. 13470d94caffSDavid Greenman * Our caller changes his reference to point to the new object, 13480d94caffSDavid Greenman * removing a reference to the source object. Net result: no change 13490d94caffSDavid Greenman * of reference count. 13509b09fe24SMatthew Dillon * 13519b09fe24SMatthew Dillon * Try to optimize the result object's page color when shadowing 1352956f3135SPhilippe Charnier * in order to maintain page coloring consistency in the combined 13539b09fe24SMatthew Dillon * shadowed object. 1354df8bae1dSRodney W. Grimes */ 135524a1cce3SDavid Greenman result->backing_object = source; 13569174ca7bSTor Egge /* 13579174ca7bSTor Egge * Store the offset into the source object, and fix up the offset into 13589174ca7bSTor Egge * the new object. 13599174ca7bSTor Egge */ 13609174ca7bSTor Egge result->backing_object_offset = *offset; 1361570a2f4aSAlan Cox if (source != NULL) { 136289f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 13631c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); 1364eaf13dd7SJohn Dyson source->shadow_count++; 1365f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 13667b54b1a9SAlan Cox result->flags |= source->flags & OBJ_COLORED; 1367f8a47341SAlan Cox result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & 1368f8a47341SAlan Cox ((1 << (VM_NFREEORDER - 1)) - 1); 1369f8a47341SAlan Cox #endif 137089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1371de5f6a77SJohn Dyson } 1372df8bae1dSRodney W. Grimes 1373df8bae1dSRodney W. Grimes 1374df8bae1dSRodney W. Grimes /* 1375df8bae1dSRodney W. Grimes * Return the new things 1376df8bae1dSRodney W. Grimes */ 1377df8bae1dSRodney W. Grimes *offset = 0; 1378df8bae1dSRodney W. Grimes *object = result; 1379df8bae1dSRodney W. Grimes } 1380df8bae1dSRodney W. Grimes 1381c5aaa06dSAlan Cox /* 1382c5aaa06dSAlan Cox * vm_object_split: 1383c5aaa06dSAlan Cox * 1384c5aaa06dSAlan Cox * Split the pages in a map entry into a new object. 
This affords 1385c5aaa06dSAlan Cox * easier removal of unused pages, and keeps object inheritance from 1386c5aaa06dSAlan Cox * being a negative impact on memory usage. 1387c5aaa06dSAlan Cox */ 1388c5aaa06dSAlan Cox void 1389c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry) 1390c5aaa06dSAlan Cox { 139173000556SAlan Cox vm_page_t m, m_next; 1392c5aaa06dSAlan Cox vm_object_t orig_object, new_object, source; 139373000556SAlan Cox vm_pindex_t idx, offidxstart; 139473000556SAlan Cox vm_size_t size; 1395c5aaa06dSAlan Cox 1396c5aaa06dSAlan Cox orig_object = entry->object.vm_object; 1397c5aaa06dSAlan Cox if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 1398c5aaa06dSAlan Cox return; 1399c5aaa06dSAlan Cox if (orig_object->ref_count <= 1) 1400c5aaa06dSAlan Cox return; 140189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 1402c5aaa06dSAlan Cox 14034da9f125SAlan Cox offidxstart = OFF_TO_IDX(entry->offset); 140495442adfSAlan Cox size = atop(entry->end - entry->start); 1405c5aaa06dSAlan Cox 14064da9f125SAlan Cox /* 14074da9f125SAlan Cox * If swap_pager_copy() is later called, it will convert new_object 14084da9f125SAlan Cox * into a swap object. 14094da9f125SAlan Cox */ 14104da9f125SAlan Cox new_object = vm_object_allocate(OBJT_DEFAULT, size); 1411c5aaa06dSAlan Cox 1412c5474b8fSAlan Cox /* 1413c5474b8fSAlan Cox * At this point, the new object is still private, so the order in 1414c5474b8fSAlan Cox * which the original and new objects are locked does not matter. 1415c5474b8fSAlan Cox */ 141689f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 141789f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 1418c5aaa06dSAlan Cox source = orig_object->backing_object; 1419c5aaa06dSAlan Cox if (source != NULL) { 142089f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 142119c244d0SAlan Cox if ((source->flags & OBJ_DEAD) != 0) { 142289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 142389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 142489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 142519c244d0SAlan Cox vm_object_deallocate(new_object); 142689f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 142719c244d0SAlan Cox return; 142819c244d0SAlan Cox } 14291c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, 1430c5aaa06dSAlan Cox new_object, shadow_list); 14318e3a76fbSAlan Cox source->shadow_count++; 1432b921a12bSAlan Cox vm_object_reference_locked(source); /* for new_object */ 1433c5aaa06dSAlan Cox vm_object_clear_flag(source, OBJ_ONEMAPPING); 143489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1435c5aaa06dSAlan Cox new_object->backing_object_offset = 14364da9f125SAlan Cox orig_object->backing_object_offset + entry->offset; 1437c5aaa06dSAlan Cox new_object->backing_object = source; 1438c5aaa06dSAlan Cox } 1439ef694c1aSEdward Tomasz Napierala if (orig_object->cred != NULL) { 1440ef694c1aSEdward Tomasz Napierala new_object->cred = orig_object->cred; 1441ef694c1aSEdward Tomasz Napierala crhold(orig_object->cred); 14423364c323SKonstantin Belousov new_object->charge = ptoa(size); 14433364c323SKonstantin Belousov KASSERT(orig_object->charge >= ptoa(size), 14443364c323SKonstantin Belousov ("orig_object->charge < 0")); 14453364c323SKonstantin Belousov orig_object->charge -= ptoa(size); 14463364c323SKonstantin Belousov } 1447c5aaa06dSAlan Cox retry: 1448b382c10aSKonstantin Belousov m = vm_page_find_least(orig_object, offidxstart); 144973000556SAlan Cox for (; m != NULL && (idx = m->pindex - offidxstart) < size; 145073000556SAlan Cox m = m_next) { 145173000556SAlan Cox m_next = TAILQ_NEXT(m, listq); 
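		/*
		 * Capture the successor up front: a successful
		 * vm_page_rename() below moves "m" onto new_object's
		 * list, and any sleep restarts the scan at "retry".
		 */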
1452c5aaa06dSAlan Cox 1453c5aaa06dSAlan Cox /* 1454c5aaa06dSAlan Cox * We must wait for pending I/O to complete before we can 1455c5aaa06dSAlan Cox * rename the page. 1456c5aaa06dSAlan Cox * 1457c5aaa06dSAlan Cox * We do not have to VM_PROT_NONE the page as mappings should 1458c5aaa06dSAlan Cox * not be changed by this operation. 1459c5aaa06dSAlan Cox */ 1460c7aebda8SAttilio Rao if (vm_page_busied(m)) { 146189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1462c7aebda8SAttilio Rao vm_page_lock(m); 1463c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 14645975e53dSKonstantin Belousov vm_page_busy_sleep(m, "spltwt", false); 1465c7aebda8SAttilio Rao VM_OBJECT_WLOCK(orig_object); 146689f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1467c5aaa06dSAlan Cox goto retry; 1468de33beddSAlan Cox } 1469e946b949SAttilio Rao 14703453bca8SAlan Cox /* vm_page_rename() will dirty the page. */ 1471e946b949SAttilio Rao if (vm_page_rename(m, new_object, idx)) { 1472e946b949SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1473e946b949SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 1474*8d6fbbb8SJeff Roberson vm_radix_wait(); 1475e946b949SAttilio Rao VM_OBJECT_WLOCK(orig_object); 1476e946b949SAttilio Rao VM_OBJECT_WLOCK(new_object); 1477e946b949SAttilio Rao goto retry; 1478e946b949SAttilio Rao } 1479b5f359b7SAlan Cox #if VM_NRESERVLEVEL > 0 1480b5f359b7SAlan Cox /* 1481b5f359b7SAlan Cox * If some of the reservation's allocated pages remain with 1482b5f359b7SAlan Cox * the original object, then transferring the reservation to 1483b5f359b7SAlan Cox * the new object is neither particularly beneficial nor 1484b5f359b7SAlan Cox * particularly harmful as compared to leaving the reservation 1485b5f359b7SAlan Cox * with the original object. If, however, all of the 1486b5f359b7SAlan Cox * reservation's allocated pages are transferred to the new 1487b5f359b7SAlan Cox * object, then transferring the reservation is typically 1488b5f359b7SAlan Cox * beneficial. Determining which of these two cases applies 1489b5f359b7SAlan Cox * would be more costly than unconditionally renaming the 1490b5f359b7SAlan Cox * reservation. 1491b5f359b7SAlan Cox */ 1492b5f359b7SAlan Cox vm_reserv_rename(m, new_object, orig_object, offidxstart); 1493b5f359b7SAlan Cox #endif 1494dfd55c0cSAttilio Rao if (orig_object->type == OBJT_SWAP) 1495c7aebda8SAttilio Rao vm_page_xbusy(m); 1496c5aaa06dSAlan Cox } 1497d7a013c3SAlan Cox if (orig_object->type == OBJT_SWAP) { 1498c5aaa06dSAlan Cox /* 1499c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case the orig_object's 1500c7c8dd7eSAlan Cox * and new_object's locks are released and reacquired. 
1501c5aaa06dSAlan Cox */ 1502c5aaa06dSAlan Cox swap_pager_copy(orig_object, new_object, offidxstart, 0); 1503dfd55c0cSAttilio Rao TAILQ_FOREACH(m, &new_object->memq, listq) 1504c7aebda8SAttilio Rao vm_page_xunbusy(m); 1505c5aaa06dSAlan Cox } 150689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 150789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1508c5aaa06dSAlan Cox entry->object.vm_object = new_object; 1509c5aaa06dSAlan Cox entry->offset = 0LL; 1510c5aaa06dSAlan Cox vm_object_deallocate(orig_object); 151189f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1512c5aaa06dSAlan Cox } 1513c5aaa06dSAlan Cox 15142ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_NOWAIT 0x0002 15152ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_WAIT 0x0004 15162ad1a3f7SMatthew Dillon 151799a1570aSKonstantin Belousov static vm_page_t 15184cc8daf7SConrad Meyer vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next, 151999a1570aSKonstantin Belousov int op) 152099a1570aSKonstantin Belousov { 152199a1570aSKonstantin Belousov vm_object_t backing_object; 152299a1570aSKonstantin Belousov 152399a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(object); 152499a1570aSKonstantin Belousov backing_object = object->backing_object; 152599a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(backing_object); 152699a1570aSKonstantin Belousov 152799a1570aSKonstantin Belousov KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p)); 152899a1570aSKonstantin Belousov KASSERT(p == NULL || p->object == object || p->object == backing_object, 152999a1570aSKonstantin Belousov ("invalid ownership %p %p %p", p, object, backing_object)); 153099a1570aSKonstantin Belousov if ((op & OBSC_COLLAPSE_NOWAIT) != 0) 153199a1570aSKonstantin Belousov return (next); 153299a1570aSKonstantin Belousov if (p != NULL) 153399a1570aSKonstantin Belousov vm_page_lock(p); 153499a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 153599a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(backing_object); 1536*8d6fbbb8SJeff Roberson /* The page is only NULL when rename fails. 
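	 * In that case, sleep in vm_radix_wait() until a radix trie node
	 * allocation may succeed, rather than sleeping on a particular page.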
*/ 153799a1570aSKonstantin Belousov if (p == NULL) 1538*8d6fbbb8SJeff Roberson vm_radix_wait(); 153999a1570aSKonstantin Belousov else 15405975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmocol", false); 154199a1570aSKonstantin Belousov VM_OBJECT_WLOCK(object); 154299a1570aSKonstantin Belousov VM_OBJECT_WLOCK(backing_object); 154399a1570aSKonstantin Belousov return (TAILQ_FIRST(&backing_object->memq)); 154499a1570aSKonstantin Belousov } 154599a1570aSKonstantin Belousov 154699a1570aSKonstantin Belousov static bool 15474cc8daf7SConrad Meyer vm_object_scan_all_shadowed(vm_object_t object) 15484cc8daf7SConrad Meyer { 15494cc8daf7SConrad Meyer vm_object_t backing_object; 15504cc8daf7SConrad Meyer vm_page_t p, pp; 155177d6fd97SKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex, pi, ps; 15524cc8daf7SConrad Meyer 15534cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object); 15544cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 15554cc8daf7SConrad Meyer 15564cc8daf7SConrad Meyer backing_object = object->backing_object; 15574cc8daf7SConrad Meyer 155877d6fd97SKonstantin Belousov if (backing_object->type != OBJT_DEFAULT && 155977d6fd97SKonstantin Belousov backing_object->type != OBJT_SWAP) 15604cc8daf7SConrad Meyer return (false); 15614cc8daf7SConrad Meyer 156277d6fd97SKonstantin Belousov pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 156377d6fd97SKonstantin Belousov p = vm_page_find_least(backing_object, pi); 156477d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 15654cc8daf7SConrad Meyer 15664cc8daf7SConrad Meyer /* 156777d6fd97SKonstantin Belousov * Only check pages inside the parent object's range and 156877d6fd97SKonstantin Belousov * inside the parent object's mapping of the backing object. 15694cc8daf7SConrad Meyer */ 157077d6fd97SKonstantin Belousov for (;; pi++) { 157177d6fd97SKonstantin Belousov if (p != NULL && p->pindex < pi) 157277d6fd97SKonstantin Belousov p = TAILQ_NEXT(p, listq); 157377d6fd97SKonstantin Belousov if (ps < pi) 157477d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 157577d6fd97SKonstantin Belousov if (p == NULL && ps >= backing_object->size) 157677d6fd97SKonstantin Belousov break; 157777d6fd97SKonstantin Belousov else if (p == NULL) 157877d6fd97SKonstantin Belousov pi = ps; 157977d6fd97SKonstantin Belousov else 158077d6fd97SKonstantin Belousov pi = MIN(p->pindex, ps); 158177d6fd97SKonstantin Belousov 158277d6fd97SKonstantin Belousov new_pindex = pi - backing_offset_index; 158377d6fd97SKonstantin Belousov if (new_pindex >= object->size) 158477d6fd97SKonstantin Belousov break; 15854cc8daf7SConrad Meyer 15864cc8daf7SConrad Meyer /* 15874cc8daf7SConrad Meyer * See if the parent has the page or if the parent's object 15884cc8daf7SConrad Meyer * pager has the page. If the parent has the page but the page 15894cc8daf7SConrad Meyer * is not valid, the parent's object pager must have the page. 15904cc8daf7SConrad Meyer * 15914cc8daf7SConrad Meyer * If this fails, the parent does not completely shadow the 15924cc8daf7SConrad Meyer * object and we might as well give up now. 
15934cc8daf7SConrad Meyer */ 15944cc8daf7SConrad Meyer pp = vm_page_lookup(object, new_pindex); 15954cc8daf7SConrad Meyer if ((pp == NULL || pp->valid == 0) && 15964cc8daf7SConrad Meyer !vm_pager_has_page(object, new_pindex, NULL, NULL)) 15974cc8daf7SConrad Meyer return (false); 15984cc8daf7SConrad Meyer } 15994cc8daf7SConrad Meyer return (true); 16004cc8daf7SConrad Meyer } 16014cc8daf7SConrad Meyer 16024cc8daf7SConrad Meyer static bool 16034cc8daf7SConrad Meyer vm_object_collapse_scan(vm_object_t object, int op) 16042ad1a3f7SMatthew Dillon { 16052ad1a3f7SMatthew Dillon vm_object_t backing_object; 160699a1570aSKonstantin Belousov vm_page_t next, p, pp; 160799a1570aSKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex; 16082ad1a3f7SMatthew Dillon 160989f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 161089f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 16112ad1a3f7SMatthew Dillon 16122ad1a3f7SMatthew Dillon backing_object = object->backing_object; 16132ad1a3f7SMatthew Dillon backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 16142ad1a3f7SMatthew Dillon 16152ad1a3f7SMatthew Dillon /* 16162ad1a3f7SMatthew Dillon * Initial conditions 16172ad1a3f7SMatthew Dillon */ 16184cc8daf7SConrad Meyer if ((op & OBSC_COLLAPSE_WAIT) != 0) 16192ad1a3f7SMatthew Dillon vm_object_set_flag(backing_object, OBJ_DEAD); 16202ad1a3f7SMatthew Dillon 16212ad1a3f7SMatthew Dillon /* 16222ad1a3f7SMatthew Dillon * Our scan 16232ad1a3f7SMatthew Dillon */ 16244cc8daf7SConrad Meyer for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) { 162599a1570aSKonstantin Belousov next = TAILQ_NEXT(p, listq); 162699a1570aSKonstantin Belousov new_pindex = p->pindex - backing_offset_index; 16272ad1a3f7SMatthew Dillon 16282ad1a3f7SMatthew Dillon /* 16292ad1a3f7SMatthew Dillon * Check for busy page 16302ad1a3f7SMatthew Dillon */ 1631c7aebda8SAttilio Rao if (vm_page_busied(p)) { 16324cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, p, next, op); 16332ad1a3f7SMatthew Dillon continue; 16342ad1a3f7SMatthew Dillon } 16352ad1a3f7SMatthew Dillon 163699a1570aSKonstantin Belousov KASSERT(p->object == backing_object, 16374cc8daf7SConrad Meyer ("vm_object_collapse_scan: object mismatch")); 16382ad1a3f7SMatthew Dillon 163999a1570aSKonstantin Belousov if (p->pindex < backing_offset_index || 164099a1570aSKonstantin Belousov new_pindex >= object->size) { 1641e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 16424cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 16434cc8daf7SConrad Meyer 1); 1644e946b949SAttilio Rao 16452ad1a3f7SMatthew Dillon /* 16464cc8daf7SConrad Meyer * Page is out of the parent object's range, we can 16474cc8daf7SConrad Meyer * simply destroy it. 16482ad1a3f7SMatthew Dillon */ 16492965a453SKip Macy vm_page_lock(p); 1650f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1651f6d89838SAlan Cox ("freeing mapped page %p", p)); 1652f6d89838SAlan Cox if (p->wire_count == 0) 16532ad1a3f7SMatthew Dillon vm_page_free(p); 1654f6d89838SAlan Cox else 1655f6d89838SAlan Cox vm_page_remove(p); 16562965a453SKip Macy vm_page_unlock(p); 16572ad1a3f7SMatthew Dillon continue; 16582ad1a3f7SMatthew Dillon } 16592ad1a3f7SMatthew Dillon 16602ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 166199a1570aSKonstantin Belousov if (pp != NULL && vm_page_busied(pp)) { 1662e18cc7bfSMax Laier /* 16634cc8daf7SConrad Meyer * The page in the parent is busy and possibly not 16644cc8daf7SConrad Meyer * (yet) valid. 
Until its state is finalized by the 16654cc8daf7SConrad Meyer * busy bit owner, we can't tell whether it shadows the 16664cc8daf7SConrad Meyer * original page. Therefore, we must either skip it 16674cc8daf7SConrad Meyer * and the original (backing_object) page or wait for 16684cc8daf7SConrad Meyer * its state to be finalized. 1669e18cc7bfSMax Laier * 16704cc8daf7SConrad Meyer * This is due to a race with vm_fault() where we must 16714cc8daf7SConrad Meyer * unbusy the original (backing_obj) page before we can 16724cc8daf7SConrad Meyer * (re)lock the parent. Hence we can get here. 1673e18cc7bfSMax Laier */ 16744cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, pp, next, 16754cc8daf7SConrad Meyer op); 1676e18cc7bfSMax Laier continue; 1677e18cc7bfSMax Laier } 167899a1570aSKonstantin Belousov 167999a1570aSKonstantin Belousov KASSERT(pp == NULL || pp->valid != 0, 168099a1570aSKonstantin Belousov ("unbusy invalid page %p", pp)); 168199a1570aSKonstantin Belousov 16824cc8daf7SConrad Meyer if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, 16834cc8daf7SConrad Meyer NULL)) { 168499a1570aSKonstantin Belousov /* 16854cc8daf7SConrad Meyer * The page already exists in the parent OR swap exists 16864cc8daf7SConrad Meyer * for this location in the parent. Leave the parent's 16874cc8daf7SConrad Meyer * page alone. Destroy the original page from the 16884cc8daf7SConrad Meyer * backing object. 168999a1570aSKonstantin Belousov */ 1690e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 16914cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 16924cc8daf7SConrad Meyer 1); 16932965a453SKip Macy vm_page_lock(p); 1694f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1695f6d89838SAlan Cox ("freeing mapped page %p", p)); 1696f6d89838SAlan Cox if (p->wire_count == 0) 16972ad1a3f7SMatthew Dillon vm_page_free(p); 1698f6d89838SAlan Cox else 1699f6d89838SAlan Cox vm_page_remove(p); 17002965a453SKip Macy vm_page_unlock(p); 17012ad1a3f7SMatthew Dillon continue; 17022ad1a3f7SMatthew Dillon } 17032ad1a3f7SMatthew Dillon 1704e946b949SAttilio Rao /* 17054cc8daf7SConrad Meyer * Page does not exist in parent, rename the page from the 17064cc8daf7SConrad Meyer * backing object to the main object. 1707e946b949SAttilio Rao * 17084cc8daf7SConrad Meyer * If the page was mapped to a process, it can remain mapped 17093453bca8SAlan Cox * through the rename. vm_page_rename() will dirty the page. 1710e946b949SAttilio Rao */ 1711e946b949SAttilio Rao if (vm_page_rename(p, object, new_pindex)) { 17124cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, NULL, next, 17134cc8daf7SConrad Meyer op); 1714e946b949SAttilio Rao continue; 1715e946b949SAttilio Rao } 171614a5dc17SAttilio Rao 171714a5dc17SAttilio Rao /* Use the old pindex to free the right page. */ 1718e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 171914a5dc17SAttilio Rao swap_pager_freespace(backing_object, 172014a5dc17SAttilio Rao new_pindex + backing_offset_index, 1); 1721e946b949SAttilio Rao 1722f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1723f8a47341SAlan Cox /* 1724f8a47341SAlan Cox * Rename the reservation. 1725f8a47341SAlan Cox */ 1726f8a47341SAlan Cox vm_reserv_rename(p, object, backing_object, 1727f8a47341SAlan Cox backing_offset_index); 1728f8a47341SAlan Cox #endif 17292ad1a3f7SMatthew Dillon } 173099a1570aSKonstantin Belousov return (true); 17312ad1a3f7SMatthew Dillon } 17322ad1a3f7SMatthew Dillon 1733df8bae1dSRodney W. Grimes 1734df8bae1dSRodney W. 
Grimes /* 17352fe6e4d7SDavid Greenman * this version of collapse allows the operation to occur earlier and 17362fe6e4d7SDavid Greenman * when paging_in_progress is true for an object... This is not a complete 17372fe6e4d7SDavid Greenman * operation, but should plug 99.9% of the rest of the leaks. 17382fe6e4d7SDavid Greenman */ 17392fe6e4d7SDavid Greenman static void 17401b40f8c0SMatthew Dillon vm_object_qcollapse(vm_object_t object) 17412fe6e4d7SDavid Greenman { 17422ad1a3f7SMatthew Dillon vm_object_t backing_object = object->backing_object; 17432fe6e4d7SDavid Greenman 174489f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 174589f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(backing_object); 17461b40f8c0SMatthew Dillon 17472fe6e4d7SDavid Greenman if (backing_object->ref_count != 1) 17482fe6e4d7SDavid Greenman return; 17492fe6e4d7SDavid Greenman 17504cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT); 17512fe6e4d7SDavid Greenman } 17522fe6e4d7SDavid Greenman 1753df8bae1dSRodney W. Grimes /* 1754df8bae1dSRodney W. Grimes * vm_object_collapse: 1755df8bae1dSRodney W. Grimes * 1756df8bae1dSRodney W. Grimes * Collapse an object with the object backing it. 1757df8bae1dSRodney W. Grimes * Pages in the backing object are moved into the 1758df8bae1dSRodney W. Grimes * parent, and the backing object is deallocated. 1759df8bae1dSRodney W. Grimes */ 176026f9a767SRodney W. Grimes void 17611b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object) 1762df8bae1dSRodney W. Grimes { 176398f139daSKonstantin Belousov vm_object_t backing_object, new_backing_object; 176498f139daSKonstantin Belousov 176589f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 176623955314SAlfred Perlstein 1767df8bae1dSRodney W. Grimes while (TRUE) { 1768df8bae1dSRodney W. Grimes /* 1769df8bae1dSRodney W. Grimes * Verify that the conditions are right for collapse: 1770df8bae1dSRodney W. Grimes * 17712ad1a3f7SMatthew Dillon * The object exists and the backing object exists. 1772df8bae1dSRodney W. Grimes */ 177324a1cce3SDavid Greenman if ((backing_object = object->backing_object) == NULL) 17742ad1a3f7SMatthew Dillon break; 1775df8bae1dSRodney W. Grimes 1776f919ebdeSDavid Greenman /* 1777f919ebdeSDavid Greenman * we check the backing object first, because it is most likely 177824a1cce3SDavid Greenman * not collapsable. 1779f919ebdeSDavid Greenman */ 178089f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 178124a1cce3SDavid Greenman if (backing_object->handle != NULL || 178224a1cce3SDavid Greenman (backing_object->type != OBJT_DEFAULT && 178324a1cce3SDavid Greenman backing_object->type != OBJT_SWAP) || 1784f919ebdeSDavid Greenman (backing_object->flags & OBJ_DEAD) || 178524a1cce3SDavid Greenman object->handle != NULL || 178624a1cce3SDavid Greenman (object->type != OBJT_DEFAULT && 178724a1cce3SDavid Greenman object->type != OBJT_SWAP) || 178824a1cce3SDavid Greenman (object->flags & OBJ_DEAD)) { 178989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 17902ad1a3f7SMatthew Dillon break; 179124a1cce3SDavid Greenman } 17929b4814bbSDavid Greenman 179398f139daSKonstantin Belousov if (object->paging_in_progress != 0 || 179498f139daSKonstantin Belousov backing_object->paging_in_progress != 0) { 1795b9921222SDavid Greenman vm_object_qcollapse(object); 179689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 17972ad1a3f7SMatthew Dillon break; 1798df8bae1dSRodney W. Grimes } 179998f139daSKonstantin Belousov 180026f9a767SRodney W. 
Grimes /* 18010d94caffSDavid Greenman * We know that we can either collapse the backing object (if 18022ad1a3f7SMatthew Dillon * the parent is the only reference to it) or (perhaps) have 18032ad1a3f7SMatthew Dillon * the parent bypass the object if the parent happens to shadow 18042ad1a3f7SMatthew Dillon * all the resident pages in the entire backing object. 18052ad1a3f7SMatthew Dillon * 18062ad1a3f7SMatthew Dillon * This is ignoring pager-backed pages such as swap pages. 18074cc8daf7SConrad Meyer * vm_object_collapse_scan fails the shadowing test in this 18082ad1a3f7SMatthew Dillon * case. 1809df8bae1dSRodney W. Grimes */ 1810df8bae1dSRodney W. Grimes if (backing_object->ref_count == 1) { 1811aa9bc3b1SKonstantin Belousov vm_object_pip_add(object, 1); 1812aa9bc3b1SKonstantin Belousov vm_object_pip_add(backing_object, 1); 1813aa9bc3b1SKonstantin Belousov 1814df8bae1dSRodney W. Grimes /* 18152ad1a3f7SMatthew Dillon * If there is exactly one reference to the backing 18162ad1a3f7SMatthew Dillon * object, we can collapse it into the parent. 1817df8bae1dSRodney W. Grimes */ 18184cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT); 1819df8bae1dSRodney W. Grimes 1820f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1821f8a47341SAlan Cox /* 1822f8a47341SAlan Cox * Break any reservations from backing_object. 1823f8a47341SAlan Cox */ 1824f8a47341SAlan Cox if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) 1825f8a47341SAlan Cox vm_reserv_break_all(backing_object); 1826f8a47341SAlan Cox #endif 1827f8a47341SAlan Cox 1828df8bae1dSRodney W. Grimes /* 1829df8bae1dSRodney W. Grimes * Move the pager from backing_object to object. 1830df8bae1dSRodney W. Grimes */ 18316be36525SAlan Cox if (backing_object->type == OBJT_SWAP) { 183224a1cce3SDavid Greenman /* 1833c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case 1834c7c8dd7eSAlan Cox * the backing_object's and object's locks are 1835c7c8dd7eSAlan Cox * released and reacquired. 1836571a1e92SAttilio Rao * Since swap_pager_copy() is being asked to 1837571a1e92SAttilio Rao * destroy the source, it will change the 1838571a1e92SAttilio Rao * backing_object's type to OBJT_DEFAULT. 183924a1cce3SDavid Greenman */ 18401c7c3c6aSMatthew Dillon swap_pager_copy( 18411c7c3c6aSMatthew Dillon backing_object, 18421c7c3c6aSMatthew Dillon object, 18431c7c3c6aSMatthew Dillon OFF_TO_IDX(object->backing_object_offset), TRUE); 1844c0503609SDavid Greenman } 1845df8bae1dSRodney W. Grimes /* 1846df8bae1dSRodney W. Grimes * Object now shadows whatever backing_object did. 18472ad1a3f7SMatthew Dillon * Note that the reference to 18482ad1a3f7SMatthew Dillon * backing_object->backing_object moves from within 18492ad1a3f7SMatthew Dillon * backing_object to within object. 1850df8bae1dSRodney W. Grimes */ 18511c500307SAlan Cox LIST_REMOVE(object, shadow_list); 18524f7c7f6eSAlan Cox backing_object->shadow_count--; 1853de5f6a77SJohn Dyson if (backing_object->backing_object) { 185489f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object->backing_object); 18551c500307SAlan Cox LIST_REMOVE(backing_object, shadow_list); 185643186e53SAlan Cox LIST_INSERT_HEAD( 185743186e53SAlan Cox &backing_object->backing_object->shadow_head, 185843186e53SAlan Cox object, shadow_list); 185943186e53SAlan Cox /* 186043186e53SAlan Cox * The shadow_count has not changed. 
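				 * object merely takes backing_object's
				 * place on that list, so one entry was
				 * removed and one was inserted.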
186143186e53SAlan Cox */ 186289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object->backing_object); 1863de5f6a77SJohn Dyson } 186424a1cce3SDavid Greenman object->backing_object = backing_object->backing_object; 18652ad1a3f7SMatthew Dillon object->backing_object_offset += 18662ad1a3f7SMatthew Dillon backing_object->backing_object_offset; 18672ad1a3f7SMatthew Dillon 1868df8bae1dSRodney W. Grimes /* 1869df8bae1dSRodney W. Grimes * Discard backing_object. 1870df8bae1dSRodney W. Grimes * 18710d94caffSDavid Greenman * Since the backing object has no pages, no pager left, 18720d94caffSDavid Greenman * and no object references within it, all that is 18730d94caffSDavid Greenman * necessary is to dispose of it. 1874df8bae1dSRodney W. Grimes */ 18759b4d473aSKonstantin Belousov KASSERT(backing_object->ref_count == 1, ( 18769b4d473aSKonstantin Belousov "backing_object %p was somehow re-referenced during collapse!", 18779b4d473aSKonstantin Belousov backing_object)); 1878aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(backing_object); 1879e735691bSJohn Baldwin backing_object->type = OBJT_DEAD; 1880e735691bSJohn Baldwin backing_object->ref_count = 0; 188189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18829b4d473aSKonstantin Belousov vm_object_destroy(backing_object); 1883df8bae1dSRodney W. Grimes 1884aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(object); 1885df8bae1dSRodney W. Grimes object_collapses++; 18860d94caffSDavid Greenman } else { 1887df8bae1dSRodney W. Grimes /* 18882ad1a3f7SMatthew Dillon * If we do not entirely shadow the backing object, 18892ad1a3f7SMatthew Dillon * there is nothing we can do so we give up. 1890df8bae1dSRodney W. Grimes */ 1891df59a0feSJeff Roberson if (object->resident_page_count != object->size && 18924cc8daf7SConrad Meyer !vm_object_scan_all_shadowed(object)) { 189389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18942ad1a3f7SMatthew Dillon break; 189524a1cce3SDavid Greenman } 1896df8bae1dSRodney W. Grimes 1897df8bae1dSRodney W. Grimes /* 18980d94caffSDavid Greenman * Make the parent shadow the next object in the 18990d94caffSDavid Greenman * chain. Deallocating backing_object will not remove 19000d94caffSDavid Greenman * it, since its reference count is at least 2. 1901df8bae1dSRodney W. Grimes */ 19021c500307SAlan Cox LIST_REMOVE(object, shadow_list); 1903eaf13dd7SJohn Dyson backing_object->shadow_count--; 190495e5e988SJohn Dyson 190595e5e988SJohn Dyson new_backing_object = backing_object->backing_object; 19068aef1712SMatthew Dillon if ((object->backing_object = new_backing_object) != NULL) { 190789f6b863SAttilio Rao VM_OBJECT_WLOCK(new_backing_object); 19081c500307SAlan Cox LIST_INSERT_HEAD( 19092ad1a3f7SMatthew Dillon &new_backing_object->shadow_head, 19102ad1a3f7SMatthew Dillon object, 19112ad1a3f7SMatthew Dillon shadow_list 19122ad1a3f7SMatthew Dillon ); 1913eaf13dd7SJohn Dyson new_backing_object->shadow_count++; 1914b921a12bSAlan Cox vm_object_reference_locked(new_backing_object); 191589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_backing_object); 191695e5e988SJohn Dyson object->backing_object_offset += 191795e5e988SJohn Dyson backing_object->backing_object_offset; 1918de5f6a77SJohn Dyson } 1919df8bae1dSRodney W. Grimes 1920df8bae1dSRodney W. Grimes /* 19210d94caffSDavid Greenman * Drop the reference count on backing_object. Since 192222ec553fSAlan Cox * its ref_count was at least 2, it will not vanish. 1923df8bae1dSRodney W. 
Grimes */ 192422ec553fSAlan Cox backing_object->ref_count--; 192589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 1926df8bae1dSRodney W. Grimes object_bypasses++; 1927df8bae1dSRodney W. Grimes } 1928df8bae1dSRodney W. Grimes 1929df8bae1dSRodney W. Grimes /* 1930df8bae1dSRodney W. Grimes * Try again with this object's new backing object. 1931df8bae1dSRodney W. Grimes */ 1932df8bae1dSRodney W. Grimes } 1933df8bae1dSRodney W. Grimes } 1934df8bae1dSRodney W. Grimes 1935df8bae1dSRodney W. Grimes /* 1936bff99f0dSAlan Cox * vm_object_page_remove: 1937df8bae1dSRodney W. Grimes * 193868855966SAlan Cox * For the given object, either frees or invalidates each of the 19396bbee8e2SAlan Cox * specified pages. In general, a page is freed. However, if a page is 19406bbee8e2SAlan Cox * wired for any reason other than the existence of a managed, wired 19416bbee8e2SAlan Cox * mapping, then it may be invalidated but not removed from the object. 19426bbee8e2SAlan Cox * Pages are specified by the given range ["start", "end") and the option 19436bbee8e2SAlan Cox * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range 19446bbee8e2SAlan Cox * extends from "start" to the end of the object. If the option 19456bbee8e2SAlan Cox * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the 19466bbee8e2SAlan Cox * specified range are affected. If the option OBJPR_NOTMAPPED is 19476bbee8e2SAlan Cox * specified, then the pages within the specified range must have no 19486bbee8e2SAlan Cox * mappings. Otherwise, if this option is not specified, any mappings to 19496bbee8e2SAlan Cox * the specified pages are removed before the pages are freed or 19506bbee8e2SAlan Cox * invalidated. 195168855966SAlan Cox * 19526bbee8e2SAlan Cox * In general, this operation should only be performed on objects that 19536bbee8e2SAlan Cox * contain managed pages. There are, however, two exceptions. First, it 19546bbee8e2SAlan Cox * is performed on the kernel and kmem objects by vm_map_entry_delete(). 19556bbee8e2SAlan Cox * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device- 19566bbee8e2SAlan Cox * backed pages. In both of these cases, the option OBJPR_CLEANONLY must 19576bbee8e2SAlan Cox * not be specified and the option OBJPR_NOTMAPPED must be specified. 1958df8bae1dSRodney W. Grimes * 1959df8bae1dSRodney W. Grimes * The object must be locked. 1960df8bae1dSRodney W. Grimes */ 196126f9a767SRodney W. Grimes void 1962ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 19636bbee8e2SAlan Cox int options) 1964df8bae1dSRodney W. Grimes { 1965d031cff1SMatthew Dillon vm_page_t p, next; 196693c5d3a4SKonstantin Belousov struct mtx *mtx; 1967bba52ecaSKonstantin Belousov struct pglist pgl; 1968df8bae1dSRodney W. Grimes 196989f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 197028634820SAlan Cox KASSERT((object->flags & OBJ_UNMANAGED) == 0 || 19716bbee8e2SAlan Cox (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, 19726bbee8e2SAlan Cox ("vm_object_page_remove: illegal options for object %p", object)); 1973ecde4b32SAlan Cox if (object->resident_page_count == 0) 19747667839aSAlan Cox return; 1975d474eaaaSDoug Rabson vm_object_pip_add(object, 1); 1976bba52ecaSKonstantin Belousov TAILQ_INIT(&pgl); 197726f9a767SRodney W. 
Grimes again: 1978b382c10aSKonstantin Belousov p = vm_page_find_least(object, start); 197993c5d3a4SKonstantin Belousov mtx = NULL; 19802965a453SKip Macy 198175741c04SAlan Cox /* 19826bbee8e2SAlan Cox * Here, the variable "p" is either (1) the page with the least pindex 19836bbee8e2SAlan Cox * greater than or equal to the parameter "start" or (2) NULL. 198475741c04SAlan Cox */ 19856bbee8e2SAlan Cox for (; p != NULL && (p->pindex < end || end == 0); p = next) { 1986b18bfc3dSJohn Dyson next = TAILQ_NEXT(p, listq); 198775741c04SAlan Cox 198859677d3cSAlan Cox /* 19896bbee8e2SAlan Cox * If the page is wired for any reason besides the existence 19906bbee8e2SAlan Cox * of managed, wired mappings, then it cannot be freed. For 19916bbee8e2SAlan Cox * example, fictitious pages, which represent device memory, 19926bbee8e2SAlan Cox * are inherently wired and cannot be freed. They can, 19936bbee8e2SAlan Cox * however, be invalidated if the option OBJPR_CLEANONLY is 19946bbee8e2SAlan Cox * not specified. 199559677d3cSAlan Cox */ 199693c5d3a4SKonstantin Belousov vm_page_change_lock(p, &mtx); 19973aaea6efSKonstantin Belousov if (vm_page_xbusied(p)) { 19983aaea6efSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 19995975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopax", true); 20003aaea6efSKonstantin Belousov VM_OBJECT_WLOCK(object); 20013aaea6efSKonstantin Belousov goto again; 20023aaea6efSKonstantin Belousov } 20036195b24aSKonstantin Belousov if (p->wire_count != 0) { 2004cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && 2005cf060942SAlan Cox object->ref_count != 0) 20064fec79beSAlan Cox pmap_remove_all(p); 20076bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) == 0) { 2008bd7e5f99SJohn Dyson p->valid = 0; 2009a28042d1SAlan Cox vm_page_undirty(p); 2010a28042d1SAlan Cox } 201193c5d3a4SKonstantin Belousov continue; 20120d94caffSDavid Greenman } 2013c7aebda8SAttilio Rao if (vm_page_busied(p)) { 2014c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(object); 20155975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopar", false); 2016c7aebda8SAttilio Rao VM_OBJECT_WLOCK(object); 201726f9a767SRodney W. Grimes goto again; 2018c7aebda8SAttilio Rao } 201968855966SAlan Cox KASSERT((p->flags & PG_FICTITIOUS) == 0, 202068855966SAlan Cox ("vm_object_page_remove: page %p is fictitious", p)); 20216bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) { 2022cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && 2023cf060942SAlan Cox object->ref_count != 0) 202478985e42SAlan Cox pmap_remove_write(p); 2025cf060942SAlan Cox if (p->dirty != 0) 202693c5d3a4SKonstantin Belousov continue; 20272965a453SKip Macy } 2028cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) 20294fec79beSAlan Cox pmap_remove_all(p); 2030bba52ecaSKonstantin Belousov p->flags &= ~PG_ZERO; 2031bba52ecaSKonstantin Belousov if (vm_page_free_prep(p, false)) 2032bba52ecaSKonstantin Belousov TAILQ_INSERT_TAIL(&pgl, p, listq); 20332965a453SKip Macy } 203493c5d3a4SKonstantin Belousov if (mtx != NULL) 203593c5d3a4SKonstantin Belousov mtx_unlock(mtx); 2036bba52ecaSKonstantin Belousov vm_page_free_phys_pglist(&pgl); 2037f919ebdeSDavid Greenman vm_object_pip_wakeup(object); 2038c0503609SDavid Greenman } 2039df8bae1dSRodney W. Grimes 2040df8bae1dSRodney W. Grimes /* 20413138cd36SMark Johnston * vm_object_page_noreuse: 2042936c09acSJohn Baldwin * 20433138cd36SMark Johnston * For the given object, attempt to move the specified pages to 20443138cd36SMark Johnston * the head of the inactive queue. 
This bypasses regular LRU 20453138cd36SMark Johnston * operation and allows the pages to be reused quickly under memory 20463138cd36SMark Johnston * pressure. If a page is wired for any reason, then it will not 20473138cd36SMark Johnston * be queued. Pages are specified by the range ["start", "end"). 20483138cd36SMark Johnston * As a special case, if "end" is zero, then the range extends from 20493138cd36SMark Johnston * "start" to the end of the object. 2050936c09acSJohn Baldwin * 2051936c09acSJohn Baldwin * This operation should only be performed on objects that 205228634820SAlan Cox * contain non-fictitious, managed pages. 2053936c09acSJohn Baldwin * 2054936c09acSJohn Baldwin * The object must be locked. 2055936c09acSJohn Baldwin */ 2056936c09acSJohn Baldwin void 20573138cd36SMark Johnston vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2058936c09acSJohn Baldwin { 205993c5d3a4SKonstantin Belousov struct mtx *mtx; 2060936c09acSJohn Baldwin vm_page_t p, next; 2061936c09acSJohn Baldwin 206252d1addaSAlan Cox VM_OBJECT_ASSERT_LOCKED(object); 206328634820SAlan Cox KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 20643138cd36SMark Johnston ("vm_object_page_noreuse: illegal object %p", object)); 2065936c09acSJohn Baldwin if (object->resident_page_count == 0) 2066936c09acSJohn Baldwin return; 2067936c09acSJohn Baldwin p = vm_page_find_least(object, start); 2068936c09acSJohn Baldwin 2069936c09acSJohn Baldwin /* 2070936c09acSJohn Baldwin * Here, the variable "p" is either (1) the page with the least pindex 2071936c09acSJohn Baldwin * greater than or equal to the parameter "start" or (2) NULL. 2072936c09acSJohn Baldwin */ 2073936c09acSJohn Baldwin mtx = NULL; 2074936c09acSJohn Baldwin for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2075936c09acSJohn Baldwin next = TAILQ_NEXT(p, listq); 207693c5d3a4SKonstantin Belousov vm_page_change_lock(p, &mtx); 20773138cd36SMark Johnston vm_page_deactivate_noreuse(p); 2078936c09acSJohn Baldwin } 2079936c09acSJohn Baldwin if (mtx != NULL) 2080936c09acSJohn Baldwin mtx_unlock(mtx); 2081936c09acSJohn Baldwin } 2082936c09acSJohn Baldwin 2083936c09acSJohn Baldwin /* 2084387aabc5SAlan Cox * Populate the specified range of the object with valid pages. Returns 2085387aabc5SAlan Cox * TRUE if the range is successfully populated and FALSE otherwise. 2086387aabc5SAlan Cox * 2087387aabc5SAlan Cox * Note: This function should be optimized to pass a larger array of 2088387aabc5SAlan Cox * pages to vm_pager_get_pages() before it is applied to a non- 2089387aabc5SAlan Cox * OBJT_DEVICE object. 2090387aabc5SAlan Cox * 2091387aabc5SAlan Cox * The object must be locked. 
2092387aabc5SAlan Cox */ 2093387aabc5SAlan Cox boolean_t 2094387aabc5SAlan Cox vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2095387aabc5SAlan Cox { 2096093c7f39SGleb Smirnoff vm_page_t m; 2097387aabc5SAlan Cox vm_pindex_t pindex; 2098387aabc5SAlan Cox int rv; 2099387aabc5SAlan Cox 210089f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2101387aabc5SAlan Cox for (pindex = start; pindex < end; pindex++) { 21025944de8eSKonstantin Belousov m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); 2103387aabc5SAlan Cox if (m->valid != VM_PAGE_BITS_ALL) { 2104b0cd2017SGleb Smirnoff rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); 2105387aabc5SAlan Cox if (rv != VM_PAGER_OK) { 21062965a453SKip Macy vm_page_lock(m); 2107387aabc5SAlan Cox vm_page_free(m); 21082965a453SKip Macy vm_page_unlock(m); 2109387aabc5SAlan Cox break; 2110387aabc5SAlan Cox } 2111387aabc5SAlan Cox } 2112387aabc5SAlan Cox /* 2113387aabc5SAlan Cox * Keep "m" busy because a subsequent iteration may unlock 2114387aabc5SAlan Cox * the object. 2115387aabc5SAlan Cox */ 2116387aabc5SAlan Cox } 2117387aabc5SAlan Cox if (pindex > start) { 2118387aabc5SAlan Cox m = vm_page_lookup(object, start); 2119387aabc5SAlan Cox while (m != NULL && m->pindex < pindex) { 2120c7aebda8SAttilio Rao vm_page_xunbusy(m); 2121387aabc5SAlan Cox m = TAILQ_NEXT(m, listq); 2122387aabc5SAlan Cox } 2123387aabc5SAlan Cox } 2124387aabc5SAlan Cox return (pindex == end); 2125387aabc5SAlan Cox } 2126387aabc5SAlan Cox 2127387aabc5SAlan Cox /* 2128df8bae1dSRodney W. Grimes * Routine: vm_object_coalesce 2129df8bae1dSRodney W. Grimes * Function: Coalesces two objects backing up adjoining 2130df8bae1dSRodney W. Grimes * regions of memory into a single object. 2131df8bae1dSRodney W. Grimes * 2132df8bae1dSRodney W. Grimes * returns TRUE if objects were combined. 2133df8bae1dSRodney W. Grimes * 2134df8bae1dSRodney W. Grimes * NOTE: Only works at the moment if the second object is NULL - 2135df8bae1dSRodney W. Grimes * if it's not, which object do we lock first? 2136df8bae1dSRodney W. Grimes * 2137df8bae1dSRodney W. Grimes * Parameters: 2138df8bae1dSRodney W. Grimes * prev_object First object to coalesce 2139df8bae1dSRodney W. Grimes * prev_offset Offset into prev_object 2140df8bae1dSRodney W. Grimes * prev_size Size of reference to prev_object 214157a21abaSAlan Cox * next_size Size of reference to the second object 21423364c323SKonstantin Belousov * reserved Indicator that extension region has 21433364c323SKonstantin Belousov * swap accounted for 2144df8bae1dSRodney W. Grimes * 2145df8bae1dSRodney W. Grimes * Conditions: 2146df8bae1dSRodney W. Grimes * The object must *not* be locked. 2147df8bae1dSRodney W. Grimes */ 21480d94caffSDavid Greenman boolean_t 214957a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 21503364c323SKonstantin Belousov vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) 2151df8bae1dSRodney W. Grimes { 2152ea41812fSAlan Cox vm_pindex_t next_pindex; 2153df8bae1dSRodney W. Grimes 215400e1854aSAlan Cox if (prev_object == NULL) 2155df8bae1dSRodney W. 
Grimes return (TRUE); 215689f6b863SAttilio Rao VM_OBJECT_WLOCK(prev_object); 21579ded9474SKonstantin Belousov if ((prev_object->type != OBJT_DEFAULT && 21589ded9474SKonstantin Belousov prev_object->type != OBJT_SWAP) || 2159f08f7dcaSKonstantin Belousov (prev_object->flags & OBJ_TMPFS_NODE) != 0) { 216089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 216130dcfc09SJohn Dyson return (FALSE); 216230dcfc09SJohn Dyson } 216330dcfc09SJohn Dyson 2164df8bae1dSRodney W. Grimes /* 2165df8bae1dSRodney W. Grimes * Try to collapse the object first 2166df8bae1dSRodney W. Grimes */ 2167df8bae1dSRodney W. Grimes vm_object_collapse(prev_object); 2168df8bae1dSRodney W. Grimes 2169df8bae1dSRodney W. Grimes /* 21700d94caffSDavid Greenman * Can't coalesce if: . more than one reference . paged out . shadows 21710d94caffSDavid Greenman * another object . has a copy elsewhere (any of which mean that the 21720d94caffSDavid Greenman * pages not mapped to prev_entry may be in use anyway) 2173df8bae1dSRodney W. Grimes */ 21748cc7e047SJohn Dyson if (prev_object->backing_object != NULL) { 217589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 2176df8bae1dSRodney W. Grimes return (FALSE); 2177df8bae1dSRodney W. Grimes } 2178a316d390SJohn Dyson 2179a316d390SJohn Dyson prev_size >>= PAGE_SHIFT; 2180a316d390SJohn Dyson next_size >>= PAGE_SHIFT; 218157a21abaSAlan Cox next_pindex = OFF_TO_IDX(prev_offset) + prev_size; 21828cc7e047SJohn Dyson 21838cc7e047SJohn Dyson if ((prev_object->ref_count > 1) && 2184ea41812fSAlan Cox (prev_object->size != next_pindex)) { 218589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 21868cc7e047SJohn Dyson return (FALSE); 21878cc7e047SJohn Dyson } 21888cc7e047SJohn Dyson 2189df8bae1dSRodney W. Grimes /* 21903364c323SKonstantin Belousov * Account for the charge. 21913364c323SKonstantin Belousov */ 2192ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) { 21933364c323SKonstantin Belousov 21943364c323SKonstantin Belousov /* 21953364c323SKonstantin Belousov * If prev_object was charged, then this mapping, 2196763df3ecSPedro F. Giffuni * although not charged now, may become writable 2197ef694c1aSEdward Tomasz Napierala * later. Non-NULL cred in the object would prevent 21983364c323SKonstantin Belousov * swap reservation during enabling of the write 21993364c323SKonstantin Belousov * access, so reserve swap now. Failed reservation 22003364c323SKonstantin Belousov * cause allocation of the separate object for the map 22013364c323SKonstantin Belousov * entry, and swap reservation for this entry is 22023364c323SKonstantin Belousov * managed in appropriate time. 22033364c323SKonstantin Belousov */ 2204ef694c1aSEdward Tomasz Napierala if (!reserved && !swap_reserve_by_cred(ptoa(next_size), 2205ef694c1aSEdward Tomasz Napierala prev_object->cred)) { 22069f790a17SKonstantin Belousov VM_OBJECT_WUNLOCK(prev_object); 22073364c323SKonstantin Belousov return (FALSE); 22083364c323SKonstantin Belousov } 22093364c323SKonstantin Belousov prev_object->charge += ptoa(next_size); 22103364c323SKonstantin Belousov } 22113364c323SKonstantin Belousov 22123364c323SKonstantin Belousov /* 22130d94caffSDavid Greenman * Remove any pages that may still be in the object from a previous 22140d94caffSDavid Greenman * deallocation. 2215df8bae1dSRodney W. 
Grimes */ 2216ea41812fSAlan Cox if (next_pindex < prev_object->size) { 22176bbee8e2SAlan Cox vm_object_page_remove(prev_object, next_pindex, next_pindex + 22186bbee8e2SAlan Cox next_size, 0); 2219ea41812fSAlan Cox if (prev_object->type == OBJT_SWAP) 2220ea41812fSAlan Cox swap_pager_freespace(prev_object, 2221ea41812fSAlan Cox next_pindex, next_size); 22223364c323SKonstantin Belousov #if 0 2223ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) { 22243364c323SKonstantin Belousov KASSERT(prev_object->charge >= 22253364c323SKonstantin Belousov ptoa(prev_object->size - next_pindex), 22263364c323SKonstantin Belousov ("object %p overcharged 1 %jx %jx", prev_object, 22273364c323SKonstantin Belousov (uintmax_t)next_pindex, (uintmax_t)next_size)); 22283364c323SKonstantin Belousov prev_object->charge -= ptoa(prev_object->size - 22293364c323SKonstantin Belousov next_pindex); 22303364c323SKonstantin Belousov } 22313364c323SKonstantin Belousov #endif 2232ea41812fSAlan Cox } 2233df8bae1dSRodney W. Grimes 2234df8bae1dSRodney W. Grimes /* 2235df8bae1dSRodney W. Grimes * Extend the object if necessary. 2236df8bae1dSRodney W. Grimes */ 2237ea41812fSAlan Cox if (next_pindex + next_size > prev_object->size) 2238ea41812fSAlan Cox prev_object->size = next_pindex + next_size; 2239df8bae1dSRodney W. Grimes 224089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 2241df8bae1dSRodney W. Grimes return (TRUE); 2242df8bae1dSRodney W. Grimes } 2243df8bae1dSRodney W. Grimes 22447a5a6352SMatthew Dillon void 22457a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object) 22467a5a6352SMatthew Dillon { 22477a5a6352SMatthew Dillon 224889f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2249f40cb1c6SKonstantin Belousov if (object->type != OBJT_VNODE) { 2250f40cb1c6SKonstantin Belousov if ((object->flags & OBJ_TMPFS_NODE) != 0) { 2251f40cb1c6SKonstantin Belousov KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs")); 2252f40cb1c6SKonstantin Belousov vm_object_set_flag(object, OBJ_TMPFS_DIRTY); 2253f40cb1c6SKonstantin Belousov } 22543280870dSKonstantin Belousov return; 2255f40cb1c6SKonstantin Belousov } 22563280870dSKonstantin Belousov object->generation++; 22573280870dSKonstantin Belousov if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) 2258ee39666aSJeff Roberson return; 2259af51d7bfSAlan Cox vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); 22607a5a6352SMatthew Dillon } 22617a5a6352SMatthew Dillon 226203462509SAlan Cox /* 226303462509SAlan Cox * vm_object_unwire: 226403462509SAlan Cox * 226503462509SAlan Cox * For each page offset within the specified range of the given object, 226603462509SAlan Cox * find the highest-level page in the shadow chain and unwire it. A page 226703462509SAlan Cox * must exist at every page offset, and the highest-level page must be 226803462509SAlan Cox * wired. 
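 *
 *	The shadow chain is walked with read locks that are accumulated
 *	as deeper backing objects are visited (tracked by "locked_depth");
 *	all of the accumulated locks are released together once the
 *	entire range has been processed.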
226903462509SAlan Cox */ 227003462509SAlan Cox void 227103462509SAlan Cox vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, 227203462509SAlan Cox uint8_t queue) 227303462509SAlan Cox { 227403462509SAlan Cox vm_object_t tobject; 227503462509SAlan Cox vm_page_t m, tm; 227603462509SAlan Cox vm_pindex_t end_pindex, pindex, tpindex; 227703462509SAlan Cox int depth, locked_depth; 227803462509SAlan Cox 227903462509SAlan Cox KASSERT((offset & PAGE_MASK) == 0, 228003462509SAlan Cox ("vm_object_unwire: offset is not page aligned")); 228103462509SAlan Cox KASSERT((length & PAGE_MASK) == 0, 228203462509SAlan Cox ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); 228303462509SAlan Cox /* The wired count of a fictitious page never changes. */ 228403462509SAlan Cox if ((object->flags & OBJ_FICTITIOUS) != 0) 228503462509SAlan Cox return; 228603462509SAlan Cox pindex = OFF_TO_IDX(offset); 228703462509SAlan Cox end_pindex = pindex + atop(length); 228803462509SAlan Cox locked_depth = 1; 228903462509SAlan Cox VM_OBJECT_RLOCK(object); 229003462509SAlan Cox m = vm_page_find_least(object, pindex); 229103462509SAlan Cox while (pindex < end_pindex) { 229203462509SAlan Cox if (m == NULL || pindex < m->pindex) { 229303462509SAlan Cox /* 229403462509SAlan Cox * The first object in the shadow chain doesn't 229503462509SAlan Cox * contain a page at the current index. Therefore, 229603462509SAlan Cox * the page must exist in a backing object. 229703462509SAlan Cox */ 229803462509SAlan Cox tobject = object; 229903462509SAlan Cox tpindex = pindex; 230003462509SAlan Cox depth = 0; 230103462509SAlan Cox do { 230203462509SAlan Cox tpindex += 230303462509SAlan Cox OFF_TO_IDX(tobject->backing_object_offset); 230403462509SAlan Cox tobject = tobject->backing_object; 230503462509SAlan Cox KASSERT(tobject != NULL, 230603462509SAlan Cox ("vm_object_unwire: missing page")); 230703462509SAlan Cox if ((tobject->flags & OBJ_FICTITIOUS) != 0) 230803462509SAlan Cox goto next_page; 230903462509SAlan Cox depth++; 231003462509SAlan Cox if (depth == locked_depth) { 231103462509SAlan Cox locked_depth++; 231203462509SAlan Cox VM_OBJECT_RLOCK(tobject); 231303462509SAlan Cox } 231403462509SAlan Cox } while ((tm = vm_page_lookup(tobject, tpindex)) == 231503462509SAlan Cox NULL); 231603462509SAlan Cox } else { 231703462509SAlan Cox tm = m; 231803462509SAlan Cox m = TAILQ_NEXT(m, listq); 231903462509SAlan Cox } 232003462509SAlan Cox vm_page_lock(tm); 232103462509SAlan Cox vm_page_unwire(tm, queue); 232203462509SAlan Cox vm_page_unlock(tm); 232303462509SAlan Cox next_page: 232403462509SAlan Cox pindex++; 232503462509SAlan Cox } 232603462509SAlan Cox /* Release the accumulated object locks. 
*/ 232703462509SAlan Cox for (depth = 0; depth < locked_depth; depth++) { 232803462509SAlan Cox tobject = object->backing_object; 232903462509SAlan Cox VM_OBJECT_RUNLOCK(object); 233003462509SAlan Cox object = tobject; 233103462509SAlan Cox } 233203462509SAlan Cox } 233303462509SAlan Cox 233463e4c6cdSEric van Gyzen struct vnode * 233563e4c6cdSEric van Gyzen vm_object_vnode(vm_object_t object) 233663e4c6cdSEric van Gyzen { 233763e4c6cdSEric van Gyzen 233863e4c6cdSEric van Gyzen VM_OBJECT_ASSERT_LOCKED(object); 233963e4c6cdSEric van Gyzen if (object->type == OBJT_VNODE) 234063e4c6cdSEric van Gyzen return (object->handle); 234163e4c6cdSEric van Gyzen if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0) 234263e4c6cdSEric van Gyzen return (object->un_pager.swp.swp_tmpfs); 234363e4c6cdSEric van Gyzen return (NULL); 234463e4c6cdSEric van Gyzen } 234563e4c6cdSEric van Gyzen 2346ff87ae35SJohn Baldwin static int 2347ff87ae35SJohn Baldwin sysctl_vm_object_list(SYSCTL_HANDLER_ARGS) 2348ff87ae35SJohn Baldwin { 23490ecee546SKonstantin Belousov struct kinfo_vmobject *kvo; 2350ff87ae35SJohn Baldwin char *fullpath, *freepath; 2351ff87ae35SJohn Baldwin struct vnode *vp; 2352ff87ae35SJohn Baldwin struct vattr va; 2353ff87ae35SJohn Baldwin vm_object_t obj; 2354ff87ae35SJohn Baldwin vm_page_t m; 2355ff87ae35SJohn Baldwin int count, error; 2356ff87ae35SJohn Baldwin 2357ff87ae35SJohn Baldwin if (req->oldptr == NULL) { 2358ff87ae35SJohn Baldwin /* 2359ff87ae35SJohn Baldwin * If an old buffer has not been provided, generate an 2360ff87ae35SJohn Baldwin * estimate of the space needed for a subsequent call. 2361ff87ae35SJohn Baldwin */ 2362ff87ae35SJohn Baldwin mtx_lock(&vm_object_list_mtx); 2363ff87ae35SJohn Baldwin count = 0; 2364ff87ae35SJohn Baldwin TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2365ff87ae35SJohn Baldwin if (obj->type == OBJT_DEAD) 2366ff87ae35SJohn Baldwin continue; 2367ff87ae35SJohn Baldwin count++; 2368ff87ae35SJohn Baldwin } 2369ff87ae35SJohn Baldwin mtx_unlock(&vm_object_list_mtx); 2370ff87ae35SJohn Baldwin return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) * 2371ff87ae35SJohn Baldwin count * 11 / 10)); 2372ff87ae35SJohn Baldwin } 2373ff87ae35SJohn Baldwin 23740ecee546SKonstantin Belousov kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK); 2375ff87ae35SJohn Baldwin error = 0; 2376ff87ae35SJohn Baldwin 2377ff87ae35SJohn Baldwin /* 2378ff87ae35SJohn Baldwin * VM objects are type stable and are never removed from the 2379ff87ae35SJohn Baldwin * list once added. This allows us to safely read obj->object_list 2380ff87ae35SJohn Baldwin * after reacquiring the VM object lock. 
2381ff87ae35SJohn Baldwin */ 2382ff87ae35SJohn Baldwin mtx_lock(&vm_object_list_mtx); 2383ff87ae35SJohn Baldwin TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2384ff87ae35SJohn Baldwin if (obj->type == OBJT_DEAD) 2385ff87ae35SJohn Baldwin continue; 2386ff87ae35SJohn Baldwin VM_OBJECT_RLOCK(obj); 2387ff87ae35SJohn Baldwin if (obj->type == OBJT_DEAD) { 2388ff87ae35SJohn Baldwin VM_OBJECT_RUNLOCK(obj); 2389ff87ae35SJohn Baldwin continue; 2390ff87ae35SJohn Baldwin } 2391ff87ae35SJohn Baldwin mtx_unlock(&vm_object_list_mtx); 23920ecee546SKonstantin Belousov kvo->kvo_size = ptoa(obj->size); 23930ecee546SKonstantin Belousov kvo->kvo_resident = obj->resident_page_count; 23940ecee546SKonstantin Belousov kvo->kvo_ref_count = obj->ref_count; 23950ecee546SKonstantin Belousov kvo->kvo_shadow_count = obj->shadow_count; 23960ecee546SKonstantin Belousov kvo->kvo_memattr = obj->memattr; 23970ecee546SKonstantin Belousov kvo->kvo_active = 0; 23980ecee546SKonstantin Belousov kvo->kvo_inactive = 0; 2399ff87ae35SJohn Baldwin TAILQ_FOREACH(m, &obj->memq, listq) { 2400ff87ae35SJohn Baldwin /* 2401ff87ae35SJohn Baldwin * A page may belong to the object but be 2402ff87ae35SJohn Baldwin * dequeued and set to PQ_NONE while the 2403ff87ae35SJohn Baldwin * object lock is not held. This makes the 2404ff87ae35SJohn Baldwin * reads of m->queue below racy, and we do not 2405ff87ae35SJohn Baldwin * count pages set to PQ_NONE. However, this 2406ff87ae35SJohn Baldwin * sysctl is only meant to give an 2407ff87ae35SJohn Baldwin * approximation of the system anyway. 2408ff87ae35SJohn Baldwin */ 2409ebcddc72SAlan Cox if (vm_page_active(m)) 24100ecee546SKonstantin Belousov kvo->kvo_active++; 2411ebcddc72SAlan Cox else if (vm_page_inactive(m)) 24120ecee546SKonstantin Belousov kvo->kvo_inactive++; 2413ff87ae35SJohn Baldwin } 2414ff87ae35SJohn Baldwin 24150ecee546SKonstantin Belousov kvo->kvo_vn_fileid = 0; 24160ecee546SKonstantin Belousov kvo->kvo_vn_fsid = 0; 24170ecee546SKonstantin Belousov kvo->kvo_vn_fsid_freebsd11 = 0; 2418ff87ae35SJohn Baldwin freepath = NULL; 2419ff87ae35SJohn Baldwin fullpath = ""; 2420ff87ae35SJohn Baldwin vp = NULL; 2421ff87ae35SJohn Baldwin switch (obj->type) { 2422ff87ae35SJohn Baldwin case OBJT_DEFAULT: 24230ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_DEFAULT; 2424ff87ae35SJohn Baldwin break; 2425ff87ae35SJohn Baldwin case OBJT_VNODE: 24260ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_VNODE; 2427ff87ae35SJohn Baldwin vp = obj->handle; 2428ff87ae35SJohn Baldwin vref(vp); 2429ff87ae35SJohn Baldwin break; 2430ff87ae35SJohn Baldwin case OBJT_SWAP: 24310ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_SWAP; 2432ff87ae35SJohn Baldwin break; 2433ff87ae35SJohn Baldwin case OBJT_DEVICE: 24340ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_DEVICE; 2435ff87ae35SJohn Baldwin break; 2436ff87ae35SJohn Baldwin case OBJT_PHYS: 24370ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_PHYS; 2438ff87ae35SJohn Baldwin break; 2439ff87ae35SJohn Baldwin case OBJT_DEAD: 24400ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_DEAD; 2441ff87ae35SJohn Baldwin break; 2442ff87ae35SJohn Baldwin case OBJT_SG: 24430ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_SG; 2444ff87ae35SJohn Baldwin break; 2445ff87ae35SJohn Baldwin case OBJT_MGTDEVICE: 24460ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_MGTDEVICE; 2447ff87ae35SJohn Baldwin break; 2448ff87ae35SJohn Baldwin default: 24490ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_UNKNOWN; 2450ff87ae35SJohn Baldwin break; 
2451ff87ae35SJohn Baldwin } 2452ff87ae35SJohn Baldwin VM_OBJECT_RUNLOCK(obj); 2453ff87ae35SJohn Baldwin if (vp != NULL) { 2454ff87ae35SJohn Baldwin vn_fullpath(curthread, vp, &fullpath, &freepath); 2455ff87ae35SJohn Baldwin vn_lock(vp, LK_SHARED | LK_RETRY); 2456ff87ae35SJohn Baldwin if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { 24570ecee546SKonstantin Belousov kvo->kvo_vn_fileid = va.va_fileid; 24580ecee546SKonstantin Belousov kvo->kvo_vn_fsid = va.va_fsid; 24590ecee546SKonstantin Belousov kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; 246069921123SKonstantin Belousov /* truncate */ 2461ff87ae35SJohn Baldwin } 2462ff87ae35SJohn Baldwin vput(vp); 2463ff87ae35SJohn Baldwin } 2464ff87ae35SJohn Baldwin 24650ecee546SKonstantin Belousov strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); 2466ff87ae35SJohn Baldwin if (freepath != NULL) 2467ff87ae35SJohn Baldwin free(freepath, M_TEMP); 2468ff87ae35SJohn Baldwin 2469ff87ae35SJohn Baldwin /* Pack record size down */ 24700ecee546SKonstantin Belousov kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) 24710ecee546SKonstantin Belousov + strlen(kvo->kvo_path) + 1; 24720ecee546SKonstantin Belousov kvo->kvo_structsize = roundup(kvo->kvo_structsize, 2473ff87ae35SJohn Baldwin sizeof(uint64_t)); 24740ecee546SKonstantin Belousov error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); 2475ff87ae35SJohn Baldwin mtx_lock(&vm_object_list_mtx); 2476ff87ae35SJohn Baldwin if (error) 2477ff87ae35SJohn Baldwin break; 2478ff87ae35SJohn Baldwin } 2479ff87ae35SJohn Baldwin mtx_unlock(&vm_object_list_mtx); 24800ecee546SKonstantin Belousov free(kvo, M_TEMP); 2481ff87ae35SJohn Baldwin return (error); 2482ff87ae35SJohn Baldwin } 2483ff87ae35SJohn Baldwin SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | 2484ff87ae35SJohn Baldwin CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject", 2485ff87ae35SJohn Baldwin "List of VM objects"); 2486ff87ae35SJohn Baldwin 2487c7c34a24SBruce Evans #include "opt_ddb.h" 2488c3cb3e12SDavid Greenman #ifdef DDB 2489c7c34a24SBruce Evans #include <sys/kernel.h> 2490c7c34a24SBruce Evans 2491ce9edcf5SPoul-Henning Kamp #include <sys/cons.h> 2492c7c34a24SBruce Evans 2493c7c34a24SBruce Evans #include <ddb/ddb.h> 2494c7c34a24SBruce Evans 2495cac597e4SBruce Evans static int 24961b40f8c0SMatthew Dillon _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2497a1f6d91cSDavid Greenman { 2498a1f6d91cSDavid Greenman vm_map_t tmpm; 2499a1f6d91cSDavid Greenman vm_map_entry_t tmpe; 2500a1f6d91cSDavid Greenman vm_object_t obj; 2501a1f6d91cSDavid Greenman int entcount; 2502a1f6d91cSDavid Greenman 2503a1f6d91cSDavid Greenman if (map == 0) 2504a1f6d91cSDavid Greenman return 0; 2505a1f6d91cSDavid Greenman 2506a1f6d91cSDavid Greenman if (entry == 0) { 2507a1f6d91cSDavid Greenman tmpe = map->header.next; 2508a1f6d91cSDavid Greenman entcount = map->nentries; 2509a1f6d91cSDavid Greenman while (entcount-- && (tmpe != &map->header)) { 2510a1f6d91cSDavid Greenman if (_vm_object_in_map(map, object, tmpe)) { 2511a1f6d91cSDavid Greenman return 1; 2512a1f6d91cSDavid Greenman } 2513a1f6d91cSDavid Greenman tmpe = tmpe->next; 2514a1f6d91cSDavid Greenman } 25159fdfe602SMatthew Dillon } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 25169fdfe602SMatthew Dillon tmpm = entry->object.sub_map; 2517a1f6d91cSDavid Greenman tmpe = tmpm->header.next; 2518a1f6d91cSDavid Greenman entcount = tmpm->nentries; 2519a1f6d91cSDavid Greenman while (entcount-- && tmpe != &tmpm->header) { 2520a1f6d91cSDavid Greenman if 
(_vm_object_in_map(tmpm, object, tmpe)) { 2521a1f6d91cSDavid Greenman return 1; 2522a1f6d91cSDavid Greenman } 2523a1f6d91cSDavid Greenman tmpe = tmpe->next; 2524a1f6d91cSDavid Greenman } 25258aef1712SMatthew Dillon } else if ((obj = entry->object.vm_object) != NULL) { 252624a1cce3SDavid Greenman for (; obj; obj = obj->backing_object) 2527a1f6d91cSDavid Greenman if (obj == object) { 2528a1f6d91cSDavid Greenman return 1; 2529a1f6d91cSDavid Greenman } 2530a1f6d91cSDavid Greenman } 2531a1f6d91cSDavid Greenman return 0; 2532a1f6d91cSDavid Greenman } 2533a1f6d91cSDavid Greenman 2534cac597e4SBruce Evans static int 25351b40f8c0SMatthew Dillon vm_object_in_map(vm_object_t object) 2536a1f6d91cSDavid Greenman { 2537a1f6d91cSDavid Greenman struct proc *p; 25381005a129SJohn Baldwin 253960517fd1SJohn Baldwin /* sx_slock(&allproc_lock); */ 2540f67af5c9SXin LI FOREACH_PROC_IN_SYSTEM(p) { 2541a1f6d91cSDavid Greenman if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2542a1f6d91cSDavid Greenman continue; 2543553629ebSJake Burkholder if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 254460517fd1SJohn Baldwin /* sx_sunlock(&allproc_lock); */ 2545a1f6d91cSDavid Greenman return 1; 2546a1f6d91cSDavid Greenman } 2547553629ebSJake Burkholder } 254860517fd1SJohn Baldwin /* sx_sunlock(&allproc_lock); */ 2549a1f6d91cSDavid Greenman if (_vm_object_in_map(kernel_map, object, 0)) 2550a1f6d91cSDavid Greenman return 1; 2551a1f6d91cSDavid Greenman return 0; 2552a1f6d91cSDavid Greenman } 2553a1f6d91cSDavid Greenman 2554c7c34a24SBruce Evans DB_SHOW_COMMAND(vmochk, vm_object_check) 2555f708ef1bSPoul-Henning Kamp { 2556a1f6d91cSDavid Greenman vm_object_t object; 2557a1f6d91cSDavid Greenman 2558a1f6d91cSDavid Greenman /* 2559a1f6d91cSDavid Greenman * make sure that internal objs are in a map somewhere 2560a1f6d91cSDavid Greenman * and none have zero ref counts. 2561a1f6d91cSDavid Greenman */ 2562cc64b484SAlfred Perlstein TAILQ_FOREACH(object, &vm_object_list, object_list) { 256324a1cce3SDavid Greenman if (object->handle == NULL && 256424a1cce3SDavid Greenman (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2565a1f6d91cSDavid Greenman if (object->ref_count == 0) { 25663efc015bSPeter Wemm db_printf("vmochk: internal obj has zero ref count: %ld\n", 25673efc015bSPeter Wemm (long)object->size); 2568a1f6d91cSDavid Greenman } 2569a1f6d91cSDavid Greenman if (!vm_object_in_map(object)) { 2570fc62ef1fSBruce Evans db_printf( 2571fc62ef1fSBruce Evans "vmochk: internal obj is not in a map: " 2572fc62ef1fSBruce Evans "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2573fc62ef1fSBruce Evans object->ref_count, (u_long)object->size, 2574fc62ef1fSBruce Evans (u_long)object->size, 2575fc62ef1fSBruce Evans (void *)object->backing_object); 2576a1f6d91cSDavid Greenman } 2577a1f6d91cSDavid Greenman } 2578a1f6d91cSDavid Greenman } 2579a1f6d91cSDavid Greenman } 2580a1f6d91cSDavid Greenman 258126f9a767SRodney W. Grimes /* 2582df8bae1dSRodney W. Grimes * vm_object_print: [ debug ] 2583df8bae1dSRodney W. Grimes */ 2584c7c34a24SBruce Evans DB_SHOW_COMMAND(object, vm_object_print_static) 2585df8bae1dSRodney W. Grimes { 2586c7c34a24SBruce Evans /* XXX convert args. */ 2587c7c34a24SBruce Evans vm_object_t object = (vm_object_t)addr; 2588c7c34a24SBruce Evans boolean_t full = have_addr; 2589c7c34a24SBruce Evans 2590d031cff1SMatthew Dillon vm_page_t p; 2591df8bae1dSRodney W. Grimes 2592c7c34a24SBruce Evans /* XXX count is an (unused) arg. Avoid shadowing it. 
*/ 2593c7c34a24SBruce Evans #define count was_count 2594c7c34a24SBruce Evans 2595d031cff1SMatthew Dillon int count; 2596df8bae1dSRodney W. Grimes 2597df8bae1dSRodney W. Grimes if (object == NULL) 2598df8bae1dSRodney W. Grimes return; 2599df8bae1dSRodney W. Grimes 2600eb95adefSBruce Evans db_iprintf( 2601ef694c1aSEdward Tomasz Napierala "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", 2602e47cd172SMaxime Henrion object, (int)object->type, (uintmax_t)object->size, 26033364c323SKonstantin Belousov object->resident_page_count, object->ref_count, object->flags, 2604ef694c1aSEdward Tomasz Napierala object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); 2605e47cd172SMaxime Henrion db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 26061c7c3c6aSMatthew Dillon object->shadow_count, 2607eb95adefSBruce Evans object->backing_object ? object->backing_object->ref_count : 0, 2608e47cd172SMaxime Henrion object->backing_object, (uintmax_t)object->backing_object_offset); 2609df8bae1dSRodney W. Grimes 2610df8bae1dSRodney W. Grimes if (!full) 2611df8bae1dSRodney W. Grimes return; 2612df8bae1dSRodney W. Grimes 2613c7c34a24SBruce Evans db_indent += 2; 2614df8bae1dSRodney W. Grimes count = 0; 2615fc2ffbe6SPoul-Henning Kamp TAILQ_FOREACH(p, &object->memq, listq) { 2616df8bae1dSRodney W. Grimes if (count == 0) 2617c7c34a24SBruce Evans db_iprintf("memory:="); 2618df8bae1dSRodney W. Grimes else if (count == 6) { 2619c7c34a24SBruce Evans db_printf("\n"); 2620c7c34a24SBruce Evans db_iprintf(" ..."); 2621df8bae1dSRodney W. Grimes count = 0; 2622df8bae1dSRodney W. Grimes } else 2623c7c34a24SBruce Evans db_printf(","); 2624df8bae1dSRodney W. Grimes count++; 2625df8bae1dSRodney W. Grimes 2626e47cd172SMaxime Henrion db_printf("(off=0x%jx,page=0x%jx)", 2627e47cd172SMaxime Henrion (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2628df8bae1dSRodney W. Grimes } 2629df8bae1dSRodney W. Grimes if (count != 0) 2630c7c34a24SBruce Evans db_printf("\n"); 2631c7c34a24SBruce Evans db_indent -= 2; 2632df8bae1dSRodney W. Grimes } 26335070c7f8SJohn Dyson 2634c7c34a24SBruce Evans /* XXX. */ 2635c7c34a24SBruce Evans #undef count 2636c7c34a24SBruce Evans 2637c7c34a24SBruce Evans /* XXX need this non-static entry for calling from vm_map_print. 
*/ 26385070c7f8SJohn Dyson void 26391b40f8c0SMatthew Dillon vm_object_print( 26401b40f8c0SMatthew Dillon /* db_expr_t */ long addr, 26411b40f8c0SMatthew Dillon boolean_t have_addr, 26421b40f8c0SMatthew Dillon /* db_expr_t */ long count, 26431b40f8c0SMatthew Dillon char *modif) 2644c7c34a24SBruce Evans { 2645c7c34a24SBruce Evans vm_object_print_static(addr, have_addr, count, modif); 2646c7c34a24SBruce Evans } 2647c7c34a24SBruce Evans 2648c7c34a24SBruce Evans DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 26495070c7f8SJohn Dyson { 26505070c7f8SJohn Dyson vm_object_t object; 2651bb2ac86fSKonstantin Belousov vm_pindex_t fidx; 2652bb2ac86fSKonstantin Belousov vm_paddr_t pa; 2653bb2ac86fSKonstantin Belousov vm_page_t m, prev_m; 2654bb2ac86fSKonstantin Belousov int rcount, nl, c; 2655cc64b484SAlfred Perlstein 2656bb2ac86fSKonstantin Belousov nl = 0; 2657cc64b484SAlfred Perlstein TAILQ_FOREACH(object, &vm_object_list, object_list) { 2658fc62ef1fSBruce Evans db_printf("new object: %p\n", (void *)object); 26595070c7f8SJohn Dyson if (nl > 18) { 26605070c7f8SJohn Dyson c = cngetc(); 26615070c7f8SJohn Dyson if (c != ' ') 26625070c7f8SJohn Dyson return; 26635070c7f8SJohn Dyson nl = 0; 26645070c7f8SJohn Dyson } 26655070c7f8SJohn Dyson nl++; 26665070c7f8SJohn Dyson rcount = 0; 26675070c7f8SJohn Dyson fidx = 0; 2668bb2ac86fSKonstantin Belousov pa = -1; 2669bb2ac86fSKonstantin Belousov TAILQ_FOREACH(m, &object->memq, listq) { 2670bb2ac86fSKonstantin Belousov if (m->pindex > 128) 2671bb2ac86fSKonstantin Belousov break; 2672bb2ac86fSKonstantin Belousov if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && 2673bb2ac86fSKonstantin Belousov prev_m->pindex + 1 != m->pindex) { 26745070c7f8SJohn Dyson if (rcount) { 26753efc015bSPeter Wemm db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 26763efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 26775070c7f8SJohn Dyson if (nl > 18) { 26785070c7f8SJohn Dyson c = cngetc(); 26795070c7f8SJohn Dyson if (c != ' ') 26805070c7f8SJohn Dyson return; 26815070c7f8SJohn Dyson nl = 0; 26825070c7f8SJohn Dyson } 26835070c7f8SJohn Dyson nl++; 26845070c7f8SJohn Dyson rcount = 0; 26855070c7f8SJohn Dyson } 26865070c7f8SJohn Dyson } 26875070c7f8SJohn Dyson if (rcount && 26885070c7f8SJohn Dyson (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 26895070c7f8SJohn Dyson ++rcount; 26905070c7f8SJohn Dyson continue; 26915070c7f8SJohn Dyson } 26925070c7f8SJohn Dyson if (rcount) { 26932446e4f0SAlan Cox db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 26943efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 26955070c7f8SJohn Dyson if (nl > 18) { 26965070c7f8SJohn Dyson c = cngetc(); 26975070c7f8SJohn Dyson if (c != ' ') 26985070c7f8SJohn Dyson return; 26995070c7f8SJohn Dyson nl = 0; 27005070c7f8SJohn Dyson } 27015070c7f8SJohn Dyson nl++; 27025070c7f8SJohn Dyson } 2703bb2ac86fSKonstantin Belousov fidx = m->pindex; 27045070c7f8SJohn Dyson pa = VM_PAGE_TO_PHYS(m); 27055070c7f8SJohn Dyson rcount = 1; 27065070c7f8SJohn Dyson } 27075070c7f8SJohn Dyson if (rcount) { 27083efc015bSPeter Wemm db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 27093efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 27105070c7f8SJohn Dyson if (nl > 18) { 27115070c7f8SJohn Dyson c = cngetc(); 27125070c7f8SJohn Dyson if (c != ' ') 27135070c7f8SJohn Dyson return; 27145070c7f8SJohn Dyson nl = 0; 27155070c7f8SJohn Dyson } 27165070c7f8SJohn Dyson nl++; 27175070c7f8SJohn Dyson } 27185070c7f8SJohn Dyson } 27195070c7f8SJohn Dyson } 2720c3cb3e12SDavid Greenman #endif /* DDB */ 2721
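/*
 * Editorial addition: a minimal caller sketch for vm_object_populate()
 * above.  Nothing below exists in the original file; "prefill_object",
 * "obj", and "npages" are hypothetical names used only for illustration.
 * The sketch assumes a pager-backed object created elsewhere and simply
 * shows the locking protocol the function expects.
 */
static boolean_t
prefill_object(vm_object_t obj, vm_pindex_t npages)
{
	boolean_t all_valid;

	/*
	 * vm_object_populate() asserts the object write lock and returns
	 * TRUE only when every page in [0, npages) could be made valid.
	 */
	VM_OBJECT_WLOCK(obj);
	all_valid = vm_object_populate(obj, 0, npages);
	VM_OBJECT_WUNLOCK(obj);
	return (all_valid);
}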
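/*
 * Editorial addition: a hypothetical use of vm_object_coalesce(), loosely
 * modeled on the way a map-entry extension might try to reuse the object
 * backing the preceding entry.  "try_extend_backing_object" and its
 * parameters are illustrative names, not taken from this file; sizes are
 * byte counts, as expected by vm_object_coalesce(), and the object is
 * passed unlocked per the "Conditions" in its header comment.
 */
static boolean_t
try_extend_backing_object(vm_object_t prev_obj, vm_ooffset_t prev_offset,
    vm_size_t prev_size, vm_size_t grow_size)
{

	/*
	 * Returns TRUE when prev_obj can absorb the grow_size bytes that
	 * immediately follow the existing [prev_offset, prev_offset +
	 * prev_size) range, so the caller may extend its mapping without
	 * allocating a second object.  Passing FALSE for "reserved" asks
	 * the routine to reserve swap for the extension itself.
	 */
	return (vm_object_coalesce(prev_obj, prev_offset, prev_size,
	    grow_size, FALSE));
}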
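/*
 * Editorial addition: a sketch of a hypothetical userland consumer of the
 * "vm.objects" sysctl exported above.  It would live in its own program,
 * not in this file, and it assumes struct kinfo_vmobject from
 * <sys/user.h>.  It follows the two-step protocol implemented by
 * sysctl_vm_object_list(): a NULL-buffer probe to size the result (the
 * handler pads its estimate by about 10%), then a second call returning
 * variable-length records packed back to back and walked via
 * kvo_structsize.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct kinfo_vmobject *kvo;
	char *buf, *bp;
	size_t len;

	if (sysctlbyname("vm.objects", NULL, &len, NULL, 0) != 0)
		err(1, "vm.objects (size probe)");
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctlbyname("vm.objects", buf, &len, NULL, 0) != 0)
		err(1, "vm.objects");
	/* Records are packed; kvo_structsize gives each record's length. */
	for (bp = buf; bp < buf + len; bp += kvo->kvo_structsize) {
		kvo = (struct kinfo_vmobject *)(void *)bp;
		printf("type %d size %ju resident %ju ref %ju path %s\n",
		    (int)kvo->kvo_type, (uintmax_t)kvo->kvo_size,
		    (uintmax_t)kvo->kvo_resident,
		    (uintmax_t)kvo->kvo_ref_count, kvo->kvo_path);
	}
	free(buf);
	return (0);
}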
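/*
 * Editorial usage note for the DDB commands defined above (not compiled
 * code): DB_SHOW_COMMAND() makes them available from the in-kernel
 * debugger prompt as "show" subcommands, e.g.
 *
 *	db> show vmochk
 *	db> show object <vm_object address>
 *	db> show vmopag
 *
 * "show object" prints the per-page listing only when an explicit
 * address is supplied (have_addr), and "show vmopag" pauses after
 * roughly a screenful, continuing only if the space key is pressed, as
 * implemented with cngetc() above.
 */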