/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *clearobjflags,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *clearobjflags);
static void	vm_object_qcollapse(vm_object_t object);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
    "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	object->ref_count = 0;
	vm_radix_init(&object->rtree);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;

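	/*
	 * Link the new item into the global object list.  The zone is
	 * created with UMA_ZONE_NOFREE (see vm_object_init() below), so
	 * this memory, and its list linkage, stay type-stable.
	 */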
	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}

static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	switch (type) {
	case OBJT_DEAD:
		panic("_vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_DEFAULT:
	case OBJT_SWAP:
		object->flags = OBJ_ONEMAPPING;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		object->flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		object->flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		object->flags = 0;
		break;
	default:
		panic("_vm_object_allocate: type %d is undefined", type);
	}
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->cred = NULL;
	object->charge = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	rw_init(&kmem_object->lock, "kmem vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	vm_radix_zinit();
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}

/*
 * Sets the default memory attribute for the specified object.  Pages
 * that are allocated to this object are by default assigned this memory
 * attribute.
 *
 * Presently, this function must be called before any pages are allocated
 * to the object.  In the future, this requirement may be relaxed for
 * "default" and "swap" objects.
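 *
 * Returns KERN_SUCCESS if the attribute was set, KERN_FAILURE if the
 * object already has resident pages, and KERN_INVALID_ARGUMENT if the
 * object is dead.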
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_MGTDEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	default:
		panic("vm_object_set_memattr: object %p is of undefined type",
		    object);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vn_printf(vp, "vm_object_vndeallocate ");
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
		umtx_shm_object_terminated(object);

	/*
	 * The test for text of vp vnode does not need a bypass to
	 * reach right VV_TEXT there, since it is obtained from
	 * object->handle.
	 */
	if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) {
		object->ref_count--;
		VM_OBJECT_WUNLOCK(object);
		/* vrele may need the vnode lock. */
		vrele(vp);
	} else {
		vhold(vp);
		VM_OBJECT_WUNLOCK(object);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vdrop(vp);
		VM_OBJECT_WLOCK(object);
		object->ref_count--;
		if (object->type == OBJT_DEAD) {
			VM_OBJECT_WUNLOCK(object);
			VOP_UNLOCK(vp, 0);
		} else {
			if (object->ref_count == 0)
				VOP_UNSET_TEXT(vp);
			VM_OBJECT_WUNLOCK(object);
			vput(vp);
		}
	}
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	struct vnode *vp;

	while (object != NULL) {
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_WUNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS) != 0) {
				vp = object->un_pager.swp.swp_tmpfs;
				vhold(vp);
				VM_OBJECT_WUNLOCK(object);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				if (object->type == OBJT_DEAD ||
				    object->ref_count != 1) {
					VM_OBJECT_WUNLOCK(object);
					VOP_UNLOCK(vp, 0);
					vdrop(vp);
					return;
				}
				if ((object->flags & OBJ_TMPFS) != 0)
					VOP_UNSET_TEXT(vp);
				VOP_UNLOCK(vp, 0);
				vdrop(vp);
			}
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS_NODE) == 0))) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
				    ("shadowed tmpfs v_object %p", object));
				if (!VM_OBJECT_TRYWLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_WUNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_WUNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_WUNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						VM_OBJECT_SLEEP(object, object,
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_WLOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_WUNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_WUNLOCK(object);
					continue;
				}
				VM_OBJECT_WUNLOCK(robject);
			}
			VM_OBJECT_WUNLOCK(object);
			return;
		}
doterm:
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			VM_OBJECT_WLOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			VM_OBJECT_WUNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_WUNLOCK(object);
		object = temp;
	}
}

/*
 * vm_object_destroy removes the object from the global object list
 * and frees the space for the object.
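 *
 * The object's swap accounting charge, if any, is released back to the
 * owning credential as well.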
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 * vm_object_terminate_pages removes any remaining pageable pages
 * from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	vm_page_t p, p_next;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to any empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		vm_page_lock(p);
		/*
		 * Optimize the page's removal from the object by resetting
		 * its "object" field.  Specifically, if the page is not
		 * wired, then the effect of this assignment is that
		 * vm_page_free()'s call to vm_page_remove() will return
		 * immediately without modifying the page or the object.
		 */
		p->object = NULL;
		if (p->wire_count == 0) {
			vm_page_free(p);
			VM_CNT_INC(v_pfree);
		}
		vm_page_unlock(p);
	}
	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
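	 *
	 * Reclaiming the radix trie with vm_radix_reclaim_allnodes() frees
	 * all of its nodes in a single pass, which is why the loop above
	 * did not detach the pages from the object one at a time.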
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}
}

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 * This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
{

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
		*clearobjflags = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t clearobjflags, eio, res;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
	 * objects.  The check below prevents the function from
	 * operating on non-vnode objects.
	 */
	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
	    object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
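	/*
	 * clearobjflags records whether OBJ_MIGHTBEDIRTY may be cleared
	 * once the scan completes: it starts out TRUE only when the whole
	 * object is being cleaned, and is reset whenever a page has to be
	 * skipped or a write fails.
	 */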
	clearobjflags = tstart == 0 && tend >= object->size;
	res = TRUE;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (p->valid == 0)
			continue;
		if (vm_page_sleep_if_busy(p, "vpcwai")) {
			if (object->generation != curgeneration) {
				if ((flags & OBJPC_SYNC) != 0)
					goto rescan;
				else
					clearobjflags = FALSE;
			}
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
			continue;

		n = vm_object_page_collect_flush(object, p, pagerflags,
		    flags, &clearobjflags, &eio);
		if (eio) {
			res = FALSE;
			clearobjflags = FALSE;
		}
		if (object->generation != curgeneration) {
			if ((flags & OBJPC_SYNC) != 0)
				goto rescan;
			else
				clearobjflags = FALSE;
		}

		/*
		 * If the VOP_PUTPAGES() did a truncated write, so
		 * that even the first page of the run is not fully
		 * written, vm_pageout_flush() returns 0 as the run
		 * length.  Since the condition that caused truncated
		 * write may be permanent, e.g. exhausted free space,
		 * accepting n == 0 would cause an infinite loop.
		 *
		 * Forwarding the iterator leaves the unwritten page
		 * behind, but there is not much we can do there if
		 * filesystem refuses to write it.
		 */
		if (n == 0) {
			n = 1;
			clearobjflags = FALSE;
		}
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	if (clearobjflags)
		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
	return (res);
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *clearobjflags, boolean_t *eio)
{
	vm_page_t ma[vm_pageout_page_count], p_first, tp;
	int count, i, mreq, runlen;

	vm_page_lock_assert(p, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	count = 1;
	mreq = 0;

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
	}

	for (p_first = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(p_first);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
		p_first = tp;
		mreq++;
	}

	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
		ma[i] = tp;

	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
	return (runlen);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
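 *
 * Returns TRUE if all of the requested pages were written out
 * successfully (or no write-back was required), and FALSE if the pager
 * or VOP_FSYNC() reported an error.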
 */
boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int error, flags, fsync_after;
	boolean_t res;

	if (object == NULL)
		return (TRUE);
	res = TRUE;
	error = 0;
	VM_OBJECT_WLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_WLOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_WUNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (syncio && !invalidate && offset == 0 &&
		    atop(size) == object->size) {
			/*
			 * If syncing the whole mapping of the file,
			 * it is faster to schedule all the writes in
			 * async mode, also allowing the clustering,
			 * and then wait for i/o to complete.
			 */
			flags = 0;
			fsync_after = TRUE;
		} else {
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
			fsync_after = FALSE;
		}
		VM_OBJECT_WLOCK(object);
		res = vm_object_page_clean(object, offset, offset + size,
		    flags);
		VM_OBJECT_WUNLOCK(object);
		if (fsync_after)
			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error != 0)
			res = FALSE;
		VM_OBJECT_WLOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_WUNLOCK(object);
	return (res);
}

/*
 * Determine whether the given advice can be applied to the object.  Advice is
 * not applied to unmanaged pages since they never belong to page queues, and
 * since MADV_FREE is destructive, it can apply only to anonymous pages that
 * have been mapped at most once.
 */
static bool
vm_object_advice_applies(vm_object_t object, int advice)
{

	if ((object->flags & OBJ_UNMANAGED) != 0)
		return (false);
	if (advice != MADV_FREE)
		return (true);
	return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) &&
	    (object->flags & OBJ_ONEMAPPING) != 0);
}

static void
vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
    vm_size_t size)
{

	if (advice == MADV_FREE && object->type == OBJT_SWAP)
		swap_pager_freespace(object, pindex, size);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
1126193b9358SAlan Cox * 1127193b9358SAlan Cox * MADV_DONTNEED (any object) 1128193b9358SAlan Cox * 1129193b9358SAlan Cox * Deactivate the specified pages if they are resident. 1130193b9358SAlan Cox * 1131193b9358SAlan Cox * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 1132193b9358SAlan Cox * OBJ_ONEMAPPING only) 1133193b9358SAlan Cox * 1134193b9358SAlan Cox * Deactivate and clean the specified pages if they are 1135193b9358SAlan Cox * resident. This permits the process to reuse the pages 1136193b9358SAlan Cox * without faulting or the kernel to reclaim the pages 1137193b9358SAlan Cox * without I/O. 1138867a482dSJohn Dyson */ 1139867a482dSJohn Dyson void 114092a59946SJohn Baldwin vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, 1141c2655a40SMark Johnston int advice) 1142867a482dSJohn Dyson { 114392a59946SJohn Baldwin vm_pindex_t tpindex; 114434567de7SAlan Cox vm_object_t backing_object, tobject; 1145aa3650eaSMark Johnston vm_page_t m, tm; 1146867a482dSJohn Dyson 1147867a482dSJohn Dyson if (object == NULL) 1148867a482dSJohn Dyson return; 1149c2655a40SMark Johnston 11506e20a165SJohn Dyson relookup: 1151aa3650eaSMark Johnston VM_OBJECT_WLOCK(object); 1152aa3650eaSMark Johnston if (!vm_object_advice_applies(object, advice)) { 1153aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1154aa3650eaSMark Johnston return; 11556e20a165SJohn Dyson } 1156aa3650eaSMark Johnston for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { 1157aa3650eaSMark Johnston tobject = object; 1158c2655a40SMark Johnston 11591ce137beSMatthew Dillon /* 1160aa3650eaSMark Johnston * If the next page isn't resident in the top-level object, we 1161aa3650eaSMark Johnston * need to search the shadow chain. When applying MADV_FREE, we 1162aa3650eaSMark Johnston * take care to release any swap space used to store 1163aa3650eaSMark Johnston * non-resident pages. 1164aa3650eaSMark Johnston */ 1165aa3650eaSMark Johnston if (m == NULL || pindex < m->pindex) { 1166aa3650eaSMark Johnston /* 1167aa3650eaSMark Johnston * Optimize a common case: if the top-level object has 1168aa3650eaSMark Johnston * no backing object, we can skip over the non-resident 1169aa3650eaSMark Johnston * range in constant time. 11701ce137beSMatthew Dillon */ 1171c2655a40SMark Johnston if (object->backing_object == NULL) { 1172c2655a40SMark Johnston tpindex = (m != NULL && m->pindex < end) ? 1173c2655a40SMark Johnston m->pindex : end; 1174aa3650eaSMark Johnston vm_object_madvise_freespace(object, advice, 1175aa3650eaSMark Johnston pindex, tpindex - pindex); 1176c2655a40SMark Johnston if ((pindex = tpindex) == end) 1177c2655a40SMark Johnston break; 1178aa3650eaSMark Johnston goto next_page; 1179aa3650eaSMark Johnston } 1180aa3650eaSMark Johnston 1181aa3650eaSMark Johnston tpindex = pindex; 1182aa3650eaSMark Johnston do { 1183aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, 1184aa3650eaSMark Johnston tpindex, 1); 11851ce137beSMatthew Dillon /* 1186aa3650eaSMark Johnston * Prepare to search the next object in the 1187aa3650eaSMark Johnston * chain. 
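			 * tpindex accumulates each level's
			 * backing_object_offset so that it always names
			 * the same logical page that "pindex" names in
			 * the top-level object.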
11881ce137beSMatthew Dillon */ 118934567de7SAlan Cox backing_object = tobject->backing_object; 119034567de7SAlan Cox if (backing_object == NULL) 1191aa3650eaSMark Johnston goto next_pindex; 119289f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 1193aa3650eaSMark Johnston tpindex += 1194aa3650eaSMark Johnston OFF_TO_IDX(tobject->backing_object_offset); 11959b98b796SAlan Cox if (tobject != object) 119689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 119734567de7SAlan Cox tobject = backing_object; 1198aa3650eaSMark Johnston if (!vm_object_advice_applies(tobject, advice)) 1199aa3650eaSMark Johnston goto next_pindex; 1200aa3650eaSMark Johnston } while ((tm = vm_page_lookup(tobject, tpindex)) == 1201aa3650eaSMark Johnston NULL); 1202aa3650eaSMark Johnston } else { 1203aa3650eaSMark Johnston next_page: 1204aa3650eaSMark Johnston tm = m; 1205aa3650eaSMark Johnston m = TAILQ_NEXT(m, listq); 1206c2655a40SMark Johnston } 1207c2655a40SMark Johnston 1208867a482dSJohn Dyson /* 12096a2a3d73SAlan Cox * If the page is not in a normal state, skip it. 1210867a482dSJohn Dyson */ 1211aa3650eaSMark Johnston if (tm->valid != VM_PAGE_BITS_ALL) 1212aa3650eaSMark Johnston goto next_pindex; 1213aa3650eaSMark Johnston vm_page_lock(tm); 1214aa3650eaSMark Johnston if (tm->hold_count != 0 || tm->wire_count != 0) { 1215aa3650eaSMark Johnston vm_page_unlock(tm); 1216aa3650eaSMark Johnston goto next_pindex; 12176e20a165SJohn Dyson } 1218aa3650eaSMark Johnston KASSERT((tm->flags & PG_FICTITIOUS) == 0, 1219aa3650eaSMark Johnston ("vm_object_madvise: page %p is fictitious", tm)); 1220aa3650eaSMark Johnston KASSERT((tm->oflags & VPO_UNMANAGED) == 0, 1221aa3650eaSMark Johnston ("vm_object_madvise: page %p is not managed", tm)); 1222aa3650eaSMark Johnston if (vm_page_busied(tm)) { 1223aa3650eaSMark Johnston if (object != tobject) 1224aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(tobject); 1225aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1226c2655a40SMark Johnston if (advice == MADV_WILLNEED) { 1227b11b56b5SAlan Cox /* 1228b11b56b5SAlan Cox * Reference the page before unlocking and 1229b11b56b5SAlan Cox * sleeping so that the page daemon is less 1230b11b56b5SAlan Cox * likely to reclaim it. 1231b11b56b5SAlan Cox */ 1232aa3650eaSMark Johnston vm_page_aflag_set(tm, PGA_REFERENCED); 1233567e51e1SAlan Cox } 1234aa3650eaSMark Johnston vm_page_busy_sleep(tm, "madvpo", false); 12356e20a165SJohn Dyson goto relookup; 123634567de7SAlan Cox } 1237aa3650eaSMark Johnston vm_page_advise(tm, advice); 1238aa3650eaSMark Johnston vm_page_unlock(tm); 1239aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); 1240aa3650eaSMark Johnston next_pindex: 12419b98b796SAlan Cox if (tobject != object) 124289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 1243867a482dSJohn Dyson } 124489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1245867a482dSJohn Dyson } 1246867a482dSJohn Dyson 1247867a482dSJohn Dyson /* 1248df8bae1dSRodney W. Grimes * vm_object_shadow: 1249df8bae1dSRodney W. Grimes * 1250df8bae1dSRodney W. Grimes * Create a new object which is backed by the 1251df8bae1dSRodney W. Grimes * specified existing object range. The source 1252df8bae1dSRodney W. Grimes * object reference is deallocated. 1253df8bae1dSRodney W. Grimes * 1254df8bae1dSRodney W. Grimes * The new object and offset into that object 1255df8bae1dSRodney W. Grimes * are returned in the source parameters. 1256df8bae1dSRodney W. Grimes */ 125726f9a767SRodney W. 
Grimes void 12581b40f8c0SMatthew Dillon vm_object_shadow( 12591b40f8c0SMatthew Dillon vm_object_t *object, /* IN/OUT */ 12601b40f8c0SMatthew Dillon vm_ooffset_t *offset, /* IN/OUT */ 12611b40f8c0SMatthew Dillon vm_size_t length) 1262df8bae1dSRodney W. Grimes { 1263d031cff1SMatthew Dillon vm_object_t source; 1264d031cff1SMatthew Dillon vm_object_t result; 1265df8bae1dSRodney W. Grimes 1266df8bae1dSRodney W. Grimes source = *object; 1267df8bae1dSRodney W. Grimes 1268df8bae1dSRodney W. Grimes /* 12699a2f6362SAlan Cox * Don't create the new object if the old object isn't shared. 12709a2f6362SAlan Cox */ 1271570a2f4aSAlan Cox if (source != NULL) { 127289f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 1273570a2f4aSAlan Cox if (source->ref_count == 1 && 12749a2f6362SAlan Cox source->handle == NULL && 12759a2f6362SAlan Cox (source->type == OBJT_DEFAULT || 12769917e010SAlan Cox source->type == OBJT_SWAP)) { 127789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 12789a2f6362SAlan Cox return; 12799917e010SAlan Cox } 128089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1281570a2f4aSAlan Cox } 12829a2f6362SAlan Cox 12839a2f6362SAlan Cox /* 1284570a2f4aSAlan Cox * Allocate a new object with the given length. 1285df8bae1dSRodney W. Grimes */ 12860cc74f14SAlan Cox result = vm_object_allocate(OBJT_DEFAULT, atop(length)); 1287df8bae1dSRodney W. Grimes 1288df8bae1dSRodney W. Grimes /* 12890d94caffSDavid Greenman * The new object shadows the source object, adding a reference to it. 12900d94caffSDavid Greenman * Our caller changes his reference to point to the new object, 12910d94caffSDavid Greenman * removing a reference to the source object. Net result: no change 12920d94caffSDavid Greenman * of reference count. 12939b09fe24SMatthew Dillon * 12949b09fe24SMatthew Dillon * Try to optimize the result object's page color when shadowing 1295956f3135SPhilippe Charnier * in order to maintain page coloring consistency in the combined 12969b09fe24SMatthew Dillon * shadowed object. 1297df8bae1dSRodney W. Grimes */ 129824a1cce3SDavid Greenman result->backing_object = source; 12999174ca7bSTor Egge /* 13009174ca7bSTor Egge * Store the offset into the source object, and fix up the offset into 13019174ca7bSTor Egge * the new object. 13029174ca7bSTor Egge */ 13039174ca7bSTor Egge result->backing_object_offset = *offset; 1304570a2f4aSAlan Cox if (source != NULL) { 130589f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 13061c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); 1307eaf13dd7SJohn Dyson source->shadow_count++; 1308f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 13097b54b1a9SAlan Cox result->flags |= source->flags & OBJ_COLORED; 1310f8a47341SAlan Cox result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & 1311f8a47341SAlan Cox ((1 << (VM_NFREEORDER - 1)) - 1); 1312f8a47341SAlan Cox #endif 131389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1314de5f6a77SJohn Dyson } 1315df8bae1dSRodney W. Grimes 1316df8bae1dSRodney W. Grimes 1317df8bae1dSRodney W. Grimes /* 1318df8bae1dSRodney W. Grimes * Return the new things 1319df8bae1dSRodney W. Grimes */ 1320df8bae1dSRodney W. Grimes *offset = 0; 1321df8bae1dSRodney W. Grimes *object = result; 1322df8bae1dSRodney W. Grimes } 1323df8bae1dSRodney W. Grimes 1324c5aaa06dSAlan Cox /* 1325c5aaa06dSAlan Cox * vm_object_split: 1326c5aaa06dSAlan Cox * 1327c5aaa06dSAlan Cox * Split the pages in a map entry into a new object. 
This affords 1328c5aaa06dSAlan Cox * easier removal of unused pages, and keeps object inheritance from 1329c5aaa06dSAlan Cox * being a negative impact on memory usage. 1330c5aaa06dSAlan Cox */ 1331c5aaa06dSAlan Cox void 1332c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry) 1333c5aaa06dSAlan Cox { 133473000556SAlan Cox vm_page_t m, m_next; 1335c5aaa06dSAlan Cox vm_object_t orig_object, new_object, source; 133673000556SAlan Cox vm_pindex_t idx, offidxstart; 133773000556SAlan Cox vm_size_t size; 1338c5aaa06dSAlan Cox 1339c5aaa06dSAlan Cox orig_object = entry->object.vm_object; 1340c5aaa06dSAlan Cox if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 1341c5aaa06dSAlan Cox return; 1342c5aaa06dSAlan Cox if (orig_object->ref_count <= 1) 1343c5aaa06dSAlan Cox return; 134489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 1345c5aaa06dSAlan Cox 13464da9f125SAlan Cox offidxstart = OFF_TO_IDX(entry->offset); 134795442adfSAlan Cox size = atop(entry->end - entry->start); 1348c5aaa06dSAlan Cox 13494da9f125SAlan Cox /* 13504da9f125SAlan Cox * If swap_pager_copy() is later called, it will convert new_object 13514da9f125SAlan Cox * into a swap object. 13524da9f125SAlan Cox */ 13534da9f125SAlan Cox new_object = vm_object_allocate(OBJT_DEFAULT, size); 1354c5aaa06dSAlan Cox 1355c5474b8fSAlan Cox /* 1356c5474b8fSAlan Cox * At this point, the new object is still private, so the order in 1357c5474b8fSAlan Cox * which the original and new objects are locked does not matter. 1358c5474b8fSAlan Cox */ 135989f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 136089f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 1361c5aaa06dSAlan Cox source = orig_object->backing_object; 1362c5aaa06dSAlan Cox if (source != NULL) { 136389f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 136419c244d0SAlan Cox if ((source->flags & OBJ_DEAD) != 0) { 136589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 136689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 136789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 136819c244d0SAlan Cox vm_object_deallocate(new_object); 136989f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 137019c244d0SAlan Cox return; 137119c244d0SAlan Cox } 13721c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, 1373c5aaa06dSAlan Cox new_object, shadow_list); 13748e3a76fbSAlan Cox source->shadow_count++; 1375b921a12bSAlan Cox vm_object_reference_locked(source); /* for new_object */ 1376c5aaa06dSAlan Cox vm_object_clear_flag(source, OBJ_ONEMAPPING); 137789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1378c5aaa06dSAlan Cox new_object->backing_object_offset = 13794da9f125SAlan Cox orig_object->backing_object_offset + entry->offset; 1380c5aaa06dSAlan Cox new_object->backing_object = source; 1381c5aaa06dSAlan Cox } 1382ef694c1aSEdward Tomasz Napierala if (orig_object->cred != NULL) { 1383ef694c1aSEdward Tomasz Napierala new_object->cred = orig_object->cred; 1384ef694c1aSEdward Tomasz Napierala crhold(orig_object->cred); 13853364c323SKonstantin Belousov new_object->charge = ptoa(size); 13863364c323SKonstantin Belousov KASSERT(orig_object->charge >= ptoa(size), 13873364c323SKonstantin Belousov ("orig_object->charge < 0")); 13883364c323SKonstantin Belousov orig_object->charge -= ptoa(size); 13893364c323SKonstantin Belousov } 1390c5aaa06dSAlan Cox retry: 1391b382c10aSKonstantin Belousov m = vm_page_find_least(orig_object, offidxstart); 139273000556SAlan Cox for (; m != NULL && (idx = m->pindex - offidxstart) < size; 139373000556SAlan Cox m = m_next) { 139473000556SAlan Cox m_next = TAILQ_NEXT(m, listq); 
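		/*
		 * "m_next" was captured above because a successful
		 * vm_page_rename() below moves "m" onto new_object's
		 * page list, after which TAILQ_NEXT() on "m" would
		 * walk the wrong queue.
		 */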
1395c5aaa06dSAlan Cox 1396c5aaa06dSAlan Cox /* 1397c5aaa06dSAlan Cox * We must wait for pending I/O to complete before we can 1398c5aaa06dSAlan Cox * rename the page. 1399c5aaa06dSAlan Cox * 1400c5aaa06dSAlan Cox * We do not have to VM_PROT_NONE the page as mappings should 1401c5aaa06dSAlan Cox * not be changed by this operation. 1402c5aaa06dSAlan Cox */ 1403c7aebda8SAttilio Rao if (vm_page_busied(m)) { 140489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1405c7aebda8SAttilio Rao vm_page_lock(m); 1406c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 14075975e53dSKonstantin Belousov vm_page_busy_sleep(m, "spltwt", false); 1408c7aebda8SAttilio Rao VM_OBJECT_WLOCK(orig_object); 140989f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1410c5aaa06dSAlan Cox goto retry; 1411de33beddSAlan Cox } 1412e946b949SAttilio Rao 14133453bca8SAlan Cox /* vm_page_rename() will dirty the page. */ 1414e946b949SAttilio Rao if (vm_page_rename(m, new_object, idx)) { 1415e946b949SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1416e946b949SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 1417e946b949SAttilio Rao VM_WAIT; 1418e946b949SAttilio Rao VM_OBJECT_WLOCK(orig_object); 1419e946b949SAttilio Rao VM_OBJECT_WLOCK(new_object); 1420e946b949SAttilio Rao goto retry; 1421e946b949SAttilio Rao } 1422b5f359b7SAlan Cox #if VM_NRESERVLEVEL > 0 1423b5f359b7SAlan Cox /* 1424b5f359b7SAlan Cox * If some of the reservation's allocated pages remain with 1425b5f359b7SAlan Cox * the original object, then transferring the reservation to 1426b5f359b7SAlan Cox * the new object is neither particularly beneficial nor 1427b5f359b7SAlan Cox * particularly harmful as compared to leaving the reservation 1428b5f359b7SAlan Cox * with the original object. If, however, all of the 1429b5f359b7SAlan Cox * reservation's allocated pages are transferred to the new 1430b5f359b7SAlan Cox * object, then transferring the reservation is typically 1431b5f359b7SAlan Cox * beneficial. Determining which of these two cases applies 1432b5f359b7SAlan Cox * would be more costly than unconditionally renaming the 1433b5f359b7SAlan Cox * reservation. 1434b5f359b7SAlan Cox */ 1435b5f359b7SAlan Cox vm_reserv_rename(m, new_object, orig_object, offidxstart); 1436b5f359b7SAlan Cox #endif 1437dfd55c0cSAttilio Rao if (orig_object->type == OBJT_SWAP) 1438c7aebda8SAttilio Rao vm_page_xbusy(m); 1439c5aaa06dSAlan Cox } 1440d7a013c3SAlan Cox if (orig_object->type == OBJT_SWAP) { 1441c5aaa06dSAlan Cox /* 1442c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case the orig_object's 1443c7c8dd7eSAlan Cox * and new_object's locks are released and reacquired. 
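		 * The pages in the transferred range were exclusively
		 * busied above so that they remain stable while the
		 * locks are dropped.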
1444c5aaa06dSAlan Cox */ 1445c5aaa06dSAlan Cox swap_pager_copy(orig_object, new_object, offidxstart, 0); 1446dfd55c0cSAttilio Rao TAILQ_FOREACH(m, &new_object->memq, listq) 1447c7aebda8SAttilio Rao vm_page_xunbusy(m); 1448c5aaa06dSAlan Cox } 144989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 145089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1451c5aaa06dSAlan Cox entry->object.vm_object = new_object; 1452c5aaa06dSAlan Cox entry->offset = 0LL; 1453c5aaa06dSAlan Cox vm_object_deallocate(orig_object); 145489f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1455c5aaa06dSAlan Cox } 1456c5aaa06dSAlan Cox 14572ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_NOWAIT 0x0002 14582ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_WAIT 0x0004 14592ad1a3f7SMatthew Dillon 146099a1570aSKonstantin Belousov static vm_page_t 14614cc8daf7SConrad Meyer vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next, 146299a1570aSKonstantin Belousov int op) 146399a1570aSKonstantin Belousov { 146499a1570aSKonstantin Belousov vm_object_t backing_object; 146599a1570aSKonstantin Belousov 146699a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(object); 146799a1570aSKonstantin Belousov backing_object = object->backing_object; 146899a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(backing_object); 146999a1570aSKonstantin Belousov 147099a1570aSKonstantin Belousov KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p)); 147199a1570aSKonstantin Belousov KASSERT(p == NULL || p->object == object || p->object == backing_object, 147299a1570aSKonstantin Belousov ("invalid ownership %p %p %p", p, object, backing_object)); 147399a1570aSKonstantin Belousov if ((op & OBSC_COLLAPSE_NOWAIT) != 0) 147499a1570aSKonstantin Belousov return (next); 147599a1570aSKonstantin Belousov if (p != NULL) 147699a1570aSKonstantin Belousov vm_page_lock(p); 147799a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 147899a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(backing_object); 147999a1570aSKonstantin Belousov if (p == NULL) 148099a1570aSKonstantin Belousov VM_WAIT; 148199a1570aSKonstantin Belousov else 14825975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmocol", false); 148399a1570aSKonstantin Belousov VM_OBJECT_WLOCK(object); 148499a1570aSKonstantin Belousov VM_OBJECT_WLOCK(backing_object); 148599a1570aSKonstantin Belousov return (TAILQ_FIRST(&backing_object->memq)); 148699a1570aSKonstantin Belousov } 148799a1570aSKonstantin Belousov 148899a1570aSKonstantin Belousov static bool 14894cc8daf7SConrad Meyer vm_object_scan_all_shadowed(vm_object_t object) 14904cc8daf7SConrad Meyer { 14914cc8daf7SConrad Meyer vm_object_t backing_object; 14924cc8daf7SConrad Meyer vm_page_t p, pp; 149377d6fd97SKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex, pi, ps; 14944cc8daf7SConrad Meyer 14954cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object); 14964cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 14974cc8daf7SConrad Meyer 14984cc8daf7SConrad Meyer backing_object = object->backing_object; 14994cc8daf7SConrad Meyer 150077d6fd97SKonstantin Belousov if (backing_object->type != OBJT_DEFAULT && 150177d6fd97SKonstantin Belousov backing_object->type != OBJT_SWAP) 15024cc8daf7SConrad Meyer return (false); 15034cc8daf7SConrad Meyer 150477d6fd97SKonstantin Belousov pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 150577d6fd97SKonstantin Belousov p = vm_page_find_least(backing_object, pi); 150677d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 
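	/*
	 * "p" and "ps" advance together below:  "p" is the least
	 * resident page at or above "pi", while "ps" is the least
	 * pindex at or above "pi" that is backed by swap.  Each
	 * iteration continues from the smaller of the two, so every
	 * index that could supply data to the parent is examined.
	 */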
15074cc8daf7SConrad Meyer 15084cc8daf7SConrad Meyer /* 150977d6fd97SKonstantin Belousov * Only check pages inside the parent object's range and 151077d6fd97SKonstantin Belousov * inside the parent object's mapping of the backing object. 15114cc8daf7SConrad Meyer */ 151277d6fd97SKonstantin Belousov for (;; pi++) { 151377d6fd97SKonstantin Belousov if (p != NULL && p->pindex < pi) 151477d6fd97SKonstantin Belousov p = TAILQ_NEXT(p, listq); 151577d6fd97SKonstantin Belousov if (ps < pi) 151677d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 151777d6fd97SKonstantin Belousov if (p == NULL && ps >= backing_object->size) 151877d6fd97SKonstantin Belousov break; 151977d6fd97SKonstantin Belousov else if (p == NULL) 152077d6fd97SKonstantin Belousov pi = ps; 152177d6fd97SKonstantin Belousov else 152277d6fd97SKonstantin Belousov pi = MIN(p->pindex, ps); 152377d6fd97SKonstantin Belousov 152477d6fd97SKonstantin Belousov new_pindex = pi - backing_offset_index; 152577d6fd97SKonstantin Belousov if (new_pindex >= object->size) 152677d6fd97SKonstantin Belousov break; 15274cc8daf7SConrad Meyer 15284cc8daf7SConrad Meyer /* 15294cc8daf7SConrad Meyer * See if the parent has the page or if the parent's object 15304cc8daf7SConrad Meyer * pager has the page. If the parent has the page but the page 15314cc8daf7SConrad Meyer * is not valid, the parent's object pager must have the page. 15324cc8daf7SConrad Meyer * 15334cc8daf7SConrad Meyer * If this fails, the parent does not completely shadow the 15344cc8daf7SConrad Meyer * object and we might as well give up now. 15354cc8daf7SConrad Meyer */ 15364cc8daf7SConrad Meyer pp = vm_page_lookup(object, new_pindex); 15374cc8daf7SConrad Meyer if ((pp == NULL || pp->valid == 0) && 15384cc8daf7SConrad Meyer !vm_pager_has_page(object, new_pindex, NULL, NULL)) 15394cc8daf7SConrad Meyer return (false); 15404cc8daf7SConrad Meyer } 15414cc8daf7SConrad Meyer return (true); 15424cc8daf7SConrad Meyer } 15434cc8daf7SConrad Meyer 15444cc8daf7SConrad Meyer static bool 15454cc8daf7SConrad Meyer vm_object_collapse_scan(vm_object_t object, int op) 15462ad1a3f7SMatthew Dillon { 15472ad1a3f7SMatthew Dillon vm_object_t backing_object; 154899a1570aSKonstantin Belousov vm_page_t next, p, pp; 154999a1570aSKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex; 15502ad1a3f7SMatthew Dillon 155189f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 155289f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 15532ad1a3f7SMatthew Dillon 15542ad1a3f7SMatthew Dillon backing_object = object->backing_object; 15552ad1a3f7SMatthew Dillon backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 15562ad1a3f7SMatthew Dillon 15572ad1a3f7SMatthew Dillon /* 15582ad1a3f7SMatthew Dillon * Initial conditions 15592ad1a3f7SMatthew Dillon */ 15604cc8daf7SConrad Meyer if ((op & OBSC_COLLAPSE_WAIT) != 0) 15612ad1a3f7SMatthew Dillon vm_object_set_flag(backing_object, OBJ_DEAD); 15622ad1a3f7SMatthew Dillon 15632ad1a3f7SMatthew Dillon /* 15642ad1a3f7SMatthew Dillon * Our scan 15652ad1a3f7SMatthew Dillon */ 15664cc8daf7SConrad Meyer for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) { 156799a1570aSKonstantin Belousov next = TAILQ_NEXT(p, listq); 156899a1570aSKonstantin Belousov new_pindex = p->pindex - backing_offset_index; 15692ad1a3f7SMatthew Dillon 15702ad1a3f7SMatthew Dillon /* 15712ad1a3f7SMatthew Dillon * Check for busy page 15722ad1a3f7SMatthew Dillon */ 1573c7aebda8SAttilio Rao if (vm_page_busied(p)) { 15744cc8daf7SConrad Meyer next = 
vm_object_collapse_scan_wait(object, p, next, op); 15752ad1a3f7SMatthew Dillon continue; 15762ad1a3f7SMatthew Dillon } 15772ad1a3f7SMatthew Dillon 157899a1570aSKonstantin Belousov KASSERT(p->object == backing_object, 15794cc8daf7SConrad Meyer ("vm_object_collapse_scan: object mismatch")); 15802ad1a3f7SMatthew Dillon 158199a1570aSKonstantin Belousov if (p->pindex < backing_offset_index || 158299a1570aSKonstantin Belousov new_pindex >= object->size) { 1583e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 15844cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 15854cc8daf7SConrad Meyer 1); 1586e946b949SAttilio Rao 15872ad1a3f7SMatthew Dillon /* 15884cc8daf7SConrad Meyer * Page is out of the parent object's range, we can 15894cc8daf7SConrad Meyer * simply destroy it. 15902ad1a3f7SMatthew Dillon */ 15912965a453SKip Macy vm_page_lock(p); 1592f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1593f6d89838SAlan Cox ("freeing mapped page %p", p)); 1594f6d89838SAlan Cox if (p->wire_count == 0) 15952ad1a3f7SMatthew Dillon vm_page_free(p); 1596f6d89838SAlan Cox else 1597f6d89838SAlan Cox vm_page_remove(p); 15982965a453SKip Macy vm_page_unlock(p); 15992ad1a3f7SMatthew Dillon continue; 16002ad1a3f7SMatthew Dillon } 16012ad1a3f7SMatthew Dillon 16022ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 160399a1570aSKonstantin Belousov if (pp != NULL && vm_page_busied(pp)) { 1604e18cc7bfSMax Laier /* 16054cc8daf7SConrad Meyer * The page in the parent is busy and possibly not 16064cc8daf7SConrad Meyer * (yet) valid. Until its state is finalized by the 16074cc8daf7SConrad Meyer * busy bit owner, we can't tell whether it shadows the 16084cc8daf7SConrad Meyer * original page. Therefore, we must either skip it 16094cc8daf7SConrad Meyer * and the original (backing_object) page or wait for 16104cc8daf7SConrad Meyer * its state to be finalized. 1611e18cc7bfSMax Laier * 16124cc8daf7SConrad Meyer * This is due to a race with vm_fault() where we must 16134cc8daf7SConrad Meyer * unbusy the original (backing_obj) page before we can 16144cc8daf7SConrad Meyer * (re)lock the parent. Hence we can get here. 1615e18cc7bfSMax Laier */ 16164cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, pp, next, 16174cc8daf7SConrad Meyer op); 1618e18cc7bfSMax Laier continue; 1619e18cc7bfSMax Laier } 162099a1570aSKonstantin Belousov 162199a1570aSKonstantin Belousov KASSERT(pp == NULL || pp->valid != 0, 162299a1570aSKonstantin Belousov ("unbusy invalid page %p", pp)); 162399a1570aSKonstantin Belousov 16244cc8daf7SConrad Meyer if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, 16254cc8daf7SConrad Meyer NULL)) { 162699a1570aSKonstantin Belousov /* 16274cc8daf7SConrad Meyer * The page already exists in the parent OR swap exists 16284cc8daf7SConrad Meyer * for this location in the parent. Leave the parent's 16294cc8daf7SConrad Meyer * page alone. Destroy the original page from the 16304cc8daf7SConrad Meyer * backing object. 
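			 * The backing page's swap space, if any, is
			 * released too, because the parent's copy
			 * supersedes it.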
163199a1570aSKonstantin Belousov */ 1632e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 16334cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 16344cc8daf7SConrad Meyer 1); 16352965a453SKip Macy vm_page_lock(p); 1636f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1637f6d89838SAlan Cox ("freeing mapped page %p", p)); 1638f6d89838SAlan Cox if (p->wire_count == 0) 16392ad1a3f7SMatthew Dillon vm_page_free(p); 1640f6d89838SAlan Cox else 1641f6d89838SAlan Cox vm_page_remove(p); 16422965a453SKip Macy vm_page_unlock(p); 16432ad1a3f7SMatthew Dillon continue; 16442ad1a3f7SMatthew Dillon } 16452ad1a3f7SMatthew Dillon 1646e946b949SAttilio Rao /* 16474cc8daf7SConrad Meyer * Page does not exist in parent, rename the page from the 16484cc8daf7SConrad Meyer * backing object to the main object. 1649e946b949SAttilio Rao * 16504cc8daf7SConrad Meyer * If the page was mapped to a process, it can remain mapped 16513453bca8SAlan Cox * through the rename. vm_page_rename() will dirty the page. 1652e946b949SAttilio Rao */ 1653e946b949SAttilio Rao if (vm_page_rename(p, object, new_pindex)) { 16544cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, NULL, next, 16554cc8daf7SConrad Meyer op); 1656e946b949SAttilio Rao continue; 1657e946b949SAttilio Rao } 165814a5dc17SAttilio Rao 165914a5dc17SAttilio Rao /* Use the old pindex to free the right page. */ 1660e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 166114a5dc17SAttilio Rao swap_pager_freespace(backing_object, 166214a5dc17SAttilio Rao new_pindex + backing_offset_index, 1); 1663e946b949SAttilio Rao 1664f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1665f8a47341SAlan Cox /* 1666f8a47341SAlan Cox * Rename the reservation. 1667f8a47341SAlan Cox */ 1668f8a47341SAlan Cox vm_reserv_rename(p, object, backing_object, 1669f8a47341SAlan Cox backing_offset_index); 1670f8a47341SAlan Cox #endif 16712ad1a3f7SMatthew Dillon } 167299a1570aSKonstantin Belousov return (true); 16732ad1a3f7SMatthew Dillon } 16742ad1a3f7SMatthew Dillon 1675df8bae1dSRodney W. Grimes 1676df8bae1dSRodney W. Grimes /* 16772fe6e4d7SDavid Greenman * this version of collapse allows the operation to occur earlier and 16782fe6e4d7SDavid Greenman * when paging_in_progress is true for an object... This is not a complete 16792fe6e4d7SDavid Greenman * operation, but should plug 99.9% of the rest of the leaks. 16802fe6e4d7SDavid Greenman */ 16812fe6e4d7SDavid Greenman static void 16821b40f8c0SMatthew Dillon vm_object_qcollapse(vm_object_t object) 16832fe6e4d7SDavid Greenman { 16842ad1a3f7SMatthew Dillon vm_object_t backing_object = object->backing_object; 16852fe6e4d7SDavid Greenman 168689f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 168789f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(backing_object); 16881b40f8c0SMatthew Dillon 16892fe6e4d7SDavid Greenman if (backing_object->ref_count != 1) 16902fe6e4d7SDavid Greenman return; 16912fe6e4d7SDavid Greenman 16924cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT); 16932fe6e4d7SDavid Greenman } 16942fe6e4d7SDavid Greenman 1695df8bae1dSRodney W. Grimes /* 1696df8bae1dSRodney W. Grimes * vm_object_collapse: 1697df8bae1dSRodney W. Grimes * 1698df8bae1dSRodney W. Grimes * Collapse an object with the object backing it. 1699df8bae1dSRodney W. Grimes * Pages in the backing object are moved into the 1700df8bae1dSRodney W. Grimes * parent, and the backing object is deallocated. 1701df8bae1dSRodney W. Grimes */ 170226f9a767SRodney W. 
Grimes void 17031b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object) 1704df8bae1dSRodney W. Grimes { 170598f139daSKonstantin Belousov vm_object_t backing_object, new_backing_object; 170698f139daSKonstantin Belousov 170789f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 170823955314SAlfred Perlstein 1709df8bae1dSRodney W. Grimes while (TRUE) { 1710df8bae1dSRodney W. Grimes /* 1711df8bae1dSRodney W. Grimes * Verify that the conditions are right for collapse: 1712df8bae1dSRodney W. Grimes * 17132ad1a3f7SMatthew Dillon * The object exists and the backing object exists. 1714df8bae1dSRodney W. Grimes */ 171524a1cce3SDavid Greenman if ((backing_object = object->backing_object) == NULL) 17162ad1a3f7SMatthew Dillon break; 1717df8bae1dSRodney W. Grimes 1718f919ebdeSDavid Greenman /* 1719f919ebdeSDavid Greenman * we check the backing object first, because it is most likely 172024a1cce3SDavid Greenman * not collapsable. 1721f919ebdeSDavid Greenman */ 172289f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 172324a1cce3SDavid Greenman if (backing_object->handle != NULL || 172424a1cce3SDavid Greenman (backing_object->type != OBJT_DEFAULT && 172524a1cce3SDavid Greenman backing_object->type != OBJT_SWAP) || 1726f919ebdeSDavid Greenman (backing_object->flags & OBJ_DEAD) || 172724a1cce3SDavid Greenman object->handle != NULL || 172824a1cce3SDavid Greenman (object->type != OBJT_DEFAULT && 172924a1cce3SDavid Greenman object->type != OBJT_SWAP) || 173024a1cce3SDavid Greenman (object->flags & OBJ_DEAD)) { 173189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 17322ad1a3f7SMatthew Dillon break; 173324a1cce3SDavid Greenman } 17349b4814bbSDavid Greenman 173598f139daSKonstantin Belousov if (object->paging_in_progress != 0 || 173698f139daSKonstantin Belousov backing_object->paging_in_progress != 0) { 1737b9921222SDavid Greenman vm_object_qcollapse(object); 173889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 17392ad1a3f7SMatthew Dillon break; 1740df8bae1dSRodney W. Grimes } 174198f139daSKonstantin Belousov 174226f9a767SRodney W. Grimes /* 17430d94caffSDavid Greenman * We know that we can either collapse the backing object (if 17442ad1a3f7SMatthew Dillon * the parent is the only reference to it) or (perhaps) have 17452ad1a3f7SMatthew Dillon * the parent bypass the object if the parent happens to shadow 17462ad1a3f7SMatthew Dillon * all the resident pages in the entire backing object. 17472ad1a3f7SMatthew Dillon * 17482ad1a3f7SMatthew Dillon * This is ignoring pager-backed pages such as swap pages. 17494cc8daf7SConrad Meyer * vm_object_collapse_scan fails the shadowing test in this 17502ad1a3f7SMatthew Dillon * case. 1751df8bae1dSRodney W. Grimes */ 1752df8bae1dSRodney W. Grimes if (backing_object->ref_count == 1) { 1753aa9bc3b1SKonstantin Belousov vm_object_pip_add(object, 1); 1754aa9bc3b1SKonstantin Belousov vm_object_pip_add(backing_object, 1); 1755aa9bc3b1SKonstantin Belousov 1756df8bae1dSRodney W. Grimes /* 17572ad1a3f7SMatthew Dillon * If there is exactly one reference to the backing 17582ad1a3f7SMatthew Dillon * object, we can collapse it into the parent. 1759df8bae1dSRodney W. Grimes */ 17604cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT); 1761df8bae1dSRodney W. Grimes 1762f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1763f8a47341SAlan Cox /* 1764f8a47341SAlan Cox * Break any reservations from backing_object. 
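			 * The backing object is about to be destroyed,
			 * so any superpage reservations still attached
			 * to it must be broken before its remaining
			 * pages are freed.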
1765f8a47341SAlan Cox */ 1766f8a47341SAlan Cox if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) 1767f8a47341SAlan Cox vm_reserv_break_all(backing_object); 1768f8a47341SAlan Cox #endif 1769f8a47341SAlan Cox 1770df8bae1dSRodney W. Grimes /* 1771df8bae1dSRodney W. Grimes * Move the pager from backing_object to object. 1772df8bae1dSRodney W. Grimes */ 17736be36525SAlan Cox if (backing_object->type == OBJT_SWAP) { 177424a1cce3SDavid Greenman /* 1775c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case 1776c7c8dd7eSAlan Cox * the backing_object's and object's locks are 1777c7c8dd7eSAlan Cox * released and reacquired. 1778571a1e92SAttilio Rao * Since swap_pager_copy() is being asked to 1779571a1e92SAttilio Rao * destroy the source, it will change the 1780571a1e92SAttilio Rao * backing_object's type to OBJT_DEFAULT. 178124a1cce3SDavid Greenman */ 17821c7c3c6aSMatthew Dillon swap_pager_copy( 17831c7c3c6aSMatthew Dillon backing_object, 17841c7c3c6aSMatthew Dillon object, 17851c7c3c6aSMatthew Dillon OFF_TO_IDX(object->backing_object_offset), TRUE); 1786c0503609SDavid Greenman } 1787df8bae1dSRodney W. Grimes /* 1788df8bae1dSRodney W. Grimes * Object now shadows whatever backing_object did. 17892ad1a3f7SMatthew Dillon * Note that the reference to 17902ad1a3f7SMatthew Dillon * backing_object->backing_object moves from within 17912ad1a3f7SMatthew Dillon * backing_object to within object. 1792df8bae1dSRodney W. Grimes */ 17931c500307SAlan Cox LIST_REMOVE(object, shadow_list); 17944f7c7f6eSAlan Cox backing_object->shadow_count--; 1795de5f6a77SJohn Dyson if (backing_object->backing_object) { 179689f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object->backing_object); 17971c500307SAlan Cox LIST_REMOVE(backing_object, shadow_list); 179843186e53SAlan Cox LIST_INSERT_HEAD( 179943186e53SAlan Cox &backing_object->backing_object->shadow_head, 180043186e53SAlan Cox object, shadow_list); 180143186e53SAlan Cox /* 180243186e53SAlan Cox * The shadow_count has not changed. 180343186e53SAlan Cox */ 180489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object->backing_object); 1805de5f6a77SJohn Dyson } 180624a1cce3SDavid Greenman object->backing_object = backing_object->backing_object; 18072ad1a3f7SMatthew Dillon object->backing_object_offset += 18082ad1a3f7SMatthew Dillon backing_object->backing_object_offset; 18092ad1a3f7SMatthew Dillon 1810df8bae1dSRodney W. Grimes /* 1811df8bae1dSRodney W. Grimes * Discard backing_object. 1812df8bae1dSRodney W. Grimes * 18130d94caffSDavid Greenman * Since the backing object has no pages, no pager left, 18140d94caffSDavid Greenman * and no object references within it, all that is 18150d94caffSDavid Greenman * necessary is to dispose of it. 1816df8bae1dSRodney W. Grimes */ 18179b4d473aSKonstantin Belousov KASSERT(backing_object->ref_count == 1, ( 18189b4d473aSKonstantin Belousov "backing_object %p was somehow re-referenced during collapse!", 18199b4d473aSKonstantin Belousov backing_object)); 1820aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(backing_object); 1821e735691bSJohn Baldwin backing_object->type = OBJT_DEAD; 1822e735691bSJohn Baldwin backing_object->ref_count = 0; 182389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18249b4d473aSKonstantin Belousov vm_object_destroy(backing_object); 1825df8bae1dSRodney W. Grimes 1826aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(object); 1827df8bae1dSRodney W. Grimes object_collapses++; 18280d94caffSDavid Greenman } else { 1829df8bae1dSRodney W. 
Grimes /* 18302ad1a3f7SMatthew Dillon * If we do not entirely shadow the backing object, 18312ad1a3f7SMatthew Dillon * there is nothing we can do so we give up. 1832df8bae1dSRodney W. Grimes */ 1833df59a0feSJeff Roberson if (object->resident_page_count != object->size && 18344cc8daf7SConrad Meyer !vm_object_scan_all_shadowed(object)) { 183589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18362ad1a3f7SMatthew Dillon break; 183724a1cce3SDavid Greenman } 1838df8bae1dSRodney W. Grimes 1839df8bae1dSRodney W. Grimes /* 18400d94caffSDavid Greenman * Make the parent shadow the next object in the 18410d94caffSDavid Greenman * chain. Deallocating backing_object will not remove 18420d94caffSDavid Greenman * it, since its reference count is at least 2. 1843df8bae1dSRodney W. Grimes */ 18441c500307SAlan Cox LIST_REMOVE(object, shadow_list); 1845eaf13dd7SJohn Dyson backing_object->shadow_count--; 184695e5e988SJohn Dyson 184795e5e988SJohn Dyson new_backing_object = backing_object->backing_object; 18488aef1712SMatthew Dillon if ((object->backing_object = new_backing_object) != NULL) { 184989f6b863SAttilio Rao VM_OBJECT_WLOCK(new_backing_object); 18501c500307SAlan Cox LIST_INSERT_HEAD( 18512ad1a3f7SMatthew Dillon &new_backing_object->shadow_head, 18522ad1a3f7SMatthew Dillon object, 18532ad1a3f7SMatthew Dillon shadow_list 18542ad1a3f7SMatthew Dillon ); 1855eaf13dd7SJohn Dyson new_backing_object->shadow_count++; 1856b921a12bSAlan Cox vm_object_reference_locked(new_backing_object); 185789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_backing_object); 185895e5e988SJohn Dyson object->backing_object_offset += 185995e5e988SJohn Dyson backing_object->backing_object_offset; 1860de5f6a77SJohn Dyson } 1861df8bae1dSRodney W. Grimes 1862df8bae1dSRodney W. Grimes /* 18630d94caffSDavid Greenman * Drop the reference count on backing_object. Since 186422ec553fSAlan Cox * its ref_count was at least 2, it will not vanish. 1865df8bae1dSRodney W. Grimes */ 186622ec553fSAlan Cox backing_object->ref_count--; 186789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 1868df8bae1dSRodney W. Grimes object_bypasses++; 1869df8bae1dSRodney W. Grimes } 1870df8bae1dSRodney W. Grimes 1871df8bae1dSRodney W. Grimes /* 1872df8bae1dSRodney W. Grimes * Try again with this object's new backing object. 1873df8bae1dSRodney W. Grimes */ 1874df8bae1dSRodney W. Grimes } 1875df8bae1dSRodney W. Grimes } 1876df8bae1dSRodney W. Grimes 1877df8bae1dSRodney W. Grimes /* 1878bff99f0dSAlan Cox * vm_object_page_remove: 1879df8bae1dSRodney W. Grimes * 188068855966SAlan Cox * For the given object, either frees or invalidates each of the 18816bbee8e2SAlan Cox * specified pages. In general, a page is freed. However, if a page is 18826bbee8e2SAlan Cox * wired for any reason other than the existence of a managed, wired 18836bbee8e2SAlan Cox * mapping, then it may be invalidated but not removed from the object. 18846bbee8e2SAlan Cox * Pages are specified by the given range ["start", "end") and the option 18856bbee8e2SAlan Cox * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range 18866bbee8e2SAlan Cox * extends from "start" to the end of the object. If the option 18876bbee8e2SAlan Cox * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the 18886bbee8e2SAlan Cox * specified range are affected. If the option OBJPR_NOTMAPPED is 18896bbee8e2SAlan Cox * specified, then the pages within the specified range must have no 18906bbee8e2SAlan Cox * mappings. 
Otherwise, if this option is not specified, any mappings to 18916bbee8e2SAlan Cox * the specified pages are removed before the pages are freed or 18926bbee8e2SAlan Cox * invalidated. 189368855966SAlan Cox * 18946bbee8e2SAlan Cox * In general, this operation should only be performed on objects that 18956bbee8e2SAlan Cox * contain managed pages. There are, however, two exceptions. First, it 18966bbee8e2SAlan Cox * is performed on the kernel and kmem objects by vm_map_entry_delete(). 18976bbee8e2SAlan Cox * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device- 18986bbee8e2SAlan Cox * backed pages. In both of these cases, the option OBJPR_CLEANONLY must 18996bbee8e2SAlan Cox * not be specified and the option OBJPR_NOTMAPPED must be specified. 1900df8bae1dSRodney W. Grimes * 1901df8bae1dSRodney W. Grimes * The object must be locked. 1902df8bae1dSRodney W. Grimes */ 190326f9a767SRodney W. Grimes void 1904ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 19056bbee8e2SAlan Cox int options) 1906df8bae1dSRodney W. Grimes { 1907d031cff1SMatthew Dillon vm_page_t p, next; 1908df8bae1dSRodney W. Grimes 190989f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 191028634820SAlan Cox KASSERT((object->flags & OBJ_UNMANAGED) == 0 || 19116bbee8e2SAlan Cox (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, 19126bbee8e2SAlan Cox ("vm_object_page_remove: illegal options for object %p", object)); 1913ecde4b32SAlan Cox if (object->resident_page_count == 0) 19147667839aSAlan Cox return; 1915d474eaaaSDoug Rabson vm_object_pip_add(object, 1); 191626f9a767SRodney W. Grimes again: 1917b382c10aSKonstantin Belousov p = vm_page_find_least(object, start); 19182965a453SKip Macy 191975741c04SAlan Cox /* 19206bbee8e2SAlan Cox * Here, the variable "p" is either (1) the page with the least pindex 19216bbee8e2SAlan Cox * greater than or equal to the parameter "start" or (2) NULL. 192275741c04SAlan Cox */ 19236bbee8e2SAlan Cox for (; p != NULL && (p->pindex < end || end == 0); p = next) { 1924b18bfc3dSJohn Dyson next = TAILQ_NEXT(p, listq); 192575741c04SAlan Cox 192659677d3cSAlan Cox /* 19276bbee8e2SAlan Cox * If the page is wired for any reason besides the existence 19286bbee8e2SAlan Cox * of managed, wired mappings, then it cannot be freed. For 19296bbee8e2SAlan Cox * example, fictitious pages, which represent device memory, 19306bbee8e2SAlan Cox * are inherently wired and cannot be freed. They can, 19316bbee8e2SAlan Cox * however, be invalidated if the option OBJPR_CLEANONLY is 19326bbee8e2SAlan Cox * not specified. 
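		 * One concrete case:  msync(..., MS_INVALIDATE) on a
		 * device mapping reaches this function with
		 * OBJPR_NOTMAPPED set and OBJPR_CLEANONLY clear, so
		 * such wired pages are invalidated rather than freed.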
193359677d3cSAlan Cox */ 19342965a453SKip Macy vm_page_lock(p); 19353aaea6efSKonstantin Belousov if (vm_page_xbusied(p)) { 19363aaea6efSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 19375975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopax", true); 19383aaea6efSKonstantin Belousov VM_OBJECT_WLOCK(object); 19393aaea6efSKonstantin Belousov goto again; 19403aaea6efSKonstantin Belousov } 19416195b24aSKonstantin Belousov if (p->wire_count != 0) { 19426195b24aSKonstantin Belousov if ((options & OBJPR_NOTMAPPED) == 0) 19434fec79beSAlan Cox pmap_remove_all(p); 19446bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) == 0) { 1945bd7e5f99SJohn Dyson p->valid = 0; 1946a28042d1SAlan Cox vm_page_undirty(p); 1947a28042d1SAlan Cox } 1948ebf5d94eSKonstantin Belousov goto next; 19490d94caffSDavid Greenman } 1950c7aebda8SAttilio Rao if (vm_page_busied(p)) { 1951c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(object); 19525975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopar", false); 1953c7aebda8SAttilio Rao VM_OBJECT_WLOCK(object); 195426f9a767SRodney W. Grimes goto again; 1955c7aebda8SAttilio Rao } 195668855966SAlan Cox KASSERT((p->flags & PG_FICTITIOUS) == 0, 195768855966SAlan Cox ("vm_object_page_remove: page %p is fictitious", p)); 19586bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) { 19596bbee8e2SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0) 196078985e42SAlan Cox pmap_remove_write(p); 1961ebf5d94eSKonstantin Belousov if (p->dirty) 1962ebf5d94eSKonstantin Belousov goto next; 19632965a453SKip Macy } 19646195b24aSKonstantin Belousov if ((options & OBJPR_NOTMAPPED) == 0) 19654fec79beSAlan Cox pmap_remove_all(p); 1966df8bae1dSRodney W. Grimes vm_page_free(p); 1967ebf5d94eSKonstantin Belousov next: 19682965a453SKip Macy vm_page_unlock(p); 19692965a453SKip Macy } 1970f919ebdeSDavid Greenman vm_object_pip_wakeup(object); 1971c0503609SDavid Greenman } 1972df8bae1dSRodney W. Grimes 1973df8bae1dSRodney W. Grimes /* 19743138cd36SMark Johnston * vm_object_page_noreuse: 1975936c09acSJohn Baldwin * 19763138cd36SMark Johnston * For the given object, attempt to move the specified pages to 19773138cd36SMark Johnston * the head of the inactive queue. This bypasses regular LRU 19783138cd36SMark Johnston * operation and allows the pages to be reused quickly under memory 19793138cd36SMark Johnston * pressure. If a page is wired for any reason, then it will not 19803138cd36SMark Johnston * be queued. Pages are specified by the range ["start", "end"). 19813138cd36SMark Johnston * As a special case, if "end" is zero, then the range extends from 19823138cd36SMark Johnston * "start" to the end of the object. 1983936c09acSJohn Baldwin * 1984936c09acSJohn Baldwin * This operation should only be performed on objects that 198528634820SAlan Cox * contain non-fictitious, managed pages. 1986936c09acSJohn Baldwin * 1987936c09acSJohn Baldwin * The object must be locked. 
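 *
 * A hypothetical caller that has finished streaming through a range
 * and does not expect to reuse it soon might do the following, where
 * "obj", "foff", and "len" are page-aligned values of its own (an
 * illustrative sketch only, not an actual caller in this file):
 *
 *	VM_OBJECT_RLOCK(obj);
 *	vm_object_page_noreuse(obj, OFF_TO_IDX(foff),
 *	    OFF_TO_IDX(foff + len));
 *	VM_OBJECT_RUNLOCK(obj);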
1988936c09acSJohn Baldwin */ 1989936c09acSJohn Baldwin void 19903138cd36SMark Johnston vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1991936c09acSJohn Baldwin { 1992936c09acSJohn Baldwin struct mtx *mtx, *new_mtx; 1993936c09acSJohn Baldwin vm_page_t p, next; 1994936c09acSJohn Baldwin 199552d1addaSAlan Cox VM_OBJECT_ASSERT_LOCKED(object); 199628634820SAlan Cox KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 19973138cd36SMark Johnston ("vm_object_page_noreuse: illegal object %p", object)); 1998936c09acSJohn Baldwin if (object->resident_page_count == 0) 1999936c09acSJohn Baldwin return; 2000936c09acSJohn Baldwin p = vm_page_find_least(object, start); 2001936c09acSJohn Baldwin 2002936c09acSJohn Baldwin /* 2003936c09acSJohn Baldwin * Here, the variable "p" is either (1) the page with the least pindex 2004936c09acSJohn Baldwin * greater than or equal to the parameter "start" or (2) NULL. 2005936c09acSJohn Baldwin */ 2006936c09acSJohn Baldwin mtx = NULL; 2007936c09acSJohn Baldwin for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2008936c09acSJohn Baldwin next = TAILQ_NEXT(p, listq); 2009936c09acSJohn Baldwin 2010936c09acSJohn Baldwin /* 2011936c09acSJohn Baldwin * Avoid releasing and reacquiring the same page lock. 2012936c09acSJohn Baldwin */ 2013936c09acSJohn Baldwin new_mtx = vm_page_lockptr(p); 2014936c09acSJohn Baldwin if (mtx != new_mtx) { 2015936c09acSJohn Baldwin if (mtx != NULL) 2016936c09acSJohn Baldwin mtx_unlock(mtx); 2017936c09acSJohn Baldwin mtx = new_mtx; 2018936c09acSJohn Baldwin mtx_lock(mtx); 2019936c09acSJohn Baldwin } 20203138cd36SMark Johnston vm_page_deactivate_noreuse(p); 2021936c09acSJohn Baldwin } 2022936c09acSJohn Baldwin if (mtx != NULL) 2023936c09acSJohn Baldwin mtx_unlock(mtx); 2024936c09acSJohn Baldwin } 2025936c09acSJohn Baldwin 2026936c09acSJohn Baldwin /* 2027387aabc5SAlan Cox * Populate the specified range of the object with valid pages. Returns 2028387aabc5SAlan Cox * TRUE if the range is successfully populated and FALSE otherwise. 2029387aabc5SAlan Cox * 2030387aabc5SAlan Cox * Note: This function should be optimized to pass a larger array of 2031387aabc5SAlan Cox * pages to vm_pager_get_pages() before it is applied to a non- 2032387aabc5SAlan Cox * OBJT_DEVICE object. 2033387aabc5SAlan Cox * 2034387aabc5SAlan Cox * The object must be locked. 2035387aabc5SAlan Cox */ 2036387aabc5SAlan Cox boolean_t 2037387aabc5SAlan Cox vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2038387aabc5SAlan Cox { 2039093c7f39SGleb Smirnoff vm_page_t m; 2040387aabc5SAlan Cox vm_pindex_t pindex; 2041387aabc5SAlan Cox int rv; 2042387aabc5SAlan Cox 204389f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2044387aabc5SAlan Cox for (pindex = start; pindex < end; pindex++) { 20455944de8eSKonstantin Belousov m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); 2046387aabc5SAlan Cox if (m->valid != VM_PAGE_BITS_ALL) { 2047b0cd2017SGleb Smirnoff rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); 2048387aabc5SAlan Cox if (rv != VM_PAGER_OK) { 20492965a453SKip Macy vm_page_lock(m); 2050387aabc5SAlan Cox vm_page_free(m); 20512965a453SKip Macy vm_page_unlock(m); 2052387aabc5SAlan Cox break; 2053387aabc5SAlan Cox } 2054387aabc5SAlan Cox } 2055387aabc5SAlan Cox /* 2056387aabc5SAlan Cox * Keep "m" busy because a subsequent iteration may unlock 2057387aabc5SAlan Cox * the object. 
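		 * Every page grabbed here is unbusied by the loop
		 * below once the scan completes or fails.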
2058387aabc5SAlan Cox */ 2059387aabc5SAlan Cox } 2060387aabc5SAlan Cox if (pindex > start) { 2061387aabc5SAlan Cox m = vm_page_lookup(object, start); 2062387aabc5SAlan Cox while (m != NULL && m->pindex < pindex) { 2063c7aebda8SAttilio Rao vm_page_xunbusy(m); 2064387aabc5SAlan Cox m = TAILQ_NEXT(m, listq); 2065387aabc5SAlan Cox } 2066387aabc5SAlan Cox } 2067387aabc5SAlan Cox return (pindex == end); 2068387aabc5SAlan Cox } 2069387aabc5SAlan Cox 2070387aabc5SAlan Cox /* 2071df8bae1dSRodney W. Grimes * Routine: vm_object_coalesce 2072df8bae1dSRodney W. Grimes * Function: Coalesces two objects backing up adjoining 2073df8bae1dSRodney W. Grimes * regions of memory into a single object. 2074df8bae1dSRodney W. Grimes * 2075df8bae1dSRodney W. Grimes * returns TRUE if objects were combined. 2076df8bae1dSRodney W. Grimes * 2077df8bae1dSRodney W. Grimes * NOTE: Only works at the moment if the second object is NULL - 2078df8bae1dSRodney W. Grimes * if it's not, which object do we lock first? 2079df8bae1dSRodney W. Grimes * 2080df8bae1dSRodney W. Grimes * Parameters: 2081df8bae1dSRodney W. Grimes * prev_object First object to coalesce 2082df8bae1dSRodney W. Grimes * prev_offset Offset into prev_object 2083df8bae1dSRodney W. Grimes * prev_size Size of reference to prev_object 208457a21abaSAlan Cox * next_size Size of reference to the second object 20853364c323SKonstantin Belousov * reserved Indicator that extension region has 20863364c323SKonstantin Belousov * swap accounted for 2087df8bae1dSRodney W. Grimes * 2088df8bae1dSRodney W. Grimes * Conditions: 2089df8bae1dSRodney W. Grimes * The object must *not* be locked. 2090df8bae1dSRodney W. Grimes */ 20910d94caffSDavid Greenman boolean_t 209257a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 20933364c323SKonstantin Belousov vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) 2094df8bae1dSRodney W. Grimes { 2095ea41812fSAlan Cox vm_pindex_t next_pindex; 2096df8bae1dSRodney W. Grimes 209700e1854aSAlan Cox if (prev_object == NULL) 2098df8bae1dSRodney W. Grimes return (TRUE); 209989f6b863SAttilio Rao VM_OBJECT_WLOCK(prev_object); 21009ded9474SKonstantin Belousov if ((prev_object->type != OBJT_DEFAULT && 21019ded9474SKonstantin Belousov prev_object->type != OBJT_SWAP) || 2102f08f7dcaSKonstantin Belousov (prev_object->flags & OBJ_TMPFS_NODE) != 0) { 210389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 210430dcfc09SJohn Dyson return (FALSE); 210530dcfc09SJohn Dyson } 210630dcfc09SJohn Dyson 2107df8bae1dSRodney W. Grimes /* 2108df8bae1dSRodney W. Grimes * Try to collapse the object first 2109df8bae1dSRodney W. Grimes */ 2110df8bae1dSRodney W. Grimes vm_object_collapse(prev_object); 2111df8bae1dSRodney W. Grimes 2112df8bae1dSRodney W. Grimes /* 21130d94caffSDavid Greenman * Can't coalesce if: . more than one reference . paged out . shadows 21140d94caffSDavid Greenman * another object . has a copy elsewhere (any of which mean that the 21150d94caffSDavid Greenman * pages not mapped to prev_entry may be in use anyway) 2116df8bae1dSRodney W. Grimes */ 21178cc7e047SJohn Dyson if (prev_object->backing_object != NULL) { 211889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 2119df8bae1dSRodney W. Grimes return (FALSE); 2120df8bae1dSRodney W. 
Grimes } 2121a316d390SJohn Dyson 2122a316d390SJohn Dyson prev_size >>= PAGE_SHIFT; 2123a316d390SJohn Dyson next_size >>= PAGE_SHIFT; 212457a21abaSAlan Cox next_pindex = OFF_TO_IDX(prev_offset) + prev_size; 21258cc7e047SJohn Dyson 21268cc7e047SJohn Dyson if ((prev_object->ref_count > 1) && 2127ea41812fSAlan Cox (prev_object->size != next_pindex)) { 212889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 21298cc7e047SJohn Dyson return (FALSE); 21308cc7e047SJohn Dyson } 21318cc7e047SJohn Dyson 2132df8bae1dSRodney W. Grimes /* 21333364c323SKonstantin Belousov * Account for the charge. 21343364c323SKonstantin Belousov */ 2135ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) { 21363364c323SKonstantin Belousov 21373364c323SKonstantin Belousov /* 21383364c323SKonstantin Belousov * If prev_object was charged, then this mapping, 2139763df3ecSPedro F. Giffuni * although not charged now, may become writable 2140ef694c1aSEdward Tomasz Napierala * later. Non-NULL cred in the object would prevent 21413364c323SKonstantin Belousov * swap reservation during enabling of the write 21423364c323SKonstantin Belousov * access, so reserve swap now. Failed reservation 21433364c323SKonstantin Belousov * cause allocation of the separate object for the map 21443364c323SKonstantin Belousov * entry, and swap reservation for this entry is 21453364c323SKonstantin Belousov * managed in appropriate time. 21463364c323SKonstantin Belousov */ 2147ef694c1aSEdward Tomasz Napierala if (!reserved && !swap_reserve_by_cred(ptoa(next_size), 2148ef694c1aSEdward Tomasz Napierala prev_object->cred)) { 21499f790a17SKonstantin Belousov VM_OBJECT_WUNLOCK(prev_object); 21503364c323SKonstantin Belousov return (FALSE); 21513364c323SKonstantin Belousov } 21523364c323SKonstantin Belousov prev_object->charge += ptoa(next_size); 21533364c323SKonstantin Belousov } 21543364c323SKonstantin Belousov 21553364c323SKonstantin Belousov /* 21560d94caffSDavid Greenman * Remove any pages that may still be in the object from a previous 21570d94caffSDavid Greenman * deallocation. 2158df8bae1dSRodney W. Grimes */ 2159ea41812fSAlan Cox if (next_pindex < prev_object->size) { 21606bbee8e2SAlan Cox vm_object_page_remove(prev_object, next_pindex, next_pindex + 21616bbee8e2SAlan Cox next_size, 0); 2162ea41812fSAlan Cox if (prev_object->type == OBJT_SWAP) 2163ea41812fSAlan Cox swap_pager_freespace(prev_object, 2164ea41812fSAlan Cox next_pindex, next_size); 21653364c323SKonstantin Belousov #if 0 2166ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) { 21673364c323SKonstantin Belousov KASSERT(prev_object->charge >= 21683364c323SKonstantin Belousov ptoa(prev_object->size - next_pindex), 21693364c323SKonstantin Belousov ("object %p overcharged 1 %jx %jx", prev_object, 21703364c323SKonstantin Belousov (uintmax_t)next_pindex, (uintmax_t)next_size)); 21713364c323SKonstantin Belousov prev_object->charge -= ptoa(prev_object->size - 21723364c323SKonstantin Belousov next_pindex); 21733364c323SKonstantin Belousov } 21743364c323SKonstantin Belousov #endif 2175ea41812fSAlan Cox } 2176df8bae1dSRodney W. Grimes 2177df8bae1dSRodney W. Grimes /* 2178df8bae1dSRodney W. Grimes * Extend the object if necessary. 2179df8bae1dSRodney W. Grimes */ 2180ea41812fSAlan Cox if (next_pindex + next_size > prev_object->size) 2181ea41812fSAlan Cox prev_object->size = next_pindex + next_size; 2182df8bae1dSRodney W. Grimes 218389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 2184df8bae1dSRodney W. Grimes return (TRUE); 2185df8bae1dSRodney W. 
Grimes } 2186df8bae1dSRodney W. Grimes 21877a5a6352SMatthew Dillon void 21887a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object) 21897a5a6352SMatthew Dillon { 21907a5a6352SMatthew Dillon 219189f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2192f40cb1c6SKonstantin Belousov if (object->type != OBJT_VNODE) { 2193f40cb1c6SKonstantin Belousov if ((object->flags & OBJ_TMPFS_NODE) != 0) { 2194f40cb1c6SKonstantin Belousov KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs")); 2195f40cb1c6SKonstantin Belousov vm_object_set_flag(object, OBJ_TMPFS_DIRTY); 2196f40cb1c6SKonstantin Belousov } 21973280870dSKonstantin Belousov return; 2198f40cb1c6SKonstantin Belousov } 21993280870dSKonstantin Belousov object->generation++; 22003280870dSKonstantin Belousov if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) 2201ee39666aSJeff Roberson return; 2202af51d7bfSAlan Cox vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); 22037a5a6352SMatthew Dillon } 22047a5a6352SMatthew Dillon 220503462509SAlan Cox /* 220603462509SAlan Cox * vm_object_unwire: 220703462509SAlan Cox * 220803462509SAlan Cox * For each page offset within the specified range of the given object, 220903462509SAlan Cox * find the highest-level page in the shadow chain and unwire it. A page 221003462509SAlan Cox * must exist at every page offset, and the highest-level page must be 221103462509SAlan Cox * wired. 221203462509SAlan Cox */ 221303462509SAlan Cox void 221403462509SAlan Cox vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, 221503462509SAlan Cox uint8_t queue) 221603462509SAlan Cox { 221703462509SAlan Cox vm_object_t tobject; 221803462509SAlan Cox vm_page_t m, tm; 221903462509SAlan Cox vm_pindex_t end_pindex, pindex, tpindex; 222003462509SAlan Cox int depth, locked_depth; 222103462509SAlan Cox 222203462509SAlan Cox KASSERT((offset & PAGE_MASK) == 0, 222303462509SAlan Cox ("vm_object_unwire: offset is not page aligned")); 222403462509SAlan Cox KASSERT((length & PAGE_MASK) == 0, 222503462509SAlan Cox ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); 222603462509SAlan Cox /* The wired count of a fictitious page never changes. */ 222703462509SAlan Cox if ((object->flags & OBJ_FICTITIOUS) != 0) 222803462509SAlan Cox return; 222903462509SAlan Cox pindex = OFF_TO_IDX(offset); 223003462509SAlan Cox end_pindex = pindex + atop(length); 223103462509SAlan Cox locked_depth = 1; 223203462509SAlan Cox VM_OBJECT_RLOCK(object); 223303462509SAlan Cox m = vm_page_find_least(object, pindex); 223403462509SAlan Cox while (pindex < end_pindex) { 223503462509SAlan Cox if (m == NULL || pindex < m->pindex) { 223603462509SAlan Cox /* 223703462509SAlan Cox * The first object in the shadow chain doesn't 223803462509SAlan Cox * contain a page at the current index. Therefore, 223903462509SAlan Cox * the page must exist in a backing object. 
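			 * Each level of the chain is read-locked as it
			 * is first visited and stays locked until the
			 * walk finishes; see the unlock loop at the
			 * end of this function.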

/*
 * vm_object_unwire:
 *
 *	For each page offset within the specified range of the given object,
 *	find the highest-level page in the shadow chain and unwire it.  A page
 *	must exist at every page offset, and the highest-level page must be
 *	wired.
 */
void
vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
    uint8_t queue)
{
	vm_object_t tobject;
	vm_page_t m, tm;
	vm_pindex_t end_pindex, pindex, tpindex;
	int depth, locked_depth;

	KASSERT((offset & PAGE_MASK) == 0,
	    ("vm_object_unwire: offset is not page aligned"));
	KASSERT((length & PAGE_MASK) == 0,
	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
	/* The wired count of a fictitious page never changes. */
	if ((object->flags & OBJ_FICTITIOUS) != 0)
		return;
	pindex = OFF_TO_IDX(offset);
	end_pindex = pindex + atop(length);
	locked_depth = 1;
	VM_OBJECT_RLOCK(object);
	m = vm_page_find_least(object, pindex);
	while (pindex < end_pindex) {
		if (m == NULL || pindex < m->pindex) {
			/*
			 * The first object in the shadow chain doesn't
			 * contain a page at the current index.  Therefore,
			 * the page must exist in a backing object.
			 */
			tobject = object;
			tpindex = pindex;
			depth = 0;
			do {
				tpindex +=
				    OFF_TO_IDX(tobject->backing_object_offset);
				tobject = tobject->backing_object;
				KASSERT(tobject != NULL,
				    ("vm_object_unwire: missing page"));
				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
					goto next_page;
				depth++;
				if (depth == locked_depth) {
					locked_depth++;
					VM_OBJECT_RLOCK(tobject);
				}
			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
			    NULL);
		} else {
			tm = m;
			m = TAILQ_NEXT(m, listq);
		}
		vm_page_lock(tm);
		vm_page_unwire(tm, queue);
		vm_page_unlock(tm);
next_page:
		pindex++;
	}
	/* Release the accumulated object locks. */
	for (depth = 0; depth < locked_depth; depth++) {
		tobject = object->backing_object;
		VM_OBJECT_RUNLOCK(object);
		object = tobject;
	}
}
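
/*
 * Illustrative sketch (hypothetical, not compiled in): a map-layer
 * caller in the style of vm_map_entry_unwire() releases the wiring on
 * the pages backing an entry, returning them to the requested paging
 * queue.  The helper below is invented for this example; the entry
 * fields are as in vm_map(9).
 */
#if 0
static void
example_unwire_entry(vm_map_entry_t entry)
{

	vm_object_unwire(entry->object.vm_object, entry->offset,
	    entry->end - entry->start, PQ_ACTIVE);
}
#endif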

struct vnode *
vm_object_vnode(vm_object_t object)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	if (object->type == OBJT_VNODE)
		return (object->handle);
	if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0)
		return (object->un_pager.swp.swp_tmpfs);
	return (NULL);
}
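
/*
 * Illustrative sketch (hypothetical, not compiled in): a vnode
 * reference obtained through vm_object_vnode() must be acquired while
 * the object lock is held, as the sysctl handler below does for
 * OBJT_VNODE objects.
 */
#if 0
	struct vnode *vp;

	VM_OBJECT_RLOCK(object);
	vp = vm_object_vnode(object);
	if (vp != NULL)
		vref(vp);
	VM_OBJECT_RUNLOCK(object);
	/* ... use vp, then vrele(vp) ... */
#endif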

static int
sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_vmobject *kvo;
	char *fullpath, *freepath;
	struct vnode *vp;
	struct vattr va;
	vm_object_t obj;
	vm_page_t m;
	int count, error;

	if (req->oldptr == NULL) {
		/*
		 * If an old buffer has not been provided, generate an
		 * estimate of the space needed for a subsequent call.
		 */
		mtx_lock(&vm_object_list_mtx);
		count = 0;
		TAILQ_FOREACH(obj, &vm_object_list, object_list) {
			if (obj->type == OBJT_DEAD)
				continue;
			count++;
		}
		mtx_unlock(&vm_object_list_mtx);
		return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
		    count * 11 / 10));
	}

	kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);
	error = 0;

	/*
	 * VM objects are type stable and are never removed from the
	 * list once added.  This allows us to safely read obj->object_list
	 * after reacquiring the VM object lock.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(obj, &vm_object_list, object_list) {
		if (obj->type == OBJT_DEAD)
			continue;
		VM_OBJECT_RLOCK(obj);
		if (obj->type == OBJT_DEAD) {
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}
		mtx_unlock(&vm_object_list_mtx);
		kvo->kvo_size = ptoa(obj->size);
		kvo->kvo_resident = obj->resident_page_count;
		kvo->kvo_ref_count = obj->ref_count;
		kvo->kvo_shadow_count = obj->shadow_count;
		kvo->kvo_memattr = obj->memattr;
		kvo->kvo_active = 0;
		kvo->kvo_inactive = 0;
		TAILQ_FOREACH(m, &obj->memq, listq) {
			/*
			 * A page may belong to the object but be
			 * dequeued and set to PQ_NONE while the
			 * object lock is not held.  This makes the
			 * reads of m->queue below racy, and we do not
			 * count pages set to PQ_NONE.  However, this
			 * sysctl is only meant to give an
			 * approximation of the system anyway.
			 */
			if (vm_page_active(m))
				kvo->kvo_active++;
			else if (vm_page_inactive(m))
				kvo->kvo_inactive++;
		}

		kvo->kvo_vn_fileid = 0;
		kvo->kvo_vn_fsid = 0;
		kvo->kvo_vn_fsid_freebsd11 = 0;
		freepath = NULL;
		fullpath = "";
		vp = NULL;
		switch (obj->type) {
		case OBJT_DEFAULT:
			kvo->kvo_type = KVME_TYPE_DEFAULT;
			break;
		case OBJT_VNODE:
			kvo->kvo_type = KVME_TYPE_VNODE;
			vp = obj->handle;
			vref(vp);
			break;
		case OBJT_SWAP:
			kvo->kvo_type = KVME_TYPE_SWAP;
			break;
		case OBJT_DEVICE:
			kvo->kvo_type = KVME_TYPE_DEVICE;
			break;
		case OBJT_PHYS:
			kvo->kvo_type = KVME_TYPE_PHYS;
			break;
		case OBJT_DEAD:
			kvo->kvo_type = KVME_TYPE_DEAD;
			break;
		case OBJT_SG:
			kvo->kvo_type = KVME_TYPE_SG;
			break;
		case OBJT_MGTDEVICE:
			kvo->kvo_type = KVME_TYPE_MGTDEVICE;
			break;
		default:
			kvo->kvo_type = KVME_TYPE_UNKNOWN;
			break;
		}
		VM_OBJECT_RUNLOCK(obj);
		if (vp != NULL) {
			vn_fullpath(curthread, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
				kvo->kvo_vn_fileid = va.va_fileid;
				kvo->kvo_vn_fsid = va.va_fsid;
				kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
				/* truncate */
			}
			vput(vp);
		}

		strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
		    + strlen(kvo->kvo_path) + 1;
		kvo->kvo_structsize = roundup(kvo->kvo_structsize,
		    sizeof(uint64_t));
		error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
		mtx_lock(&vm_object_list_mtx);
		if (error)
			break;
	}
	mtx_unlock(&vm_object_list_mtx);
	free(kvo, M_TEMP);
	return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
    "List of VM objects");
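
/*
 * Illustrative userland consumer (hypothetical, not part of this
 * file): the handler above is written for the usual two-call sysctl
 * pattern -- a NULL buffer first to obtain a size estimate, then the
 * actual read.  Records are variable length, so step by
 * kvo_structsize.  libutil's kinfo_getvmobject(3) wraps the same
 * interface.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct kinfo_vmobject *kvo;
	char *buf, *bp, *ep;
	size_t len;

	/* First call: no buffer, just ask how much space is needed. */
	if (sysctlbyname("vm.objects", NULL, &len, NULL, 0) != 0)
		err(1, "sysctl size");
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	/* Second call: fetch the packed kinfo_vmobject records. */
	if (sysctlbyname("vm.objects", buf, &len, NULL, 0) != 0)
		err(1, "sysctl data");
	for (bp = buf, ep = buf + len; bp < ep; bp += kvo->kvo_structsize) {
		kvo = (struct kinfo_vmobject *)(void *)bp;
		printf("type %d size %ju resident %ju path %s\n",
		    kvo->kvo_type, (uintmax_t)kvo->kvo_size,
		    (uintmax_t)kvo->kvo_resident, kvo->kvo_path);
	}
	free(buf);
	return (0);
}
#endif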

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == NULL)
		return (0);

	if (entry == NULL) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return (1);
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return (1);
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return (1);
			}
	}
	return (0);
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL)) {
			/* sx_sunlock(&allproc_lock); */
			return (1);
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, NULL))
		return (1);
	return (0);
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * Make sure that internal objects are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
				    "vmochk: internal obj is not in a map: "
				    "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}
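
/*
 * The consistency check above and the printing commands below are run
 * from the in-kernel debugger, e.g. (commands shown for illustration;
 * the object address is made up):
 *
 *	db> show vmochk
 *	db> show object 0xfffff80003e9c000
 *	db> show vmopag
 *
 * "show object" prints the resident page list only when an address is
 * supplied, since "full" is taken from have_addr.
 */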

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags,
	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	vm_pindex_t fidx;
	vm_paddr_t pa;
	vm_page_t m, prev_m;
	int rcount, nl, c;

	nl = 0;
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		pa = -1;
		TAILQ_FOREACH(m, &object->memq, listq) {
			if (m->pindex > 128)
				break;
			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
			    prev_m->pindex + 1 != m->pindex) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
			}
			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				    (long)fidx, rcount, (long)pa);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = m->pindex;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */