160727d8bSWarner Losh /*- 2*796df753SPedro F. Giffuni * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 351369649SPedro F. Giffuni * 4df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 5df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 6df8bae1dSRodney W. Grimes * 7df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 8df8bae1dSRodney W. Grimes * The Mach Operating System project at Carnegie-Mellon University. 9df8bae1dSRodney W. Grimes * 10df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 11df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 12df8bae1dSRodney W. Grimes * are met: 13df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 15df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 16df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 17df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 18fbbd9655SWarner Losh * 3. Neither the name of the University nor the names of its contributors 19df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 20df8bae1dSRodney W. Grimes * without specific prior written permission. 21df8bae1dSRodney W. Grimes * 22df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32df8bae1dSRodney W. Grimes * SUCH DAMAGE. 33df8bae1dSRodney W. Grimes * 343c4dd356SDavid Greenman * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 35df8bae1dSRodney W. Grimes * 36df8bae1dSRodney W. Grimes * 37df8bae1dSRodney W. Grimes * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38df8bae1dSRodney W. Grimes * All rights reserved. 39df8bae1dSRodney W. Grimes * 40df8bae1dSRodney W. Grimes * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41df8bae1dSRodney W. Grimes * 42df8bae1dSRodney W. Grimes * Permission to use, copy, modify and distribute this software and 43df8bae1dSRodney W. Grimes * its documentation is hereby granted, provided that both the copyright 44df8bae1dSRodney W. Grimes * notice and this permission notice appear in all copies of the 45df8bae1dSRodney W. Grimes * software, derivative works or modified versions, and any portions 46df8bae1dSRodney W. Grimes * thereof, and that both notices appear in supporting documentation. 47df8bae1dSRodney W. Grimes * 48df8bae1dSRodney W. Grimes * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49df8bae1dSRodney W. Grimes * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50df8bae1dSRodney W. Grimes * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51df8bae1dSRodney W. Grimes * 52df8bae1dSRodney W. Grimes * Carnegie Mellon requests users of this software to return to 53df8bae1dSRodney W. Grimes * 54df8bae1dSRodney W. Grimes * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55df8bae1dSRodney W. Grimes * School of Computer Science 56df8bae1dSRodney W. Grimes * Carnegie Mellon University 57df8bae1dSRodney W. Grimes * Pittsburgh PA 15213-3890 58df8bae1dSRodney W. Grimes * 59df8bae1dSRodney W. Grimes * any improvements or extensions that they make and grant Carnegie the 60df8bae1dSRodney W. Grimes * rights to redistribute these changes. 61df8bae1dSRodney W. Grimes */ 62df8bae1dSRodney W. Grimes 63df8bae1dSRodney W. Grimes /* 64df8bae1dSRodney W. Grimes * Virtual memory object module. 65df8bae1dSRodney W. Grimes */ 66df8bae1dSRodney W. Grimes 67874651b1SDavid E. O'Brien #include <sys/cdefs.h> 68874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$"); 69874651b1SDavid E. O'Brien 70f8a47341SAlan Cox #include "opt_vm.h" 71f8a47341SAlan Cox 72df8bae1dSRodney W. Grimes #include <sys/param.h> 73df8bae1dSRodney W. Grimes #include <sys/systm.h> 74fb919e4dSMark Murray #include <sys/lock.h> 75867a482dSJohn Dyson #include <sys/mman.h> 76cf2819ccSJohn Dyson #include <sys/mount.h> 77b9b7a4beSMatthew Dillon #include <sys/kernel.h> 78f425ab8eSKonstantin Belousov #include <sys/pctrie.h> 79b9b7a4beSMatthew Dillon #include <sys/sysctl.h> 801b367556SJason Evans #include <sys/mutex.h> 81fb919e4dSMark Murray #include <sys/proc.h> /* for curproc, pageproc */ 82fb919e4dSMark Murray #include <sys/socket.h> 833364c323SKonstantin Belousov #include <sys/resourcevar.h> 8489f6b863SAttilio Rao #include <sys/rwlock.h> 85ff87ae35SJohn Baldwin #include <sys/user.h> 86fb919e4dSMark Murray #include <sys/vnode.h> 87fb919e4dSMark Murray #include <sys/vmmeter.h> 881005a129SJohn Baldwin #include <sys/sx.h> 89df8bae1dSRodney W. Grimes 90df8bae1dSRodney W. Grimes #include <vm/vm.h> 91efeaf95aSDavid Greenman #include <vm/vm_param.h> 92efeaf95aSDavid Greenman #include <vm/pmap.h> 93efeaf95aSDavid Greenman #include <vm/vm_map.h> 94efeaf95aSDavid Greenman #include <vm/vm_object.h> 95df8bae1dSRodney W. Grimes #include <vm/vm_page.h> 9626f9a767SRodney W. Grimes #include <vm/vm_pageout.h> 970d94caffSDavid Greenman #include <vm/vm_pager.h> 9805f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h> 99a1f6d91cSDavid Greenman #include <vm/vm_kern.h> 100efeaf95aSDavid Greenman #include <vm/vm_extern.h> 101774d251dSAttilio Rao #include <vm/vm_radix.h> 102f8a47341SAlan Cox #include <vm/vm_reserv.h> 103670d17b5SJeff Roberson #include <vm/uma.h> 10426f9a767SRodney W. Grimes 105c53f7aceSDag-Erling Smørgrav static int old_msync; 106c53f7aceSDag-Erling Smørgrav SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0, 107c53f7aceSDag-Erling Smørgrav "Use old (insecure) msync behavior"); 108c53f7aceSDag-Erling Smørgrav 109757216f3SKonstantin Belousov static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, 110126d6082SKonstantin Belousov int pagerflags, int flags, boolean_t *clearobjflags, 111126d6082SKonstantin Belousov boolean_t *eio); 1123280870dSKonstantin Belousov static boolean_t vm_object_page_remove_write(vm_page_t p, int flags, 113126d6082SKonstantin Belousov boolean_t *clearobjflags); 114b9b7a4beSMatthew Dillon static void vm_object_qcollapse(vm_object_t object); 11502dd8331SAlan Cox static void vm_object_vndeallocate(vm_object_t object); 116f6b04d2bSDavid Greenman 117df8bae1dSRodney W. Grimes /* 118df8bae1dSRodney W. Grimes * Virtual memory objects maintain the actual data 119df8bae1dSRodney W. Grimes * associated with allocated virtual memory. A given 120df8bae1dSRodney W. Grimes * page of memory exists within exactly one object. 121df8bae1dSRodney W. Grimes * 122df8bae1dSRodney W. Grimes * An object is only deallocated when all "references" 123df8bae1dSRodney W. Grimes * are given up. Only one "reference" to a given 124df8bae1dSRodney W. Grimes * region of an object should be writeable. 125df8bae1dSRodney W. Grimes * 126df8bae1dSRodney W. Grimes * Associated with each object is a list of all resident 127df8bae1dSRodney W. Grimes * memory pages belonging to that object; this list is 128df8bae1dSRodney W. Grimes * maintained by the "vm_page" module, and locked by the object's 129df8bae1dSRodney W. Grimes * lock. 130df8bae1dSRodney W. Grimes * 131df8bae1dSRodney W. Grimes * Each object also records a "pager" routine which is 132df8bae1dSRodney W. Grimes * used to retrieve (and store) pages to the proper backing 133df8bae1dSRodney W. Grimes * storage. In addition, objects may be backed by other 134df8bae1dSRodney W. Grimes * objects from which they were virtual-copied. 135df8bae1dSRodney W. Grimes * 136df8bae1dSRodney W. Grimes * The only items within the object structure which are 137df8bae1dSRodney W. Grimes * modified after time of creation are: 138df8bae1dSRodney W. Grimes * reference count locked by object's lock 139df8bae1dSRodney W. Grimes * pager routine locked by object's lock 140df8bae1dSRodney W. Grimes * 141df8bae1dSRodney W. Grimes */ 142df8bae1dSRodney W. Grimes 14328f8db14SBruce Evans struct object_q vm_object_list; 144a5698387SAlan Cox struct mtx vm_object_list_mtx; /* lock for object list and count */ 145cccf11b8SAlan Cox 146cccf11b8SAlan Cox struct vm_object kernel_object_store; 147df8bae1dSRodney W. Grimes 1486472ac3dSEd Schouten static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, 1496472ac3dSEd Schouten "VM object stats"); 150604c2bbcSAlan Cox 151f708ef1bSPoul-Henning Kamp static long object_collapses; 152604c2bbcSAlan Cox SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD, 153604c2bbcSAlan Cox &object_collapses, 0, "VM object collapses"); 154604c2bbcSAlan Cox 155f708ef1bSPoul-Henning Kamp static long object_bypasses; 156604c2bbcSAlan Cox SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD, 157604c2bbcSAlan Cox &object_bypasses, 0, "VM object bypasses"); 158dad740e9SAlan Cox 159670d17b5SJeff Roberson static uma_zone_t obj_zone; 1608355f576SJeff Roberson 161b23f72e9SBrian Feldman static int vm_object_zinit(void *mem, int size, int flags); 1628355f576SJeff Roberson 1638355f576SJeff Roberson #ifdef INVARIANTS 1648355f576SJeff Roberson static void vm_object_zdtor(void *mem, int size, void *arg); 1658355f576SJeff Roberson 1668355f576SJeff Roberson static void 1678355f576SJeff Roberson vm_object_zdtor(void *mem, int size, void *arg) 1688355f576SJeff Roberson { 1698355f576SJeff Roberson vm_object_t object; 1708355f576SJeff Roberson 1718355f576SJeff Roberson object = (vm_object_t)mem; 172e735691bSJohn Baldwin KASSERT(object->ref_count == 0, 173e735691bSJohn Baldwin ("object %p ref_count = %d", object, object->ref_count)); 17443186e53SAlan Cox KASSERT(TAILQ_EMPTY(&object->memq), 175198da1b2SAttilio Rao ("object %p has resident pages in its memq", object)); 176774d251dSAttilio Rao KASSERT(vm_radix_is_empty(&object->rtree), 177774d251dSAttilio Rao ("object %p has resident pages in its trie", object)); 178f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 179f8a47341SAlan Cox KASSERT(LIST_EMPTY(&object->rvq), 180f8a47341SAlan Cox ("object %p has reservations", 181f8a47341SAlan Cox object)); 182f8a47341SAlan Cox #endif 1838355f576SJeff Roberson KASSERT(object->paging_in_progress == 0, 1848355f576SJeff Roberson ("object %p paging_in_progress = %d", 1858355f576SJeff Roberson object, object->paging_in_progress)); 1868355f576SJeff Roberson KASSERT(object->resident_page_count == 0, 1878355f576SJeff Roberson ("object %p resident_page_count = %d", 1888355f576SJeff Roberson object, object->resident_page_count)); 1898355f576SJeff Roberson KASSERT(object->shadow_count == 0, 1908355f576SJeff Roberson ("object %p shadow_count = %d", 1918355f576SJeff Roberson object, object->shadow_count)); 192e735691bSJohn Baldwin KASSERT(object->type == OBJT_DEAD, 193e735691bSJohn Baldwin ("object %p has non-dead type %d", 194e735691bSJohn Baldwin object, object->type)); 1958355f576SJeff Roberson } 1968355f576SJeff Roberson #endif 1978355f576SJeff Roberson 198b23f72e9SBrian Feldman static int 199b23f72e9SBrian Feldman vm_object_zinit(void *mem, int size, int flags) 2008355f576SJeff Roberson { 2018355f576SJeff Roberson vm_object_t object; 2028355f576SJeff Roberson 2038355f576SJeff Roberson object = (vm_object_t)mem; 204777a36c5SAlan Cox rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW); 2058355f576SJeff Roberson 2068355f576SJeff Roberson /* These are true for any object that has been freed */ 207e735691bSJohn Baldwin object->type = OBJT_DEAD; 208e735691bSJohn Baldwin object->ref_count = 0; 209cd1241fbSKonstantin Belousov vm_radix_init(&object->rtree); 2108355f576SJeff Roberson object->paging_in_progress = 0; 2118355f576SJeff Roberson object->resident_page_count = 0; 2128355f576SJeff Roberson object->shadow_count = 0; 213f425ab8eSKonstantin Belousov object->flags = OBJ_DEAD; 214e735691bSJohn Baldwin 215e735691bSJohn Baldwin mtx_lock(&vm_object_list_mtx); 216e735691bSJohn Baldwin TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); 217e735691bSJohn Baldwin mtx_unlock(&vm_object_list_mtx); 218b23f72e9SBrian Feldman return (0); 2198355f576SJeff Roberson } 220df8bae1dSRodney W. Grimes 221a4915c21SAttilio Rao static void 2226395da54SIan Dowse _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) 223df8bae1dSRodney W. Grimes { 2240cddd8f0SMatthew Dillon 225df8bae1dSRodney W. Grimes TAILQ_INIT(&object->memq); 2261c500307SAlan Cox LIST_INIT(&object->shadow_head); 227a1f6d91cSDavid Greenman 22824a1cce3SDavid Greenman object->type = type; 229f425ab8eSKonstantin Belousov if (type == OBJT_SWAP) 230f425ab8eSKonstantin Belousov pctrie_init(&object->un_pager.swp.swp_blks); 231f425ab8eSKonstantin Belousov 232f425ab8eSKonstantin Belousov /* 233f425ab8eSKonstantin Belousov * Ensure that swap_pager_swapoff() iteration over object_list 234f425ab8eSKonstantin Belousov * sees up to date type and pctrie head if it observed 235f425ab8eSKonstantin Belousov * non-dead object. 236f425ab8eSKonstantin Belousov */ 237f425ab8eSKonstantin Belousov atomic_thread_fence_rel(); 238f425ab8eSKonstantin Belousov 23928634820SAlan Cox switch (type) { 24028634820SAlan Cox case OBJT_DEAD: 24128634820SAlan Cox panic("_vm_object_allocate: can't create OBJT_DEAD"); 24228634820SAlan Cox case OBJT_DEFAULT: 24328634820SAlan Cox case OBJT_SWAP: 24428634820SAlan Cox object->flags = OBJ_ONEMAPPING; 24528634820SAlan Cox break; 24628634820SAlan Cox case OBJT_DEVICE: 24728634820SAlan Cox case OBJT_SG: 24828634820SAlan Cox object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED; 24928634820SAlan Cox break; 25028634820SAlan Cox case OBJT_MGTDEVICE: 25128634820SAlan Cox object->flags = OBJ_FICTITIOUS; 25228634820SAlan Cox break; 25328634820SAlan Cox case OBJT_PHYS: 25428634820SAlan Cox object->flags = OBJ_UNMANAGED; 25528634820SAlan Cox break; 25628634820SAlan Cox case OBJT_VNODE: 25728634820SAlan Cox object->flags = 0; 25828634820SAlan Cox break; 25928634820SAlan Cox default: 26028634820SAlan Cox panic("_vm_object_allocate: type %d is undefined", type); 26128634820SAlan Cox } 262df8bae1dSRodney W. Grimes object->size = size; 263b881da26SAlan Cox object->generation = 1; 264a1f6d91cSDavid Greenman object->ref_count = 1; 2653153e878SAlan Cox object->memattr = VM_MEMATTR_DEFAULT; 266ef694c1aSEdward Tomasz Napierala object->cred = NULL; 2673364c323SKonstantin Belousov object->charge = 0; 26824a1cce3SDavid Greenman object->handle = NULL; 26924a1cce3SDavid Greenman object->backing_object = NULL; 270a316d390SJohn Dyson object->backing_object_offset = (vm_ooffset_t) 0; 271f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 272f8a47341SAlan Cox LIST_INIT(&object->rvq); 273f8a47341SAlan Cox #endif 2741bdbd705SKonstantin Belousov umtx_shm_object_init(object); 275df8bae1dSRodney W. Grimes } 276df8bae1dSRodney W. Grimes 277df8bae1dSRodney W. Grimes /* 27826f9a767SRodney W. Grimes * vm_object_init: 27926f9a767SRodney W. Grimes * 28026f9a767SRodney W. Grimes * Initialize the VM objects module. 28126f9a767SRodney W. Grimes */ 28226f9a767SRodney W. Grimes void 2831b40f8c0SMatthew Dillon vm_object_init(void) 28426f9a767SRodney W. Grimes { 28526f9a767SRodney W. Grimes TAILQ_INIT(&vm_object_list); 2866008862bSJohn Baldwin mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); 2870217125fSDavid Greenman 28889f6b863SAttilio Rao rw_init(&kernel_object->lock, "kernel vm object"); 289d1780e8dSKonstantin Belousov _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS - 290d1780e8dSKonstantin Belousov VM_MIN_KERNEL_ADDRESS), kernel_object); 291f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 292f8a47341SAlan Cox kernel_object->flags |= OBJ_COLORED; 293f8a47341SAlan Cox kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); 294f8a47341SAlan Cox #endif 29526f9a767SRodney W. Grimes 2968dbca793STor Egge /* 2978dbca793STor Egge * The lock portion of struct vm_object must be type stable due 2988dbca793STor Egge * to vm_pageout_fallback_object_lock locking a vm object 2998dbca793STor Egge * without holding any references to it. 3008dbca793STor Egge */ 3018355f576SJeff Roberson obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL, 3028355f576SJeff Roberson #ifdef INVARIANTS 3038355f576SJeff Roberson vm_object_zdtor, 3048355f576SJeff Roberson #else 3058355f576SJeff Roberson NULL, 3068355f576SJeff Roberson #endif 3075df87b21SJeff Roberson vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 308774d251dSAttilio Rao 309cd1241fbSKonstantin Belousov vm_radix_zinit(); 31099448ed1SJohn Dyson } 31199448ed1SJohn Dyson 31299448ed1SJohn Dyson void 3131b40f8c0SMatthew Dillon vm_object_clear_flag(vm_object_t object, u_short bits) 3141b40f8c0SMatthew Dillon { 3155440b5a9SAlan Cox 31689f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 317b06805adSJake Burkholder object->flags &= ~bits; 3181b40f8c0SMatthew Dillon } 3191b40f8c0SMatthew Dillon 3203153e878SAlan Cox /* 3213153e878SAlan Cox * Sets the default memory attribute for the specified object. Pages 3223153e878SAlan Cox * that are allocated to this object are by default assigned this memory 3233153e878SAlan Cox * attribute. 3243153e878SAlan Cox * 3253153e878SAlan Cox * Presently, this function must be called before any pages are allocated 3263153e878SAlan Cox * to the object. In the future, this requirement may be relaxed for 3273153e878SAlan Cox * "default" and "swap" objects. 3283153e878SAlan Cox */ 3293153e878SAlan Cox int 3303153e878SAlan Cox vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr) 3313153e878SAlan Cox { 3323153e878SAlan Cox 33389f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 3343153e878SAlan Cox switch (object->type) { 3353153e878SAlan Cox case OBJT_DEFAULT: 3363153e878SAlan Cox case OBJT_DEVICE: 33796b0b92aSAlan Cox case OBJT_MGTDEVICE: 3383153e878SAlan Cox case OBJT_PHYS: 33901381811SJohn Baldwin case OBJT_SG: 3403153e878SAlan Cox case OBJT_SWAP: 3413153e878SAlan Cox case OBJT_VNODE: 3423153e878SAlan Cox if (!TAILQ_EMPTY(&object->memq)) 3433153e878SAlan Cox return (KERN_FAILURE); 3443153e878SAlan Cox break; 3453153e878SAlan Cox case OBJT_DEAD: 3463153e878SAlan Cox return (KERN_INVALID_ARGUMENT); 34796b0b92aSAlan Cox default: 34896b0b92aSAlan Cox panic("vm_object_set_memattr: object %p is of undefined type", 34996b0b92aSAlan Cox object); 3503153e878SAlan Cox } 3513153e878SAlan Cox object->memattr = memattr; 3523153e878SAlan Cox return (KERN_SUCCESS); 3533153e878SAlan Cox } 3543153e878SAlan Cox 3551b40f8c0SMatthew Dillon void 3561b40f8c0SMatthew Dillon vm_object_pip_add(vm_object_t object, short i) 3571b40f8c0SMatthew Dillon { 358f279b88dSAlan Cox 35989f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 360b06805adSJake Burkholder object->paging_in_progress += i; 3611b40f8c0SMatthew Dillon } 3621b40f8c0SMatthew Dillon 3631b40f8c0SMatthew Dillon void 3641b40f8c0SMatthew Dillon vm_object_pip_subtract(vm_object_t object, short i) 3651b40f8c0SMatthew Dillon { 366d647a0edSAlan Cox 36789f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 368b06805adSJake Burkholder object->paging_in_progress -= i; 3691b40f8c0SMatthew Dillon } 3701b40f8c0SMatthew Dillon 3711b40f8c0SMatthew Dillon void 3721b40f8c0SMatthew Dillon vm_object_pip_wakeup(vm_object_t object) 3731b40f8c0SMatthew Dillon { 374f279b88dSAlan Cox 37589f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 376b06805adSJake Burkholder object->paging_in_progress--; 3771b40f8c0SMatthew Dillon if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { 3781b40f8c0SMatthew Dillon vm_object_clear_flag(object, OBJ_PIPWNT); 3791b40f8c0SMatthew Dillon wakeup(object); 3801b40f8c0SMatthew Dillon } 3811b40f8c0SMatthew Dillon } 3821b40f8c0SMatthew Dillon 3831b40f8c0SMatthew Dillon void 3841b40f8c0SMatthew Dillon vm_object_pip_wakeupn(vm_object_t object, short i) 3851b40f8c0SMatthew Dillon { 386d647a0edSAlan Cox 38789f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 3881b40f8c0SMatthew Dillon if (i) 389b06805adSJake Burkholder object->paging_in_progress -= i; 3901b40f8c0SMatthew Dillon if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { 3911b40f8c0SMatthew Dillon vm_object_clear_flag(object, OBJ_PIPWNT); 3921b40f8c0SMatthew Dillon wakeup(object); 3931b40f8c0SMatthew Dillon } 3941b40f8c0SMatthew Dillon } 3951b40f8c0SMatthew Dillon 3961b40f8c0SMatthew Dillon void 3971b40f8c0SMatthew Dillon vm_object_pip_wait(vm_object_t object, char *waitid) 3981b40f8c0SMatthew Dillon { 3991ca58953SAlan Cox 40089f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 4011ca58953SAlan Cox while (object->paging_in_progress) { 4021ca58953SAlan Cox object->flags |= OBJ_PIPWNT; 4030dde287bSAttilio Rao VM_OBJECT_SLEEP(object, object, PVM, waitid, 0); 4041ca58953SAlan Cox } 4051b40f8c0SMatthew Dillon } 4061b40f8c0SMatthew Dillon 40726f9a767SRodney W. Grimes /* 40826f9a767SRodney W. Grimes * vm_object_allocate: 40926f9a767SRodney W. Grimes * 41026f9a767SRodney W. Grimes * Returns a new object with the given size. 41126f9a767SRodney W. Grimes */ 41226f9a767SRodney W. Grimes vm_object_t 4136395da54SIan Dowse vm_object_allocate(objtype_t type, vm_pindex_t size) 41426f9a767SRodney W. Grimes { 41590688d13SAlan Cox vm_object_t object; 41690688d13SAlan Cox 41790688d13SAlan Cox object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); 41890688d13SAlan Cox _vm_object_allocate(type, size, object); 41990688d13SAlan Cox return (object); 42026f9a767SRodney W. Grimes } 42126f9a767SRodney W. Grimes 42226f9a767SRodney W. Grimes 42326f9a767SRodney W. Grimes /* 424df8bae1dSRodney W. Grimes * vm_object_reference: 425df8bae1dSRodney W. Grimes * 42615347817SAlan Cox * Gets another reference to the given object. Note: OBJ_DEAD 42715347817SAlan Cox * objects can be referenced during final cleaning. 428df8bae1dSRodney W. Grimes */ 4296476c0d2SJohn Dyson void 4301b40f8c0SMatthew Dillon vm_object_reference(vm_object_t object) 431df8bae1dSRodney W. Grimes { 432df8bae1dSRodney W. Grimes if (object == NULL) 433df8bae1dSRodney W. Grimes return; 43489f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 43552481a9aSJeff Roberson vm_object_reference_locked(object); 43689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 43795e5e988SJohn Dyson } 43895e5e988SJohn Dyson 43923955314SAlfred Perlstein /* 440b921a12bSAlan Cox * vm_object_reference_locked: 441b921a12bSAlan Cox * 442b921a12bSAlan Cox * Gets another reference to the given object. 443b921a12bSAlan Cox * 444b921a12bSAlan Cox * The object must be locked. 445b921a12bSAlan Cox */ 446b921a12bSAlan Cox void 447b921a12bSAlan Cox vm_object_reference_locked(vm_object_t object) 448b921a12bSAlan Cox { 449b921a12bSAlan Cox struct vnode *vp; 450b921a12bSAlan Cox 45189f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 452b921a12bSAlan Cox object->ref_count++; 453b921a12bSAlan Cox if (object->type == OBJT_VNODE) { 454b921a12bSAlan Cox vp = object->handle; 455b921a12bSAlan Cox vref(vp); 456b921a12bSAlan Cox } 457b921a12bSAlan Cox } 458b921a12bSAlan Cox 459b921a12bSAlan Cox /* 4609d5abbddSJens Schweikhardt * Handle deallocating an object of type OBJT_VNODE. 46123955314SAlfred Perlstein */ 46202dd8331SAlan Cox static void 4631b40f8c0SMatthew Dillon vm_object_vndeallocate(vm_object_t object) 46495e5e988SJohn Dyson { 46595e5e988SJohn Dyson struct vnode *vp = (struct vnode *) object->handle; 466219cbf59SEivind Eklund 46789f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 4685526d2d9SEivind Eklund KASSERT(object->type == OBJT_VNODE, 4695526d2d9SEivind Eklund ("vm_object_vndeallocate: not a vnode object")); 470219cbf59SEivind Eklund KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp")); 471219cbf59SEivind Eklund #ifdef INVARIANTS 47295e5e988SJohn Dyson if (object->ref_count == 0) { 473411455a8SEdward Tomasz Napierala vn_printf(vp, "vm_object_vndeallocate "); 47495e5e988SJohn Dyson panic("vm_object_vndeallocate: bad object reference count"); 47595e5e988SJohn Dyson } 47695e5e988SJohn Dyson #endif 47795e5e988SJohn Dyson 4782a339d9eSKonstantin Belousov if (!umtx_shm_vnobj_persistent && object->ref_count == 1) 4791bdbd705SKonstantin Belousov umtx_shm_object_terminated(object); 4801bdbd705SKonstantin Belousov 48130d57414SKonstantin Belousov /* 48230d57414SKonstantin Belousov * The test for text of vp vnode does not need a bypass to 48330d57414SKonstantin Belousov * reach right VV_TEXT there, since it is obtained from 48430d57414SKonstantin Belousov * object->handle. 48530d57414SKonstantin Belousov */ 48630d57414SKonstantin Belousov if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) { 48795e5e988SJohn Dyson object->ref_count--; 48889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 48903fa5b34SKonstantin Belousov /* vrele may need the vnode lock. */ 49047221757SJohn Dyson vrele(vp); 49103fa5b34SKonstantin Belousov } else { 49286769ac0SKonstantin Belousov vhold(vp); 49389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 49403fa5b34SKonstantin Belousov vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 49586769ac0SKonstantin Belousov vdrop(vp); 49689f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 49703fa5b34SKonstantin Belousov object->ref_count--; 49886769ac0SKonstantin Belousov if (object->type == OBJT_DEAD) { 49989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 50086769ac0SKonstantin Belousov VOP_UNLOCK(vp, 0); 50186769ac0SKonstantin Belousov } else { 50203fa5b34SKonstantin Belousov if (object->ref_count == 0) 503877d24acSKonstantin Belousov VOP_UNSET_TEXT(vp); 50489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 50503fa5b34SKonstantin Belousov vput(vp); 50603fa5b34SKonstantin Belousov } 507df8bae1dSRodney W. Grimes } 50886769ac0SKonstantin Belousov } 509df8bae1dSRodney W. Grimes 510df8bae1dSRodney W. Grimes /* 511df8bae1dSRodney W. Grimes * vm_object_deallocate: 512df8bae1dSRodney W. Grimes * 513df8bae1dSRodney W. Grimes * Release a reference to the specified object, 514df8bae1dSRodney W. Grimes * gained either through a vm_object_allocate 515df8bae1dSRodney W. Grimes * or a vm_object_reference call. When all references 516df8bae1dSRodney W. Grimes * are gone, storage associated with this object 517df8bae1dSRodney W. Grimes * may be relinquished. 518df8bae1dSRodney W. Grimes * 519df8bae1dSRodney W. Grimes * No object may be locked. 520df8bae1dSRodney W. Grimes */ 52126f9a767SRodney W. Grimes void 5221b40f8c0SMatthew Dillon vm_object_deallocate(vm_object_t object) 523df8bae1dSRodney W. Grimes { 524df8bae1dSRodney W. Grimes vm_object_t temp; 5256f2af3fcSKonstantin Belousov struct vnode *vp; 526df8bae1dSRodney W. Grimes 527df8bae1dSRodney W. Grimes while (object != NULL) { 52889f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 5293b68228cSAlan Cox if (object->type == OBJT_VNODE) { 53095e5e988SJohn Dyson vm_object_vndeallocate(object); 53123b186d3SAlan Cox return; 5325050aa86SKonstantin Belousov } 53395e5e988SJohn Dyson 5348125b1e6SAlfred Perlstein KASSERT(object->ref_count != 0, 5358125b1e6SAlfred Perlstein ("vm_object_deallocate: object deallocated too many times: %d", object->type)); 5362be70f79SJohn Dyson 5372be70f79SJohn Dyson /* 5388125b1e6SAlfred Perlstein * If the reference count goes to 0 we start calling 5398125b1e6SAlfred Perlstein * vm_object_terminate() on the object chain. 5408125b1e6SAlfred Perlstein * A ref count of 1 may be a special case depending on the 5418125b1e6SAlfred Perlstein * shadow count being 0 or 1. 5422be70f79SJohn Dyson */ 543c0877f10SJohn Dyson object->ref_count--; 5448125b1e6SAlfred Perlstein if (object->ref_count > 1) { 54589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 54623b186d3SAlan Cox return; 5478125b1e6SAlfred Perlstein } else if (object->ref_count == 1) { 5486f2af3fcSKonstantin Belousov if (object->type == OBJT_SWAP && 5496f2af3fcSKonstantin Belousov (object->flags & OBJ_TMPFS) != 0) { 5506f2af3fcSKonstantin Belousov vp = object->un_pager.swp.swp_tmpfs; 5516f2af3fcSKonstantin Belousov vhold(vp); 5526f2af3fcSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 5536f2af3fcSKonstantin Belousov vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5546f2af3fcSKonstantin Belousov VM_OBJECT_WLOCK(object); 5557560005cSKonstantin Belousov if (object->type == OBJT_DEAD || 5567560005cSKonstantin Belousov object->ref_count != 1) { 5576f2af3fcSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 5586f2af3fcSKonstantin Belousov VOP_UNLOCK(vp, 0); 5592309fa9bSKonstantin Belousov vdrop(vp); 5606f2af3fcSKonstantin Belousov return; 5617560005cSKonstantin Belousov } 5627560005cSKonstantin Belousov if ((object->flags & OBJ_TMPFS) != 0) 5636f2af3fcSKonstantin Belousov VOP_UNSET_TEXT(vp); 5646f2af3fcSKonstantin Belousov VOP_UNLOCK(vp, 0); 5652309fa9bSKonstantin Belousov vdrop(vp); 5666f2af3fcSKonstantin Belousov } 5674c8e0452SAlan Cox if (object->shadow_count == 0 && 5684c8e0452SAlan Cox object->handle == NULL && 5694c8e0452SAlan Cox (object->type == OBJT_DEFAULT || 5706f2af3fcSKonstantin Belousov (object->type == OBJT_SWAP && 571f08f7dcaSKonstantin Belousov (object->flags & OBJ_TMPFS_NODE) == 0))) { 5728125b1e6SAlfred Perlstein vm_object_set_flag(object, OBJ_ONEMAPPING); 5738125b1e6SAlfred Perlstein } else if ((object->shadow_count == 1) && 5748125b1e6SAlfred Perlstein (object->handle == NULL) && 57524a1cce3SDavid Greenman (object->type == OBJT_DEFAULT || 57624a1cce3SDavid Greenman object->type == OBJT_SWAP)) { 577a1f6d91cSDavid Greenman vm_object_t robject; 57895e5e988SJohn Dyson 5791c500307SAlan Cox robject = LIST_FIRST(&object->shadow_head); 5805526d2d9SEivind Eklund KASSERT(robject != NULL, 581219cbf59SEivind Eklund ("vm_object_deallocate: ref_count: %d, shadow_count: %d", 5825526d2d9SEivind Eklund object->ref_count, 5835526d2d9SEivind Eklund object->shadow_count)); 5844bace8e7SKonstantin Belousov KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0, 5854bace8e7SKonstantin Belousov ("shadowed tmpfs v_object %p", object)); 58689f6b863SAttilio Rao if (!VM_OBJECT_TRYWLOCK(robject)) { 587b72b0115SAlan Cox /* 588b72b0115SAlan Cox * Avoid a potential deadlock. 589b72b0115SAlan Cox */ 590b72b0115SAlan Cox object->ref_count++; 59189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 592a7d86121SAlan Cox /* 593a7d86121SAlan Cox * More likely than not the thread 594a7d86121SAlan Cox * holding robject's lock has lower 595a7d86121SAlan Cox * priority than the current thread. 596a7d86121SAlan Cox * Let the lower priority thread run. 597a7d86121SAlan Cox */ 5988db5fc58SJohn Baldwin pause("vmo_de", 1); 599b72b0115SAlan Cox continue; 600b72b0115SAlan Cox } 601d936694fSAlan Cox /* 602d936694fSAlan Cox * Collapse object into its shadow unless its 603d936694fSAlan Cox * shadow is dead. In that case, object will 604d936694fSAlan Cox * be deallocated by the thread that is 605d936694fSAlan Cox * deallocating its shadow. 606d936694fSAlan Cox */ 607d936694fSAlan Cox if ((robject->flags & OBJ_DEAD) == 0 && 608d936694fSAlan Cox (robject->handle == NULL) && 60924a1cce3SDavid Greenman (robject->type == OBJT_DEFAULT || 61024a1cce3SDavid Greenman robject->type == OBJT_SWAP)) { 611a1f6d91cSDavid Greenman 61295e5e988SJohn Dyson robject->ref_count++; 613138449dcSAlan Cox retry: 614138449dcSAlan Cox if (robject->paging_in_progress) { 61589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 616138449dcSAlan Cox vm_object_pip_wait(robject, 617138449dcSAlan Cox "objde1"); 6182e9f4a69SAlan Cox temp = robject->backing_object; 6192e9f4a69SAlan Cox if (object == temp) { 62089f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 621138449dcSAlan Cox goto retry; 6222e9f4a69SAlan Cox } 623138449dcSAlan Cox } else if (object->paging_in_progress) { 62489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(robject); 625138449dcSAlan Cox object->flags |= OBJ_PIPWNT; 6260dde287bSAttilio Rao VM_OBJECT_SLEEP(object, object, 627138449dcSAlan Cox PDROP | PVM, "objde2", 0); 62889f6b863SAttilio Rao VM_OBJECT_WLOCK(robject); 6292e9f4a69SAlan Cox temp = robject->backing_object; 6302e9f4a69SAlan Cox if (object == temp) { 63189f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 632138449dcSAlan Cox goto retry; 633a1f6d91cSDavid Greenman } 6342e9f4a69SAlan Cox } else 63589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 6362e9f4a69SAlan Cox 63795e5e988SJohn Dyson if (robject->ref_count == 1) { 63895e5e988SJohn Dyson robject->ref_count--; 639ba8da839SDavid Greenman object = robject; 64095e5e988SJohn Dyson goto doterm; 64195e5e988SJohn Dyson } 64295e5e988SJohn Dyson object = robject; 64395e5e988SJohn Dyson vm_object_collapse(object); 64489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 645ba8da839SDavid Greenman continue; 646a1f6d91cSDavid Greenman } 64789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(robject); 64895e5e988SJohn Dyson } 64989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 65023b186d3SAlan Cox return; 65195e5e988SJohn Dyson } 65295e5e988SJohn Dyson doterm: 6531bdbd705SKonstantin Belousov umtx_shm_object_terminated(object); 65424a1cce3SDavid Greenman temp = object->backing_object; 655c9917419SAlan Cox if (temp != NULL) { 6564bace8e7SKonstantin Belousov KASSERT((object->flags & OBJ_TMPFS_NODE) == 0, 6574bace8e7SKonstantin Belousov ("shadowed tmpfs v_object 2 %p", object)); 65889f6b863SAttilio Rao VM_OBJECT_WLOCK(temp); 6591c500307SAlan Cox LIST_REMOVE(object, shadow_list); 66095e5e988SJohn Dyson temp->shadow_count--; 66189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(temp); 66295461b45SJohn Dyson object->backing_object = NULL; 663de5f6a77SJohn Dyson } 664245df27cSMatthew Dillon /* 665245df27cSMatthew Dillon * Don't double-terminate, we could be in a termination 666245df27cSMatthew Dillon * recursion due to the terminate having to sync data 667245df27cSMatthew Dillon * to disk. 668245df27cSMatthew Dillon */ 669245df27cSMatthew Dillon if ((object->flags & OBJ_DEAD) == 0) 670df8bae1dSRodney W. Grimes vm_object_terminate(object); 671c829b9d0SAlan Cox else 67289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 673df8bae1dSRodney W. Grimes object = temp; 674df8bae1dSRodney W. Grimes } 675df8bae1dSRodney W. Grimes } 676df8bae1dSRodney W. Grimes 677df8bae1dSRodney W. Grimes /* 6782ac78f0eSStephan Uphoff * vm_object_destroy removes the object from the global object list 6792ac78f0eSStephan Uphoff * and frees the space for the object. 6802ac78f0eSStephan Uphoff */ 6812ac78f0eSStephan Uphoff void 6822ac78f0eSStephan Uphoff vm_object_destroy(vm_object_t object) 6832ac78f0eSStephan Uphoff { 6842ac78f0eSStephan Uphoff 6852ac78f0eSStephan Uphoff /* 6863364c323SKonstantin Belousov * Release the allocation charge. 6873364c323SKonstantin Belousov */ 688ef694c1aSEdward Tomasz Napierala if (object->cred != NULL) { 689ef694c1aSEdward Tomasz Napierala swap_release_by_cred(object->charge, object->cred); 6903364c323SKonstantin Belousov object->charge = 0; 691ef694c1aSEdward Tomasz Napierala crfree(object->cred); 692ef694c1aSEdward Tomasz Napierala object->cred = NULL; 6933364c323SKonstantin Belousov } 6943364c323SKonstantin Belousov 6953364c323SKonstantin Belousov /* 6962ac78f0eSStephan Uphoff * Free the space for the object. 6972ac78f0eSStephan Uphoff */ 6982ac78f0eSStephan Uphoff uma_zfree(obj_zone, object); 6992ac78f0eSStephan Uphoff } 7002ac78f0eSStephan Uphoff 7012ac78f0eSStephan Uphoff /* 7027bbdb843SRuslan Bukin * vm_object_terminate_pages removes any remaining pageable pages 7037bbdb843SRuslan Bukin * from the object and resets the object to an empty state. 7047bbdb843SRuslan Bukin */ 7057bbdb843SRuslan Bukin static void 7067bbdb843SRuslan Bukin vm_object_terminate_pages(vm_object_t object) 7077bbdb843SRuslan Bukin { 7087bbdb843SRuslan Bukin vm_page_t p, p_next; 7092fcd1ff6SKonstantin Belousov struct mtx *mtx, *mtx1; 7102fcd1ff6SKonstantin Belousov struct vm_pagequeue *pq, *pq1; 7114074d642SAlan Cox int dequeued; 7127bbdb843SRuslan Bukin 7137bbdb843SRuslan Bukin VM_OBJECT_ASSERT_WLOCKED(object); 7147bbdb843SRuslan Bukin 7152fcd1ff6SKonstantin Belousov mtx = NULL; 7162fcd1ff6SKonstantin Belousov pq = NULL; 7172fcd1ff6SKonstantin Belousov 7187bbdb843SRuslan Bukin /* 7197bbdb843SRuslan Bukin * Free any remaining pageable pages. This also removes them from the 7207bbdb843SRuslan Bukin * paging queues. However, don't free wired pages, just remove them 7217bbdb843SRuslan Bukin * from the object. Rather than incrementally removing each page from 7227bbdb843SRuslan Bukin * the object, the page and object are reset to any empty state. 7237bbdb843SRuslan Bukin */ 7247bbdb843SRuslan Bukin TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) { 7257bbdb843SRuslan Bukin vm_page_assert_unbusied(p); 7262fcd1ff6SKonstantin Belousov if ((object->flags & OBJ_UNMANAGED) == 0) { 7277bbdb843SRuslan Bukin /* 7282fcd1ff6SKonstantin Belousov * vm_page_free_prep() only needs the page 7292fcd1ff6SKonstantin Belousov * lock for managed pages. 7307bbdb843SRuslan Bukin */ 7312fcd1ff6SKonstantin Belousov mtx1 = vm_page_lockptr(p); 7322fcd1ff6SKonstantin Belousov if (mtx1 != mtx) { 7332fcd1ff6SKonstantin Belousov if (mtx != NULL) 7342fcd1ff6SKonstantin Belousov mtx_unlock(mtx); 7352fcd1ff6SKonstantin Belousov if (pq != NULL) { 7364074d642SAlan Cox vm_pagequeue_cnt_add(pq, dequeued); 7372fcd1ff6SKonstantin Belousov vm_pagequeue_unlock(pq); 7382fcd1ff6SKonstantin Belousov pq = NULL; 7392fcd1ff6SKonstantin Belousov } 7402fcd1ff6SKonstantin Belousov mtx = mtx1; 7412fcd1ff6SKonstantin Belousov mtx_lock(mtx); 7422fcd1ff6SKonstantin Belousov } 7432fcd1ff6SKonstantin Belousov } 7447bbdb843SRuslan Bukin p->object = NULL; 7452fcd1ff6SKonstantin Belousov if (p->wire_count != 0) 7462fcd1ff6SKonstantin Belousov goto unlist; 7477bbdb843SRuslan Bukin VM_CNT_INC(v_pfree); 7482fcd1ff6SKonstantin Belousov p->flags &= ~PG_ZERO; 7492fcd1ff6SKonstantin Belousov if (p->queue != PQ_NONE) { 7502fcd1ff6SKonstantin Belousov KASSERT(p->queue < PQ_COUNT, ("vm_object_terminate: " 7512fcd1ff6SKonstantin Belousov "page %p is not queued", p)); 7522fcd1ff6SKonstantin Belousov pq1 = vm_page_pagequeue(p); 7532fcd1ff6SKonstantin Belousov if (pq != pq1) { 7544074d642SAlan Cox if (pq != NULL) { 7554074d642SAlan Cox vm_pagequeue_cnt_add(pq, dequeued); 7562fcd1ff6SKonstantin Belousov vm_pagequeue_unlock(pq); 7574074d642SAlan Cox } 7582fcd1ff6SKonstantin Belousov pq = pq1; 7592fcd1ff6SKonstantin Belousov vm_pagequeue_lock(pq); 7604074d642SAlan Cox dequeued = 0; 7617bbdb843SRuslan Bukin } 7624074d642SAlan Cox p->queue = PQ_NONE; 7634074d642SAlan Cox TAILQ_REMOVE(&pq->pq_pl, p, plinks.q); 7644074d642SAlan Cox dequeued--; 7657bbdb843SRuslan Bukin } 7662fcd1ff6SKonstantin Belousov if (vm_page_free_prep(p, true)) 7672fcd1ff6SKonstantin Belousov continue; 7682fcd1ff6SKonstantin Belousov unlist: 7692fcd1ff6SKonstantin Belousov TAILQ_REMOVE(&object->memq, p, listq); 7702fcd1ff6SKonstantin Belousov } 7714074d642SAlan Cox if (pq != NULL) { 7724074d642SAlan Cox vm_pagequeue_cnt_add(pq, dequeued); 7732fcd1ff6SKonstantin Belousov vm_pagequeue_unlock(pq); 7744074d642SAlan Cox } 7752fcd1ff6SKonstantin Belousov if (mtx != NULL) 7762fcd1ff6SKonstantin Belousov mtx_unlock(mtx); 7772fcd1ff6SKonstantin Belousov 7782fcd1ff6SKonstantin Belousov vm_page_free_phys_pglist(&object->memq); 7792fcd1ff6SKonstantin Belousov 7807bbdb843SRuslan Bukin /* 7817bbdb843SRuslan Bukin * If the object contained any pages, then reset it to an empty state. 7827bbdb843SRuslan Bukin * None of the object's fields, including "resident_page_count", were 7837bbdb843SRuslan Bukin * modified by the preceding loop. 7847bbdb843SRuslan Bukin */ 7857bbdb843SRuslan Bukin if (object->resident_page_count != 0) { 7867bbdb843SRuslan Bukin vm_radix_reclaim_allnodes(&object->rtree); 7877bbdb843SRuslan Bukin TAILQ_INIT(&object->memq); 7887bbdb843SRuslan Bukin object->resident_page_count = 0; 7897bbdb843SRuslan Bukin if (object->type == OBJT_VNODE) 7907bbdb843SRuslan Bukin vdrop(object->handle); 7917bbdb843SRuslan Bukin } 7927bbdb843SRuslan Bukin } 7937bbdb843SRuslan Bukin 7947bbdb843SRuslan Bukin /* 795df8bae1dSRodney W. Grimes * vm_object_terminate actually destroys the specified object, freeing 796df8bae1dSRodney W. Grimes * up all previously used resources. 797df8bae1dSRodney W. Grimes * 798df8bae1dSRodney W. Grimes * The object must be locked. 7991c7c3c6aSMatthew Dillon * This routine may block. 800df8bae1dSRodney W. Grimes */ 80195e5e988SJohn Dyson void 8021b40f8c0SMatthew Dillon vm_object_terminate(vm_object_t object) 803df8bae1dSRodney W. Grimes { 804df8bae1dSRodney W. Grimes 80589f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 8060cddd8f0SMatthew Dillon 80795e5e988SJohn Dyson /* 80895e5e988SJohn Dyson * Make sure no one uses us. 80995e5e988SJohn Dyson */ 810069e9bc1SDoug Rabson vm_object_set_flag(object, OBJ_DEAD); 8113c631446SJohn Dyson 812df8bae1dSRodney W. Grimes /* 813f6b04d2bSDavid Greenman * wait for the pageout daemon to be done with the object 814df8bae1dSRodney W. Grimes */ 81566095752SJohn Dyson vm_object_pip_wait(object, "objtrm"); 816df8bae1dSRodney W. Grimes 8175526d2d9SEivind Eklund KASSERT(!object->paging_in_progress, 8185526d2d9SEivind Eklund ("vm_object_terminate: pageout in progress")); 81926f9a767SRodney W. Grimes 82026f9a767SRodney W. Grimes /* 8210d94caffSDavid Greenman * Clean and free the pages, as appropriate. All references to the 8220d94caffSDavid Greenman * object are gone, so we don't need to lock it. 82326f9a767SRodney W. Grimes */ 82424a1cce3SDavid Greenman if (object->type == OBJT_VNODE) { 825f7dd7b63SAlan Cox struct vnode *vp = (struct vnode *)object->handle; 82695e5e988SJohn Dyson 82795e5e988SJohn Dyson /* 82895e5e988SJohn Dyson * Clean pages and flush buffers. 82995e5e988SJohn Dyson */ 8308f9110f6SJohn Dyson vm_object_page_clean(object, 0, 0, OBJPC_SYNC); 83189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 83295e5e988SJohn Dyson 8330d7935fdSAttilio Rao vinvalbuf(vp, V_SAVE, 0, 0); 834f7dd7b63SAlan Cox 83519efd8a5SKonstantin Belousov BO_LOCK(&vp->v_bufobj); 83619efd8a5SKonstantin Belousov vp->v_bufobj.bo_flag |= BO_DEAD; 83719efd8a5SKonstantin Belousov BO_UNLOCK(&vp->v_bufobj); 83819efd8a5SKonstantin Belousov 83989f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 840bef608bdSJohn Dyson } 841bef608bdSJohn Dyson 842971dd342SAlfred Perlstein KASSERT(object->ref_count == 0, 843971dd342SAlfred Perlstein ("vm_object_terminate: object with references, ref_count=%d", 844971dd342SAlfred Perlstein object->ref_count)); 845996c772fSJohn Dyson 8467bbdb843SRuslan Bukin if ((object->flags & OBJ_PG_DTOR) == 0) 8477bbdb843SRuslan Bukin vm_object_terminate_pages(object); 848bef608bdSJohn Dyson 849f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 850f8a47341SAlan Cox if (__predict_false(!LIST_EMPTY(&object->rvq))) 851f8a47341SAlan Cox vm_reserv_break_all(object); 852f8a47341SAlan Cox #endif 8537bfda801SAlan Cox 854e735691bSJohn Baldwin KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT || 855e735691bSJohn Baldwin object->type == OBJT_SWAP, 856e735691bSJohn Baldwin ("%s: non-swap obj %p has cred", __func__, object)); 857e735691bSJohn Baldwin 8582d8acc0fSJohn Dyson /* 8599fcfb650SDavid Greenman * Let the pager know object is dead. 8609fcfb650SDavid Greenman */ 8619fcfb650SDavid Greenman vm_pager_deallocate(object); 86289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 8639fcfb650SDavid Greenman 8642ac78f0eSStephan Uphoff vm_object_destroy(object); 86547221757SJohn Dyson } 866df8bae1dSRodney W. Grimes 867edf93b25SAlan Cox /* 868edf93b25SAlan Cox * Make the page read-only so that we can clear the object flags. However, if 869edf93b25SAlan Cox * this is a nosync mmap then the object is likely to stay dirty so do not 870edf93b25SAlan Cox * mess with the page and do not clear the object flags. Returns TRUE if the 871edf93b25SAlan Cox * page should be flushed, and FALSE otherwise. 872edf93b25SAlan Cox */ 8733280870dSKonstantin Belousov static boolean_t 874126d6082SKonstantin Belousov vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags) 8753280870dSKonstantin Belousov { 8763280870dSKonstantin Belousov 8773280870dSKonstantin Belousov /* 8783280870dSKonstantin Belousov * If we have been asked to skip nosync pages and this is a 8793280870dSKonstantin Belousov * nosync page, skip it. Note that the object flags were not 8803280870dSKonstantin Belousov * cleared in this case so we do not have to set them. 8813280870dSKonstantin Belousov */ 8823280870dSKonstantin Belousov if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) { 883126d6082SKonstantin Belousov *clearobjflags = FALSE; 8843280870dSKonstantin Belousov return (FALSE); 8853280870dSKonstantin Belousov } else { 8863280870dSKonstantin Belousov pmap_remove_write(p); 8873280870dSKonstantin Belousov return (p->dirty != 0); 8883280870dSKonstantin Belousov } 8893280870dSKonstantin Belousov } 8903280870dSKonstantin Belousov 891df8bae1dSRodney W. Grimes /* 892df8bae1dSRodney W. Grimes * vm_object_page_clean 893df8bae1dSRodney W. Grimes * 8944f79d873SMatthew Dillon * Clean all dirty pages in the specified range of object. Leaves page 8954f79d873SMatthew Dillon * on whatever queue it is currently on. If NOSYNC is set then do not 896b146f9e5SAlan Cox * write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC), 8974f79d873SMatthew Dillon * leaving the object dirty. 89826f9a767SRodney W. Grimes * 89943b7990eSMatthew Dillon * When stuffing pages asynchronously, allow clustering. XXX we need a 90043b7990eSMatthew Dillon * synchronous clustering mode implementation. 90143b7990eSMatthew Dillon * 90226f9a767SRodney W. Grimes * Odd semantics: if start == end, we clean everything. 90326f9a767SRodney W. Grimes * 90426f9a767SRodney W. Grimes * The object must be locked. 905126d6082SKonstantin Belousov * 906126d6082SKonstantin Belousov * Returns FALSE if some page from the range was not written, as 907126d6082SKonstantin Belousov * reported by the pager, and TRUE otherwise. 90826f9a767SRodney W. Grimes */ 909126d6082SKonstantin Belousov boolean_t 91017f3095dSAlan Cox vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, 911e239bb97SKonstantin Belousov int flags) 912f6b04d2bSDavid Greenman { 913e239bb97SKonstantin Belousov vm_page_t np, p; 91417f3095dSAlan Cox vm_pindex_t pi, tend, tstart; 915126d6082SKonstantin Belousov int curgeneration, n, pagerflags; 916126d6082SKonstantin Belousov boolean_t clearobjflags, eio, res; 917f6b04d2bSDavid Greenman 91889f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 919e5f299ffSKonstantin Belousov 920e5f299ffSKonstantin Belousov /* 921e5f299ffSKonstantin Belousov * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE 922e5f299ffSKonstantin Belousov * objects. The check below prevents the function from 923e5f299ffSKonstantin Belousov * operating on non-vnode objects. 924e5f299ffSKonstantin Belousov */ 925e239bb97SKonstantin Belousov if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 || 926e239bb97SKonstantin Belousov object->resident_page_count == 0) 927126d6082SKonstantin Belousov return (TRUE); 928f6b04d2bSDavid Greenman 929e239bb97SKonstantin Belousov pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ? 930e239bb97SKonstantin Belousov VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; 931e239bb97SKonstantin Belousov pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0; 932e239bb97SKonstantin Belousov 93317f3095dSAlan Cox tstart = OFF_TO_IDX(start); 93417f3095dSAlan Cox tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); 93517f3095dSAlan Cox clearobjflags = tstart == 0 && tend >= object->size; 936126d6082SKonstantin Belousov res = TRUE; 937f6b04d2bSDavid Greenman 938bd7e5f99SJohn Dyson rescan: 9392d8acc0fSJohn Dyson curgeneration = object->generation; 9402d8acc0fSJohn Dyson 94117f3095dSAlan Cox for (p = vm_page_find_least(object, tstart); p != NULL; p = np) { 942bd7e5f99SJohn Dyson pi = p->pindex; 943e239bb97SKonstantin Belousov if (pi >= tend) 944e239bb97SKonstantin Belousov break; 945e239bb97SKonstantin Belousov np = TAILQ_NEXT(p, listq); 946e239bb97SKonstantin Belousov if (p->valid == 0) 947aef922f5SJohn Dyson continue; 948c7aebda8SAttilio Rao if (vm_page_sleep_if_busy(p, "vpcwai")) { 949e65919f9SKonstantin Belousov if (object->generation != curgeneration) { 950e65919f9SKonstantin Belousov if ((flags & OBJPC_SYNC) != 0) 951e239bb97SKonstantin Belousov goto rescan; 952e65919f9SKonstantin Belousov else 953126d6082SKonstantin Belousov clearobjflags = FALSE; 954e65919f9SKonstantin Belousov } 955780636b7SKonstantin Belousov np = vm_page_find_least(object, pi); 956780636b7SKonstantin Belousov continue; 957f6b04d2bSDavid Greenman } 9583280870dSKonstantin Belousov if (!vm_object_page_remove_write(p, flags, &clearobjflags)) 959bd7e5f99SJohn Dyson continue; 960e239bb97SKonstantin Belousov 9613280870dSKonstantin Belousov n = vm_object_page_collect_flush(object, p, pagerflags, 962126d6082SKonstantin Belousov flags, &clearobjflags, &eio); 963126d6082SKonstantin Belousov if (eio) { 964126d6082SKonstantin Belousov res = FALSE; 965126d6082SKonstantin Belousov clearobjflags = FALSE; 966126d6082SKonstantin Belousov } 967e65919f9SKonstantin Belousov if (object->generation != curgeneration) { 968e65919f9SKonstantin Belousov if ((flags & OBJPC_SYNC) != 0) 969b9b7a4beSMatthew Dillon goto rescan; 970e65919f9SKonstantin Belousov else 971126d6082SKonstantin Belousov clearobjflags = FALSE; 972e65919f9SKonstantin Belousov } 973031ec8c1SKonstantin Belousov 974031ec8c1SKonstantin Belousov /* 975031ec8c1SKonstantin Belousov * If the VOP_PUTPAGES() did a truncated write, so 976031ec8c1SKonstantin Belousov * that even the first page of the run is not fully 977031ec8c1SKonstantin Belousov * written, vm_pageout_flush() returns 0 as the run 978031ec8c1SKonstantin Belousov * length. Since the condition that caused truncated 979031ec8c1SKonstantin Belousov * write may be permanent, e.g. exhausted free space, 980031ec8c1SKonstantin Belousov * accepting n == 0 would cause an infinite loop. 981031ec8c1SKonstantin Belousov * 982031ec8c1SKonstantin Belousov * Forwarding the iterator leaves the unwritten page 983031ec8c1SKonstantin Belousov * behind, but there is not much we can do there if 984031ec8c1SKonstantin Belousov * filesystem refuses to write it. 985031ec8c1SKonstantin Belousov */ 986126d6082SKonstantin Belousov if (n == 0) { 987031ec8c1SKonstantin Belousov n = 1; 988126d6082SKonstantin Belousov clearobjflags = FALSE; 989126d6082SKonstantin Belousov } 990e239bb97SKonstantin Belousov np = vm_page_find_least(object, pi + n); 991b9b7a4beSMatthew Dillon } 992b9b7a4beSMatthew Dillon #if 0 993e239bb97SKonstantin Belousov VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0); 994b9b7a4beSMatthew Dillon #endif 995b9b7a4beSMatthew Dillon 996edf93b25SAlan Cox if (clearobjflags) 9973280870dSKonstantin Belousov vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY); 998126d6082SKonstantin Belousov return (res); 999b9b7a4beSMatthew Dillon } 1000b9b7a4beSMatthew Dillon 1001b9b7a4beSMatthew Dillon static int 10023280870dSKonstantin Belousov vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, 1003126d6082SKonstantin Belousov int flags, boolean_t *clearobjflags, boolean_t *eio) 1004b9b7a4beSMatthew Dillon { 10053157c503SKonstantin Belousov vm_page_t ma[vm_pageout_page_count], p_first, tp; 10063157c503SKonstantin Belousov int count, i, mreq, runlen; 1007b9b7a4beSMatthew Dillon 10087bec141bSKip Macy vm_page_lock_assert(p, MA_NOTOWNED); 100989f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 10103157c503SKonstantin Belousov 10113157c503SKonstantin Belousov count = 1; 10123157c503SKonstantin Belousov mreq = 0; 10133157c503SKonstantin Belousov 10143157c503SKonstantin Belousov for (tp = p; count < vm_pageout_page_count; count++) { 10153157c503SKonstantin Belousov tp = vm_page_next(tp); 1016c7aebda8SAttilio Rao if (tp == NULL || vm_page_busied(tp)) 1017bd7e5f99SJohn Dyson break; 10183280870dSKonstantin Belousov if (!vm_object_page_remove_write(tp, flags, clearobjflags)) 1019bd7e5f99SJohn Dyson break; 1020bd7e5f99SJohn Dyson } 1021aef922f5SJohn Dyson 10223157c503SKonstantin Belousov for (p_first = p; count < vm_pageout_page_count; count++) { 10233157c503SKonstantin Belousov tp = vm_page_prev(p_first); 1024c7aebda8SAttilio Rao if (tp == NULL || vm_page_busied(tp)) 1025bd7e5f99SJohn Dyson break; 10263280870dSKonstantin Belousov if (!vm_object_page_remove_write(tp, flags, clearobjflags)) 1027bd7e5f99SJohn Dyson break; 10283157c503SKonstantin Belousov p_first = tp; 10293157c503SKonstantin Belousov mreq++; 1030bd7e5f99SJohn Dyson } 1031bd7e5f99SJohn Dyson 10323157c503SKonstantin Belousov for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++) 10333157c503SKonstantin Belousov ma[i] = tp; 1034cf2819ccSJohn Dyson 1035126d6082SKonstantin Belousov vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio); 10361e8a675cSKonstantin Belousov return (runlen); 103726f9a767SRodney W. Grimes } 1038df8bae1dSRodney W. Grimes 10391efb74fbSJohn Dyson /* 1040950f8459SAlan Cox * Note that there is absolutely no sense in writing out 1041950f8459SAlan Cox * anonymous objects, so we track down the vnode object 1042950f8459SAlan Cox * to write out. 1043950f8459SAlan Cox * We invalidate (remove) all pages from the address space 1044950f8459SAlan Cox * for semantic correctness. 1045950f8459SAlan Cox * 10466bbee8e2SAlan Cox * If the backing object is a device object with unmanaged pages, then any 10476bbee8e2SAlan Cox * mappings to the specified range of pages must be removed before this 10486bbee8e2SAlan Cox * function is called. 10496bbee8e2SAlan Cox * 1050950f8459SAlan Cox * Note: certain anonymous maps, such as MAP_NOSYNC maps, 1051950f8459SAlan Cox * may start out with a NULL object. 1052950f8459SAlan Cox */ 1053126d6082SKonstantin Belousov boolean_t 1054950f8459SAlan Cox vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, 1055950f8459SAlan Cox boolean_t syncio, boolean_t invalidate) 1056950f8459SAlan Cox { 1057950f8459SAlan Cox vm_object_t backing_object; 1058950f8459SAlan Cox struct vnode *vp; 10593b582b4eSTor Egge struct mount *mp; 1060126d6082SKonstantin Belousov int error, flags, fsync_after; 1061126d6082SKonstantin Belousov boolean_t res; 1062950f8459SAlan Cox 1063950f8459SAlan Cox if (object == NULL) 1064126d6082SKonstantin Belousov return (TRUE); 1065126d6082SKonstantin Belousov res = TRUE; 1066126d6082SKonstantin Belousov error = 0; 106789f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 1068950f8459SAlan Cox while ((backing_object = object->backing_object) != NULL) { 106989f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 107056e0670fSAlan Cox offset += object->backing_object_offset; 107189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1072950f8459SAlan Cox object = backing_object; 1073950f8459SAlan Cox if (object->size < OFF_TO_IDX(offset + size)) 1074950f8459SAlan Cox size = IDX_TO_OFF(object->size) - offset; 1075950f8459SAlan Cox } 1076950f8459SAlan Cox /* 1077950f8459SAlan Cox * Flush pages if writing is allowed, invalidate them 1078950f8459SAlan Cox * if invalidation requested. Pages undergoing I/O 1079950f8459SAlan Cox * will be ignored by vm_object_page_remove(). 1080950f8459SAlan Cox * 1081950f8459SAlan Cox * We cannot lock the vnode and then wait for paging 1082950f8459SAlan Cox * to complete without deadlocking against vm_fault. 1083950f8459SAlan Cox * Instead we simply call vm_object_page_remove() and 1084950f8459SAlan Cox * allow it to block internally on a page-by-page 1085950f8459SAlan Cox * basis when it encounters pages undergoing async 1086950f8459SAlan Cox * I/O. 1087950f8459SAlan Cox */ 1088950f8459SAlan Cox if (object->type == OBJT_VNODE && 10895bf94937SKonstantin Belousov (object->flags & OBJ_MIGHTBEDIRTY) != 0 && 10905bf94937SKonstantin Belousov ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { 109189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 10923b582b4eSTor Egge (void) vn_start_write(vp, &mp, V_WAIT); 1093cb05b60aSAttilio Rao vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 109475ff604aSKonstantin Belousov if (syncio && !invalidate && offset == 0 && 1095d1780e8dSKonstantin Belousov atop(size) == object->size) { 109675ff604aSKonstantin Belousov /* 109775ff604aSKonstantin Belousov * If syncing the whole mapping of the file, 109875ff604aSKonstantin Belousov * it is faster to schedule all the writes in 109975ff604aSKonstantin Belousov * async mode, also allowing the clustering, 110075ff604aSKonstantin Belousov * and then wait for i/o to complete. 110175ff604aSKonstantin Belousov */ 110275ff604aSKonstantin Belousov flags = 0; 110375ff604aSKonstantin Belousov fsync_after = TRUE; 110475ff604aSKonstantin Belousov } else { 1105950f8459SAlan Cox flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 110675ff604aSKonstantin Belousov flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0; 110775ff604aSKonstantin Belousov fsync_after = FALSE; 110875ff604aSKonstantin Belousov } 110989f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 1110126d6082SKonstantin Belousov res = vm_object_page_clean(object, offset, offset + size, 1111126d6082SKonstantin Belousov flags); 111289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 111375ff604aSKonstantin Belousov if (fsync_after) 1114126d6082SKonstantin Belousov error = VOP_FSYNC(vp, MNT_WAIT, curthread); 111522db15c0SAttilio Rao VOP_UNLOCK(vp, 0); 11163b582b4eSTor Egge vn_finished_write(mp); 1117126d6082SKonstantin Belousov if (error != 0) 1118126d6082SKonstantin Belousov res = FALSE; 111989f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 1120950f8459SAlan Cox } 1121950f8459SAlan Cox if ((object->type == OBJT_VNODE || 1122950f8459SAlan Cox object->type == OBJT_DEVICE) && invalidate) { 11236bbee8e2SAlan Cox if (object->type == OBJT_DEVICE) 11246bbee8e2SAlan Cox /* 11256bbee8e2SAlan Cox * The option OBJPR_NOTMAPPED must be passed here 11266bbee8e2SAlan Cox * because vm_object_page_remove() cannot remove 11276bbee8e2SAlan Cox * unmanaged mappings. 11286bbee8e2SAlan Cox */ 11296bbee8e2SAlan Cox flags = OBJPR_NOTMAPPED; 11306bbee8e2SAlan Cox else if (old_msync) 11316195b24aSKonstantin Belousov flags = 0; 11326bbee8e2SAlan Cox else 11336195b24aSKonstantin Belousov flags = OBJPR_CLEANONLY; 11346bbee8e2SAlan Cox vm_object_page_remove(object, OFF_TO_IDX(offset), 11356bbee8e2SAlan Cox OFF_TO_IDX(offset + size + PAGE_MASK), flags); 1136950f8459SAlan Cox } 113789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1138126d6082SKonstantin Belousov return (res); 1139950f8459SAlan Cox } 1140950f8459SAlan Cox 1141950f8459SAlan Cox /* 1142aa3650eaSMark Johnston * Determine whether the given advice can be applied to the object. Advice is 1143aa3650eaSMark Johnston * not applied to unmanaged pages since they never belong to page queues, and 1144aa3650eaSMark Johnston * since MADV_FREE is destructive, it can apply only to anonymous pages that 1145aa3650eaSMark Johnston * have been mapped at most once. 1146aa3650eaSMark Johnston */ 1147aa3650eaSMark Johnston static bool 1148aa3650eaSMark Johnston vm_object_advice_applies(vm_object_t object, int advice) 1149aa3650eaSMark Johnston { 1150aa3650eaSMark Johnston 1151aa3650eaSMark Johnston if ((object->flags & OBJ_UNMANAGED) != 0) 1152aa3650eaSMark Johnston return (false); 1153aa3650eaSMark Johnston if (advice != MADV_FREE) 1154aa3650eaSMark Johnston return (true); 1155aa3650eaSMark Johnston return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) && 1156aa3650eaSMark Johnston (object->flags & OBJ_ONEMAPPING) != 0); 1157aa3650eaSMark Johnston } 1158aa3650eaSMark Johnston 1159aa3650eaSMark Johnston static void 1160aa3650eaSMark Johnston vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, 1161aa3650eaSMark Johnston vm_size_t size) 1162aa3650eaSMark Johnston { 1163aa3650eaSMark Johnston 1164aa3650eaSMark Johnston if (advice == MADV_FREE && object->type == OBJT_SWAP) 1165aa3650eaSMark Johnston swap_pager_freespace(object, pindex, size); 1166aa3650eaSMark Johnston } 1167aa3650eaSMark Johnston 1168aa3650eaSMark Johnston /* 1169867a482dSJohn Dyson * vm_object_madvise: 1170867a482dSJohn Dyson * 1171867a482dSJohn Dyson * Implements the madvise function at the object/page level. 11721c7c3c6aSMatthew Dillon * 1173193b9358SAlan Cox * MADV_WILLNEED (any object) 1174193b9358SAlan Cox * 1175193b9358SAlan Cox * Activate the specified pages if they are resident. 1176193b9358SAlan Cox * 1177193b9358SAlan Cox * MADV_DONTNEED (any object) 1178193b9358SAlan Cox * 1179193b9358SAlan Cox * Deactivate the specified pages if they are resident. 1180193b9358SAlan Cox * 1181193b9358SAlan Cox * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 1182193b9358SAlan Cox * OBJ_ONEMAPPING only) 1183193b9358SAlan Cox * 1184193b9358SAlan Cox * Deactivate and clean the specified pages if they are 1185193b9358SAlan Cox * resident. This permits the process to reuse the pages 1186193b9358SAlan Cox * without faulting or the kernel to reclaim the pages 1187193b9358SAlan Cox * without I/O. 1188867a482dSJohn Dyson */ 1189867a482dSJohn Dyson void 119092a59946SJohn Baldwin vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, 1191c2655a40SMark Johnston int advice) 1192867a482dSJohn Dyson { 119392a59946SJohn Baldwin vm_pindex_t tpindex; 119434567de7SAlan Cox vm_object_t backing_object, tobject; 1195aa3650eaSMark Johnston vm_page_t m, tm; 1196867a482dSJohn Dyson 1197867a482dSJohn Dyson if (object == NULL) 1198867a482dSJohn Dyson return; 1199c2655a40SMark Johnston 12006e20a165SJohn Dyson relookup: 1201aa3650eaSMark Johnston VM_OBJECT_WLOCK(object); 1202aa3650eaSMark Johnston if (!vm_object_advice_applies(object, advice)) { 1203aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1204aa3650eaSMark Johnston return; 12056e20a165SJohn Dyson } 1206aa3650eaSMark Johnston for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { 1207aa3650eaSMark Johnston tobject = object; 1208c2655a40SMark Johnston 12091ce137beSMatthew Dillon /* 1210aa3650eaSMark Johnston * If the next page isn't resident in the top-level object, we 1211aa3650eaSMark Johnston * need to search the shadow chain. When applying MADV_FREE, we 1212aa3650eaSMark Johnston * take care to release any swap space used to store 1213aa3650eaSMark Johnston * non-resident pages. 1214aa3650eaSMark Johnston */ 1215aa3650eaSMark Johnston if (m == NULL || pindex < m->pindex) { 1216aa3650eaSMark Johnston /* 1217aa3650eaSMark Johnston * Optimize a common case: if the top-level object has 1218aa3650eaSMark Johnston * no backing object, we can skip over the non-resident 1219aa3650eaSMark Johnston * range in constant time. 12201ce137beSMatthew Dillon */ 1221c2655a40SMark Johnston if (object->backing_object == NULL) { 1222c2655a40SMark Johnston tpindex = (m != NULL && m->pindex < end) ? 1223c2655a40SMark Johnston m->pindex : end; 1224aa3650eaSMark Johnston vm_object_madvise_freespace(object, advice, 1225aa3650eaSMark Johnston pindex, tpindex - pindex); 1226c2655a40SMark Johnston if ((pindex = tpindex) == end) 1227c2655a40SMark Johnston break; 1228aa3650eaSMark Johnston goto next_page; 1229aa3650eaSMark Johnston } 1230aa3650eaSMark Johnston 1231aa3650eaSMark Johnston tpindex = pindex; 1232aa3650eaSMark Johnston do { 1233aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, 1234aa3650eaSMark Johnston tpindex, 1); 12351ce137beSMatthew Dillon /* 1236aa3650eaSMark Johnston * Prepare to search the next object in the 1237aa3650eaSMark Johnston * chain. 12381ce137beSMatthew Dillon */ 123934567de7SAlan Cox backing_object = tobject->backing_object; 124034567de7SAlan Cox if (backing_object == NULL) 1241aa3650eaSMark Johnston goto next_pindex; 124289f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 1243aa3650eaSMark Johnston tpindex += 1244aa3650eaSMark Johnston OFF_TO_IDX(tobject->backing_object_offset); 12459b98b796SAlan Cox if (tobject != object) 124689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 124734567de7SAlan Cox tobject = backing_object; 1248aa3650eaSMark Johnston if (!vm_object_advice_applies(tobject, advice)) 1249aa3650eaSMark Johnston goto next_pindex; 1250aa3650eaSMark Johnston } while ((tm = vm_page_lookup(tobject, tpindex)) == 1251aa3650eaSMark Johnston NULL); 1252aa3650eaSMark Johnston } else { 1253aa3650eaSMark Johnston next_page: 1254aa3650eaSMark Johnston tm = m; 1255aa3650eaSMark Johnston m = TAILQ_NEXT(m, listq); 1256c2655a40SMark Johnston } 1257c2655a40SMark Johnston 1258867a482dSJohn Dyson /* 12596a2a3d73SAlan Cox * If the page is not in a normal state, skip it. 1260867a482dSJohn Dyson */ 1261aa3650eaSMark Johnston if (tm->valid != VM_PAGE_BITS_ALL) 1262aa3650eaSMark Johnston goto next_pindex; 1263aa3650eaSMark Johnston vm_page_lock(tm); 1264aa3650eaSMark Johnston if (tm->hold_count != 0 || tm->wire_count != 0) { 1265aa3650eaSMark Johnston vm_page_unlock(tm); 1266aa3650eaSMark Johnston goto next_pindex; 12676e20a165SJohn Dyson } 1268aa3650eaSMark Johnston KASSERT((tm->flags & PG_FICTITIOUS) == 0, 1269aa3650eaSMark Johnston ("vm_object_madvise: page %p is fictitious", tm)); 1270aa3650eaSMark Johnston KASSERT((tm->oflags & VPO_UNMANAGED) == 0, 1271aa3650eaSMark Johnston ("vm_object_madvise: page %p is not managed", tm)); 1272aa3650eaSMark Johnston if (vm_page_busied(tm)) { 1273aa3650eaSMark Johnston if (object != tobject) 1274aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(tobject); 1275aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1276c2655a40SMark Johnston if (advice == MADV_WILLNEED) { 1277b11b56b5SAlan Cox /* 1278b11b56b5SAlan Cox * Reference the page before unlocking and 1279b11b56b5SAlan Cox * sleeping so that the page daemon is less 1280b11b56b5SAlan Cox * likely to reclaim it. 1281b11b56b5SAlan Cox */ 1282aa3650eaSMark Johnston vm_page_aflag_set(tm, PGA_REFERENCED); 1283567e51e1SAlan Cox } 1284aa3650eaSMark Johnston vm_page_busy_sleep(tm, "madvpo", false); 12856e20a165SJohn Dyson goto relookup; 128634567de7SAlan Cox } 1287aa3650eaSMark Johnston vm_page_advise(tm, advice); 1288aa3650eaSMark Johnston vm_page_unlock(tm); 1289aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); 1290aa3650eaSMark Johnston next_pindex: 12919b98b796SAlan Cox if (tobject != object) 129289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 1293867a482dSJohn Dyson } 129489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1295867a482dSJohn Dyson } 1296867a482dSJohn Dyson 1297867a482dSJohn Dyson /* 1298df8bae1dSRodney W. Grimes * vm_object_shadow: 1299df8bae1dSRodney W. Grimes * 1300df8bae1dSRodney W. Grimes * Create a new object which is backed by the 1301df8bae1dSRodney W. Grimes * specified existing object range. The source 1302df8bae1dSRodney W. Grimes * object reference is deallocated. 1303df8bae1dSRodney W. Grimes * 1304df8bae1dSRodney W. Grimes * The new object and offset into that object 1305df8bae1dSRodney W. Grimes * are returned in the source parameters. 1306df8bae1dSRodney W. Grimes */ 130726f9a767SRodney W. Grimes void 13081b40f8c0SMatthew Dillon vm_object_shadow( 13091b40f8c0SMatthew Dillon vm_object_t *object, /* IN/OUT */ 13101b40f8c0SMatthew Dillon vm_ooffset_t *offset, /* IN/OUT */ 13111b40f8c0SMatthew Dillon vm_size_t length) 1312df8bae1dSRodney W. Grimes { 1313d031cff1SMatthew Dillon vm_object_t source; 1314d031cff1SMatthew Dillon vm_object_t result; 1315df8bae1dSRodney W. Grimes 1316df8bae1dSRodney W. Grimes source = *object; 1317df8bae1dSRodney W. Grimes 1318df8bae1dSRodney W. Grimes /* 13199a2f6362SAlan Cox * Don't create the new object if the old object isn't shared. 13209a2f6362SAlan Cox */ 1321570a2f4aSAlan Cox if (source != NULL) { 132289f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 1323570a2f4aSAlan Cox if (source->ref_count == 1 && 13249a2f6362SAlan Cox source->handle == NULL && 13259a2f6362SAlan Cox (source->type == OBJT_DEFAULT || 13269917e010SAlan Cox source->type == OBJT_SWAP)) { 132789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 13289a2f6362SAlan Cox return; 13299917e010SAlan Cox } 133089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1331570a2f4aSAlan Cox } 13329a2f6362SAlan Cox 13339a2f6362SAlan Cox /* 1334570a2f4aSAlan Cox * Allocate a new object with the given length. 1335df8bae1dSRodney W. Grimes */ 13360cc74f14SAlan Cox result = vm_object_allocate(OBJT_DEFAULT, atop(length)); 1337df8bae1dSRodney W. Grimes 1338df8bae1dSRodney W. Grimes /* 13390d94caffSDavid Greenman * The new object shadows the source object, adding a reference to it. 13400d94caffSDavid Greenman * Our caller changes his reference to point to the new object, 13410d94caffSDavid Greenman * removing a reference to the source object. Net result: no change 13420d94caffSDavid Greenman * of reference count. 13439b09fe24SMatthew Dillon * 13449b09fe24SMatthew Dillon * Try to optimize the result object's page color when shadowing 1345956f3135SPhilippe Charnier * in order to maintain page coloring consistency in the combined 13469b09fe24SMatthew Dillon * shadowed object. 1347df8bae1dSRodney W. Grimes */ 134824a1cce3SDavid Greenman result->backing_object = source; 13499174ca7bSTor Egge /* 13509174ca7bSTor Egge * Store the offset into the source object, and fix up the offset into 13519174ca7bSTor Egge * the new object. 13529174ca7bSTor Egge */ 13539174ca7bSTor Egge result->backing_object_offset = *offset; 1354570a2f4aSAlan Cox if (source != NULL) { 135589f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 13561c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); 1357eaf13dd7SJohn Dyson source->shadow_count++; 1358f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 13597b54b1a9SAlan Cox result->flags |= source->flags & OBJ_COLORED; 1360f8a47341SAlan Cox result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & 1361f8a47341SAlan Cox ((1 << (VM_NFREEORDER - 1)) - 1); 1362f8a47341SAlan Cox #endif 136389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1364de5f6a77SJohn Dyson } 1365df8bae1dSRodney W. Grimes 1366df8bae1dSRodney W. Grimes 1367df8bae1dSRodney W. Grimes /* 1368df8bae1dSRodney W. Grimes * Return the new things 1369df8bae1dSRodney W. Grimes */ 1370df8bae1dSRodney W. Grimes *offset = 0; 1371df8bae1dSRodney W. Grimes *object = result; 1372df8bae1dSRodney W. Grimes } 1373df8bae1dSRodney W. Grimes 1374c5aaa06dSAlan Cox /* 1375c5aaa06dSAlan Cox * vm_object_split: 1376c5aaa06dSAlan Cox * 1377c5aaa06dSAlan Cox * Split the pages in a map entry into a new object. This affords 1378c5aaa06dSAlan Cox * easier removal of unused pages, and keeps object inheritance from 1379c5aaa06dSAlan Cox * being a negative impact on memory usage. 1380c5aaa06dSAlan Cox */ 1381c5aaa06dSAlan Cox void 1382c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry) 1383c5aaa06dSAlan Cox { 138473000556SAlan Cox vm_page_t m, m_next; 1385c5aaa06dSAlan Cox vm_object_t orig_object, new_object, source; 138673000556SAlan Cox vm_pindex_t idx, offidxstart; 138773000556SAlan Cox vm_size_t size; 1388c5aaa06dSAlan Cox 1389c5aaa06dSAlan Cox orig_object = entry->object.vm_object; 1390c5aaa06dSAlan Cox if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 1391c5aaa06dSAlan Cox return; 1392c5aaa06dSAlan Cox if (orig_object->ref_count <= 1) 1393c5aaa06dSAlan Cox return; 139489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 1395c5aaa06dSAlan Cox 13964da9f125SAlan Cox offidxstart = OFF_TO_IDX(entry->offset); 139795442adfSAlan Cox size = atop(entry->end - entry->start); 1398c5aaa06dSAlan Cox 13994da9f125SAlan Cox /* 14004da9f125SAlan Cox * If swap_pager_copy() is later called, it will convert new_object 14014da9f125SAlan Cox * into a swap object. 14024da9f125SAlan Cox */ 14034da9f125SAlan Cox new_object = vm_object_allocate(OBJT_DEFAULT, size); 1404c5aaa06dSAlan Cox 1405c5474b8fSAlan Cox /* 1406c5474b8fSAlan Cox * At this point, the new object is still private, so the order in 1407c5474b8fSAlan Cox * which the original and new objects are locked does not matter. 1408c5474b8fSAlan Cox */ 140989f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 141089f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 1411c5aaa06dSAlan Cox source = orig_object->backing_object; 1412c5aaa06dSAlan Cox if (source != NULL) { 141389f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 141419c244d0SAlan Cox if ((source->flags & OBJ_DEAD) != 0) { 141589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 141689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 141789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 141819c244d0SAlan Cox vm_object_deallocate(new_object); 141989f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 142019c244d0SAlan Cox return; 142119c244d0SAlan Cox } 14221c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, 1423c5aaa06dSAlan Cox new_object, shadow_list); 14248e3a76fbSAlan Cox source->shadow_count++; 1425b921a12bSAlan Cox vm_object_reference_locked(source); /* for new_object */ 1426c5aaa06dSAlan Cox vm_object_clear_flag(source, OBJ_ONEMAPPING); 142789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1428c5aaa06dSAlan Cox new_object->backing_object_offset = 14294da9f125SAlan Cox orig_object->backing_object_offset + entry->offset; 1430c5aaa06dSAlan Cox new_object->backing_object = source; 1431c5aaa06dSAlan Cox } 1432ef694c1aSEdward Tomasz Napierala if (orig_object->cred != NULL) { 1433ef694c1aSEdward Tomasz Napierala new_object->cred = orig_object->cred; 1434ef694c1aSEdward Tomasz Napierala crhold(orig_object->cred); 14353364c323SKonstantin Belousov new_object->charge = ptoa(size); 14363364c323SKonstantin Belousov KASSERT(orig_object->charge >= ptoa(size), 14373364c323SKonstantin Belousov ("orig_object->charge < 0")); 14383364c323SKonstantin Belousov orig_object->charge -= ptoa(size); 14393364c323SKonstantin Belousov } 1440c5aaa06dSAlan Cox retry: 1441b382c10aSKonstantin Belousov m = vm_page_find_least(orig_object, offidxstart); 144273000556SAlan Cox for (; m != NULL && (idx = m->pindex - offidxstart) < size; 144373000556SAlan Cox m = m_next) { 144473000556SAlan Cox m_next = TAILQ_NEXT(m, listq); 1445c5aaa06dSAlan Cox 1446c5aaa06dSAlan Cox /* 1447c5aaa06dSAlan Cox * We must wait for pending I/O to complete before we can 1448c5aaa06dSAlan Cox * rename the page. 1449c5aaa06dSAlan Cox * 1450c5aaa06dSAlan Cox * We do not have to VM_PROT_NONE the page as mappings should 1451c5aaa06dSAlan Cox * not be changed by this operation. 1452c5aaa06dSAlan Cox */ 1453c7aebda8SAttilio Rao if (vm_page_busied(m)) { 145489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1455c7aebda8SAttilio Rao vm_page_lock(m); 1456c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 14575975e53dSKonstantin Belousov vm_page_busy_sleep(m, "spltwt", false); 1458c7aebda8SAttilio Rao VM_OBJECT_WLOCK(orig_object); 145989f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1460c5aaa06dSAlan Cox goto retry; 1461de33beddSAlan Cox } 1462e946b949SAttilio Rao 14633453bca8SAlan Cox /* vm_page_rename() will dirty the page. */ 1464e946b949SAttilio Rao if (vm_page_rename(m, new_object, idx)) { 1465e946b949SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1466e946b949SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 14678d6fbbb8SJeff Roberson vm_radix_wait(); 1468e946b949SAttilio Rao VM_OBJECT_WLOCK(orig_object); 1469e946b949SAttilio Rao VM_OBJECT_WLOCK(new_object); 1470e946b949SAttilio Rao goto retry; 1471e946b949SAttilio Rao } 1472b5f359b7SAlan Cox #if VM_NRESERVLEVEL > 0 1473b5f359b7SAlan Cox /* 1474b5f359b7SAlan Cox * If some of the reservation's allocated pages remain with 1475b5f359b7SAlan Cox * the original object, then transferring the reservation to 1476b5f359b7SAlan Cox * the new object is neither particularly beneficial nor 1477b5f359b7SAlan Cox * particularly harmful as compared to leaving the reservation 1478b5f359b7SAlan Cox * with the original object. If, however, all of the 1479b5f359b7SAlan Cox * reservation's allocated pages are transferred to the new 1480b5f359b7SAlan Cox * object, then transferring the reservation is typically 1481b5f359b7SAlan Cox * beneficial. Determining which of these two cases applies 1482b5f359b7SAlan Cox * would be more costly than unconditionally renaming the 1483b5f359b7SAlan Cox * reservation. 1484b5f359b7SAlan Cox */ 1485b5f359b7SAlan Cox vm_reserv_rename(m, new_object, orig_object, offidxstart); 1486b5f359b7SAlan Cox #endif 1487dfd55c0cSAttilio Rao if (orig_object->type == OBJT_SWAP) 1488c7aebda8SAttilio Rao vm_page_xbusy(m); 1489c5aaa06dSAlan Cox } 1490d7a013c3SAlan Cox if (orig_object->type == OBJT_SWAP) { 1491c5aaa06dSAlan Cox /* 1492c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case the orig_object's 1493c7c8dd7eSAlan Cox * and new_object's locks are released and reacquired. 1494c5aaa06dSAlan Cox */ 1495c5aaa06dSAlan Cox swap_pager_copy(orig_object, new_object, offidxstart, 0); 1496dfd55c0cSAttilio Rao TAILQ_FOREACH(m, &new_object->memq, listq) 1497c7aebda8SAttilio Rao vm_page_xunbusy(m); 1498c5aaa06dSAlan Cox } 149989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 150089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1501c5aaa06dSAlan Cox entry->object.vm_object = new_object; 1502c5aaa06dSAlan Cox entry->offset = 0LL; 1503c5aaa06dSAlan Cox vm_object_deallocate(orig_object); 150489f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1505c5aaa06dSAlan Cox } 1506c5aaa06dSAlan Cox 15072ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_NOWAIT 0x0002 15082ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_WAIT 0x0004 15092ad1a3f7SMatthew Dillon 151099a1570aSKonstantin Belousov static vm_page_t 15114cc8daf7SConrad Meyer vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next, 151299a1570aSKonstantin Belousov int op) 151399a1570aSKonstantin Belousov { 151499a1570aSKonstantin Belousov vm_object_t backing_object; 151599a1570aSKonstantin Belousov 151699a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(object); 151799a1570aSKonstantin Belousov backing_object = object->backing_object; 151899a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(backing_object); 151999a1570aSKonstantin Belousov 152099a1570aSKonstantin Belousov KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p)); 152199a1570aSKonstantin Belousov KASSERT(p == NULL || p->object == object || p->object == backing_object, 152299a1570aSKonstantin Belousov ("invalid ownership %p %p %p", p, object, backing_object)); 152399a1570aSKonstantin Belousov if ((op & OBSC_COLLAPSE_NOWAIT) != 0) 152499a1570aSKonstantin Belousov return (next); 152599a1570aSKonstantin Belousov if (p != NULL) 152699a1570aSKonstantin Belousov vm_page_lock(p); 152799a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 152899a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(backing_object); 15298d6fbbb8SJeff Roberson /* The page is only NULL when rename fails. */ 153099a1570aSKonstantin Belousov if (p == NULL) 15318d6fbbb8SJeff Roberson vm_radix_wait(); 153299a1570aSKonstantin Belousov else 15335975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmocol", false); 153499a1570aSKonstantin Belousov VM_OBJECT_WLOCK(object); 153599a1570aSKonstantin Belousov VM_OBJECT_WLOCK(backing_object); 153699a1570aSKonstantin Belousov return (TAILQ_FIRST(&backing_object->memq)); 153799a1570aSKonstantin Belousov } 153899a1570aSKonstantin Belousov 153999a1570aSKonstantin Belousov static bool 15404cc8daf7SConrad Meyer vm_object_scan_all_shadowed(vm_object_t object) 15414cc8daf7SConrad Meyer { 15424cc8daf7SConrad Meyer vm_object_t backing_object; 15434cc8daf7SConrad Meyer vm_page_t p, pp; 154477d6fd97SKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex, pi, ps; 15454cc8daf7SConrad Meyer 15464cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object); 15474cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 15484cc8daf7SConrad Meyer 15494cc8daf7SConrad Meyer backing_object = object->backing_object; 15504cc8daf7SConrad Meyer 155177d6fd97SKonstantin Belousov if (backing_object->type != OBJT_DEFAULT && 155277d6fd97SKonstantin Belousov backing_object->type != OBJT_SWAP) 15534cc8daf7SConrad Meyer return (false); 15544cc8daf7SConrad Meyer 155577d6fd97SKonstantin Belousov pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 155677d6fd97SKonstantin Belousov p = vm_page_find_least(backing_object, pi); 155777d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 15584cc8daf7SConrad Meyer 15594cc8daf7SConrad Meyer /* 156077d6fd97SKonstantin Belousov * Only check pages inside the parent object's range and 156177d6fd97SKonstantin Belousov * inside the parent object's mapping of the backing object. 15624cc8daf7SConrad Meyer */ 156377d6fd97SKonstantin Belousov for (;; pi++) { 156477d6fd97SKonstantin Belousov if (p != NULL && p->pindex < pi) 156577d6fd97SKonstantin Belousov p = TAILQ_NEXT(p, listq); 156677d6fd97SKonstantin Belousov if (ps < pi) 156777d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 156877d6fd97SKonstantin Belousov if (p == NULL && ps >= backing_object->size) 156977d6fd97SKonstantin Belousov break; 157077d6fd97SKonstantin Belousov else if (p == NULL) 157177d6fd97SKonstantin Belousov pi = ps; 157277d6fd97SKonstantin Belousov else 157377d6fd97SKonstantin Belousov pi = MIN(p->pindex, ps); 157477d6fd97SKonstantin Belousov 157577d6fd97SKonstantin Belousov new_pindex = pi - backing_offset_index; 157677d6fd97SKonstantin Belousov if (new_pindex >= object->size) 157777d6fd97SKonstantin Belousov break; 15784cc8daf7SConrad Meyer 15794cc8daf7SConrad Meyer /* 15804cc8daf7SConrad Meyer * See if the parent has the page or if the parent's object 15814cc8daf7SConrad Meyer * pager has the page. If the parent has the page but the page 15824cc8daf7SConrad Meyer * is not valid, the parent's object pager must have the page. 15834cc8daf7SConrad Meyer * 15844cc8daf7SConrad Meyer * If this fails, the parent does not completely shadow the 15854cc8daf7SConrad Meyer * object and we might as well give up now. 15864cc8daf7SConrad Meyer */ 15874cc8daf7SConrad Meyer pp = vm_page_lookup(object, new_pindex); 15884cc8daf7SConrad Meyer if ((pp == NULL || pp->valid == 0) && 15894cc8daf7SConrad Meyer !vm_pager_has_page(object, new_pindex, NULL, NULL)) 15904cc8daf7SConrad Meyer return (false); 15914cc8daf7SConrad Meyer } 15924cc8daf7SConrad Meyer return (true); 15934cc8daf7SConrad Meyer } 15944cc8daf7SConrad Meyer 15954cc8daf7SConrad Meyer static bool 15964cc8daf7SConrad Meyer vm_object_collapse_scan(vm_object_t object, int op) 15972ad1a3f7SMatthew Dillon { 15982ad1a3f7SMatthew Dillon vm_object_t backing_object; 159999a1570aSKonstantin Belousov vm_page_t next, p, pp; 160099a1570aSKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex; 16012ad1a3f7SMatthew Dillon 160289f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 160389f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 16042ad1a3f7SMatthew Dillon 16052ad1a3f7SMatthew Dillon backing_object = object->backing_object; 16062ad1a3f7SMatthew Dillon backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 16072ad1a3f7SMatthew Dillon 16082ad1a3f7SMatthew Dillon /* 16092ad1a3f7SMatthew Dillon * Initial conditions 16102ad1a3f7SMatthew Dillon */ 16114cc8daf7SConrad Meyer if ((op & OBSC_COLLAPSE_WAIT) != 0) 16122ad1a3f7SMatthew Dillon vm_object_set_flag(backing_object, OBJ_DEAD); 16132ad1a3f7SMatthew Dillon 16142ad1a3f7SMatthew Dillon /* 16152ad1a3f7SMatthew Dillon * Our scan 16162ad1a3f7SMatthew Dillon */ 16174cc8daf7SConrad Meyer for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) { 161899a1570aSKonstantin Belousov next = TAILQ_NEXT(p, listq); 161999a1570aSKonstantin Belousov new_pindex = p->pindex - backing_offset_index; 16202ad1a3f7SMatthew Dillon 16212ad1a3f7SMatthew Dillon /* 16222ad1a3f7SMatthew Dillon * Check for busy page 16232ad1a3f7SMatthew Dillon */ 1624c7aebda8SAttilio Rao if (vm_page_busied(p)) { 16254cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, p, next, op); 16262ad1a3f7SMatthew Dillon continue; 16272ad1a3f7SMatthew Dillon } 16282ad1a3f7SMatthew Dillon 162999a1570aSKonstantin Belousov KASSERT(p->object == backing_object, 16304cc8daf7SConrad Meyer ("vm_object_collapse_scan: object mismatch")); 16312ad1a3f7SMatthew Dillon 163299a1570aSKonstantin Belousov if (p->pindex < backing_offset_index || 163399a1570aSKonstantin Belousov new_pindex >= object->size) { 1634e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 16354cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 16364cc8daf7SConrad Meyer 1); 1637e946b949SAttilio Rao 16382ad1a3f7SMatthew Dillon /* 16394cc8daf7SConrad Meyer * Page is out of the parent object's range, we can 16404cc8daf7SConrad Meyer * simply destroy it. 16412ad1a3f7SMatthew Dillon */ 16422965a453SKip Macy vm_page_lock(p); 1643f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1644f6d89838SAlan Cox ("freeing mapped page %p", p)); 1645f6d89838SAlan Cox if (p->wire_count == 0) 16462ad1a3f7SMatthew Dillon vm_page_free(p); 1647f6d89838SAlan Cox else 1648f6d89838SAlan Cox vm_page_remove(p); 16492965a453SKip Macy vm_page_unlock(p); 16502ad1a3f7SMatthew Dillon continue; 16512ad1a3f7SMatthew Dillon } 16522ad1a3f7SMatthew Dillon 16532ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 165499a1570aSKonstantin Belousov if (pp != NULL && vm_page_busied(pp)) { 1655e18cc7bfSMax Laier /* 16564cc8daf7SConrad Meyer * The page in the parent is busy and possibly not 16574cc8daf7SConrad Meyer * (yet) valid. Until its state is finalized by the 16584cc8daf7SConrad Meyer * busy bit owner, we can't tell whether it shadows the 16594cc8daf7SConrad Meyer * original page. Therefore, we must either skip it 16604cc8daf7SConrad Meyer * and the original (backing_object) page or wait for 16614cc8daf7SConrad Meyer * its state to be finalized. 1662e18cc7bfSMax Laier * 16634cc8daf7SConrad Meyer * This is due to a race with vm_fault() where we must 16644cc8daf7SConrad Meyer * unbusy the original (backing_obj) page before we can 16654cc8daf7SConrad Meyer * (re)lock the parent. Hence we can get here. 1666e18cc7bfSMax Laier */ 16674cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, pp, next, 16684cc8daf7SConrad Meyer op); 1669e18cc7bfSMax Laier continue; 1670e18cc7bfSMax Laier } 167199a1570aSKonstantin Belousov 167299a1570aSKonstantin Belousov KASSERT(pp == NULL || pp->valid != 0, 167399a1570aSKonstantin Belousov ("unbusy invalid page %p", pp)); 167499a1570aSKonstantin Belousov 16754cc8daf7SConrad Meyer if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, 16764cc8daf7SConrad Meyer NULL)) { 167799a1570aSKonstantin Belousov /* 16784cc8daf7SConrad Meyer * The page already exists in the parent OR swap exists 16794cc8daf7SConrad Meyer * for this location in the parent. Leave the parent's 16804cc8daf7SConrad Meyer * page alone. Destroy the original page from the 16814cc8daf7SConrad Meyer * backing object. 168299a1570aSKonstantin Belousov */ 1683e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 16844cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 16854cc8daf7SConrad Meyer 1); 16862965a453SKip Macy vm_page_lock(p); 1687f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1688f6d89838SAlan Cox ("freeing mapped page %p", p)); 1689f6d89838SAlan Cox if (p->wire_count == 0) 16902ad1a3f7SMatthew Dillon vm_page_free(p); 1691f6d89838SAlan Cox else 1692f6d89838SAlan Cox vm_page_remove(p); 16932965a453SKip Macy vm_page_unlock(p); 16942ad1a3f7SMatthew Dillon continue; 16952ad1a3f7SMatthew Dillon } 16962ad1a3f7SMatthew Dillon 1697e946b949SAttilio Rao /* 16984cc8daf7SConrad Meyer * Page does not exist in parent, rename the page from the 16994cc8daf7SConrad Meyer * backing object to the main object. 1700e946b949SAttilio Rao * 17014cc8daf7SConrad Meyer * If the page was mapped to a process, it can remain mapped 17023453bca8SAlan Cox * through the rename. vm_page_rename() will dirty the page. 1703e946b949SAttilio Rao */ 1704e946b949SAttilio Rao if (vm_page_rename(p, object, new_pindex)) { 17054cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, NULL, next, 17064cc8daf7SConrad Meyer op); 1707e946b949SAttilio Rao continue; 1708e946b949SAttilio Rao } 170914a5dc17SAttilio Rao 171014a5dc17SAttilio Rao /* Use the old pindex to free the right page. */ 1711e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 171214a5dc17SAttilio Rao swap_pager_freespace(backing_object, 171314a5dc17SAttilio Rao new_pindex + backing_offset_index, 1); 1714e946b949SAttilio Rao 1715f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1716f8a47341SAlan Cox /* 1717f8a47341SAlan Cox * Rename the reservation. 1718f8a47341SAlan Cox */ 1719f8a47341SAlan Cox vm_reserv_rename(p, object, backing_object, 1720f8a47341SAlan Cox backing_offset_index); 1721f8a47341SAlan Cox #endif 17222ad1a3f7SMatthew Dillon } 172399a1570aSKonstantin Belousov return (true); 17242ad1a3f7SMatthew Dillon } 17252ad1a3f7SMatthew Dillon 1726df8bae1dSRodney W. Grimes 1727df8bae1dSRodney W. Grimes /* 17282fe6e4d7SDavid Greenman * this version of collapse allows the operation to occur earlier and 17292fe6e4d7SDavid Greenman * when paging_in_progress is true for an object... This is not a complete 17302fe6e4d7SDavid Greenman * operation, but should plug 99.9% of the rest of the leaks. 17312fe6e4d7SDavid Greenman */ 17322fe6e4d7SDavid Greenman static void 17331b40f8c0SMatthew Dillon vm_object_qcollapse(vm_object_t object) 17342fe6e4d7SDavid Greenman { 17352ad1a3f7SMatthew Dillon vm_object_t backing_object = object->backing_object; 17362fe6e4d7SDavid Greenman 173789f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 173889f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(backing_object); 17391b40f8c0SMatthew Dillon 17402fe6e4d7SDavid Greenman if (backing_object->ref_count != 1) 17412fe6e4d7SDavid Greenman return; 17422fe6e4d7SDavid Greenman 17434cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT); 17442fe6e4d7SDavid Greenman } 17452fe6e4d7SDavid Greenman 1746df8bae1dSRodney W. Grimes /* 1747df8bae1dSRodney W. Grimes * vm_object_collapse: 1748df8bae1dSRodney W. Grimes * 1749df8bae1dSRodney W. Grimes * Collapse an object with the object backing it. 1750df8bae1dSRodney W. Grimes * Pages in the backing object are moved into the 1751df8bae1dSRodney W. Grimes * parent, and the backing object is deallocated. 1752df8bae1dSRodney W. Grimes */ 175326f9a767SRodney W. Grimes void 17541b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object) 1755df8bae1dSRodney W. Grimes { 175698f139daSKonstantin Belousov vm_object_t backing_object, new_backing_object; 175798f139daSKonstantin Belousov 175889f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 175923955314SAlfred Perlstein 1760df8bae1dSRodney W. Grimes while (TRUE) { 1761df8bae1dSRodney W. Grimes /* 1762df8bae1dSRodney W. Grimes * Verify that the conditions are right for collapse: 1763df8bae1dSRodney W. Grimes * 17642ad1a3f7SMatthew Dillon * The object exists and the backing object exists. 1765df8bae1dSRodney W. Grimes */ 176624a1cce3SDavid Greenman if ((backing_object = object->backing_object) == NULL) 17672ad1a3f7SMatthew Dillon break; 1768df8bae1dSRodney W. Grimes 1769f919ebdeSDavid Greenman /* 1770f919ebdeSDavid Greenman * we check the backing object first, because it is most likely 177124a1cce3SDavid Greenman * not collapsable. 1772f919ebdeSDavid Greenman */ 177389f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 177424a1cce3SDavid Greenman if (backing_object->handle != NULL || 177524a1cce3SDavid Greenman (backing_object->type != OBJT_DEFAULT && 177624a1cce3SDavid Greenman backing_object->type != OBJT_SWAP) || 1777f919ebdeSDavid Greenman (backing_object->flags & OBJ_DEAD) || 177824a1cce3SDavid Greenman object->handle != NULL || 177924a1cce3SDavid Greenman (object->type != OBJT_DEFAULT && 178024a1cce3SDavid Greenman object->type != OBJT_SWAP) || 178124a1cce3SDavid Greenman (object->flags & OBJ_DEAD)) { 178289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 17832ad1a3f7SMatthew Dillon break; 178424a1cce3SDavid Greenman } 17859b4814bbSDavid Greenman 178698f139daSKonstantin Belousov if (object->paging_in_progress != 0 || 178798f139daSKonstantin Belousov backing_object->paging_in_progress != 0) { 1788b9921222SDavid Greenman vm_object_qcollapse(object); 178989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 17902ad1a3f7SMatthew Dillon break; 1791df8bae1dSRodney W. Grimes } 179298f139daSKonstantin Belousov 179326f9a767SRodney W. Grimes /* 17940d94caffSDavid Greenman * We know that we can either collapse the backing object (if 17952ad1a3f7SMatthew Dillon * the parent is the only reference to it) or (perhaps) have 17962ad1a3f7SMatthew Dillon * the parent bypass the object if the parent happens to shadow 17972ad1a3f7SMatthew Dillon * all the resident pages in the entire backing object. 17982ad1a3f7SMatthew Dillon * 17992ad1a3f7SMatthew Dillon * This is ignoring pager-backed pages such as swap pages. 18004cc8daf7SConrad Meyer * vm_object_collapse_scan fails the shadowing test in this 18012ad1a3f7SMatthew Dillon * case. 1802df8bae1dSRodney W. Grimes */ 1803df8bae1dSRodney W. Grimes if (backing_object->ref_count == 1) { 1804aa9bc3b1SKonstantin Belousov vm_object_pip_add(object, 1); 1805aa9bc3b1SKonstantin Belousov vm_object_pip_add(backing_object, 1); 1806aa9bc3b1SKonstantin Belousov 1807df8bae1dSRodney W. Grimes /* 18082ad1a3f7SMatthew Dillon * If there is exactly one reference to the backing 18092ad1a3f7SMatthew Dillon * object, we can collapse it into the parent. 1810df8bae1dSRodney W. Grimes */ 18114cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT); 1812df8bae1dSRodney W. Grimes 1813f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1814f8a47341SAlan Cox /* 1815f8a47341SAlan Cox * Break any reservations from backing_object. 1816f8a47341SAlan Cox */ 1817f8a47341SAlan Cox if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) 1818f8a47341SAlan Cox vm_reserv_break_all(backing_object); 1819f8a47341SAlan Cox #endif 1820f8a47341SAlan Cox 1821df8bae1dSRodney W. Grimes /* 1822df8bae1dSRodney W. Grimes * Move the pager from backing_object to object. 1823df8bae1dSRodney W. Grimes */ 18246be36525SAlan Cox if (backing_object->type == OBJT_SWAP) { 182524a1cce3SDavid Greenman /* 1826c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case 1827c7c8dd7eSAlan Cox * the backing_object's and object's locks are 1828c7c8dd7eSAlan Cox * released and reacquired. 1829571a1e92SAttilio Rao * Since swap_pager_copy() is being asked to 1830571a1e92SAttilio Rao * destroy the source, it will change the 1831571a1e92SAttilio Rao * backing_object's type to OBJT_DEFAULT. 183224a1cce3SDavid Greenman */ 18331c7c3c6aSMatthew Dillon swap_pager_copy( 18341c7c3c6aSMatthew Dillon backing_object, 18351c7c3c6aSMatthew Dillon object, 18361c7c3c6aSMatthew Dillon OFF_TO_IDX(object->backing_object_offset), TRUE); 1837c0503609SDavid Greenman } 1838df8bae1dSRodney W. Grimes /* 1839df8bae1dSRodney W. Grimes * Object now shadows whatever backing_object did. 18402ad1a3f7SMatthew Dillon * Note that the reference to 18412ad1a3f7SMatthew Dillon * backing_object->backing_object moves from within 18422ad1a3f7SMatthew Dillon * backing_object to within object. 1843df8bae1dSRodney W. Grimes */ 18441c500307SAlan Cox LIST_REMOVE(object, shadow_list); 18454f7c7f6eSAlan Cox backing_object->shadow_count--; 1846de5f6a77SJohn Dyson if (backing_object->backing_object) { 184789f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object->backing_object); 18481c500307SAlan Cox LIST_REMOVE(backing_object, shadow_list); 184943186e53SAlan Cox LIST_INSERT_HEAD( 185043186e53SAlan Cox &backing_object->backing_object->shadow_head, 185143186e53SAlan Cox object, shadow_list); 185243186e53SAlan Cox /* 185343186e53SAlan Cox * The shadow_count has not changed. 185443186e53SAlan Cox */ 185589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object->backing_object); 1856de5f6a77SJohn Dyson } 185724a1cce3SDavid Greenman object->backing_object = backing_object->backing_object; 18582ad1a3f7SMatthew Dillon object->backing_object_offset += 18592ad1a3f7SMatthew Dillon backing_object->backing_object_offset; 18602ad1a3f7SMatthew Dillon 1861df8bae1dSRodney W. Grimes /* 1862df8bae1dSRodney W. Grimes * Discard backing_object. 1863df8bae1dSRodney W. Grimes * 18640d94caffSDavid Greenman * Since the backing object has no pages, no pager left, 18650d94caffSDavid Greenman * and no object references within it, all that is 18660d94caffSDavid Greenman * necessary is to dispose of it. 1867df8bae1dSRodney W. Grimes */ 18689b4d473aSKonstantin Belousov KASSERT(backing_object->ref_count == 1, ( 18699b4d473aSKonstantin Belousov "backing_object %p was somehow re-referenced during collapse!", 18709b4d473aSKonstantin Belousov backing_object)); 1871aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(backing_object); 1872e735691bSJohn Baldwin backing_object->type = OBJT_DEAD; 1873e735691bSJohn Baldwin backing_object->ref_count = 0; 187489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18759b4d473aSKonstantin Belousov vm_object_destroy(backing_object); 1876df8bae1dSRodney W. Grimes 1877aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(object); 1878df8bae1dSRodney W. Grimes object_collapses++; 18790d94caffSDavid Greenman } else { 1880df8bae1dSRodney W. Grimes /* 18812ad1a3f7SMatthew Dillon * If we do not entirely shadow the backing object, 18822ad1a3f7SMatthew Dillon * there is nothing we can do so we give up. 1883df8bae1dSRodney W. Grimes */ 1884df59a0feSJeff Roberson if (object->resident_page_count != object->size && 18854cc8daf7SConrad Meyer !vm_object_scan_all_shadowed(object)) { 188689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18872ad1a3f7SMatthew Dillon break; 188824a1cce3SDavid Greenman } 1889df8bae1dSRodney W. Grimes 1890df8bae1dSRodney W. Grimes /* 18910d94caffSDavid Greenman * Make the parent shadow the next object in the 18920d94caffSDavid Greenman * chain. Deallocating backing_object will not remove 18930d94caffSDavid Greenman * it, since its reference count is at least 2. 1894df8bae1dSRodney W. Grimes */ 18951c500307SAlan Cox LIST_REMOVE(object, shadow_list); 1896eaf13dd7SJohn Dyson backing_object->shadow_count--; 189795e5e988SJohn Dyson 189895e5e988SJohn Dyson new_backing_object = backing_object->backing_object; 18998aef1712SMatthew Dillon if ((object->backing_object = new_backing_object) != NULL) { 190089f6b863SAttilio Rao VM_OBJECT_WLOCK(new_backing_object); 19011c500307SAlan Cox LIST_INSERT_HEAD( 19022ad1a3f7SMatthew Dillon &new_backing_object->shadow_head, 19032ad1a3f7SMatthew Dillon object, 19042ad1a3f7SMatthew Dillon shadow_list 19052ad1a3f7SMatthew Dillon ); 1906eaf13dd7SJohn Dyson new_backing_object->shadow_count++; 1907b921a12bSAlan Cox vm_object_reference_locked(new_backing_object); 190889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_backing_object); 190995e5e988SJohn Dyson object->backing_object_offset += 191095e5e988SJohn Dyson backing_object->backing_object_offset; 1911de5f6a77SJohn Dyson } 1912df8bae1dSRodney W. Grimes 1913df8bae1dSRodney W. Grimes /* 19140d94caffSDavid Greenman * Drop the reference count on backing_object. Since 191522ec553fSAlan Cox * its ref_count was at least 2, it will not vanish. 1916df8bae1dSRodney W. Grimes */ 191722ec553fSAlan Cox backing_object->ref_count--; 191889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 1919df8bae1dSRodney W. Grimes object_bypasses++; 1920df8bae1dSRodney W. Grimes } 1921df8bae1dSRodney W. Grimes 1922df8bae1dSRodney W. Grimes /* 1923df8bae1dSRodney W. Grimes * Try again with this object's new backing object. 1924df8bae1dSRodney W. Grimes */ 1925df8bae1dSRodney W. Grimes } 1926df8bae1dSRodney W. Grimes } 1927df8bae1dSRodney W. Grimes 1928df8bae1dSRodney W. Grimes /* 1929bff99f0dSAlan Cox * vm_object_page_remove: 1930df8bae1dSRodney W. Grimes * 193168855966SAlan Cox * For the given object, either frees or invalidates each of the 19326bbee8e2SAlan Cox * specified pages. In general, a page is freed. However, if a page is 19336bbee8e2SAlan Cox * wired for any reason other than the existence of a managed, wired 19346bbee8e2SAlan Cox * mapping, then it may be invalidated but not removed from the object. 19356bbee8e2SAlan Cox * Pages are specified by the given range ["start", "end") and the option 19366bbee8e2SAlan Cox * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range 19376bbee8e2SAlan Cox * extends from "start" to the end of the object. If the option 19386bbee8e2SAlan Cox * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the 19396bbee8e2SAlan Cox * specified range are affected. If the option OBJPR_NOTMAPPED is 19406bbee8e2SAlan Cox * specified, then the pages within the specified range must have no 19416bbee8e2SAlan Cox * mappings. Otherwise, if this option is not specified, any mappings to 19426bbee8e2SAlan Cox * the specified pages are removed before the pages are freed or 19436bbee8e2SAlan Cox * invalidated. 194468855966SAlan Cox * 19456bbee8e2SAlan Cox * In general, this operation should only be performed on objects that 19466bbee8e2SAlan Cox * contain managed pages. There are, however, two exceptions. First, it 19476bbee8e2SAlan Cox * is performed on the kernel and kmem objects by vm_map_entry_delete(). 19486bbee8e2SAlan Cox * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device- 19496bbee8e2SAlan Cox * backed pages. In both of these cases, the option OBJPR_CLEANONLY must 19506bbee8e2SAlan Cox * not be specified and the option OBJPR_NOTMAPPED must be specified. 1951df8bae1dSRodney W. Grimes * 1952df8bae1dSRodney W. Grimes * The object must be locked. 1953df8bae1dSRodney W. Grimes */ 195426f9a767SRodney W. Grimes void 1955ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 19566bbee8e2SAlan Cox int options) 1957df8bae1dSRodney W. Grimes { 1958d031cff1SMatthew Dillon vm_page_t p, next; 195993c5d3a4SKonstantin Belousov struct mtx *mtx; 1960bba52ecaSKonstantin Belousov struct pglist pgl; 1961df8bae1dSRodney W. Grimes 196289f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 196328634820SAlan Cox KASSERT((object->flags & OBJ_UNMANAGED) == 0 || 19646bbee8e2SAlan Cox (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, 19656bbee8e2SAlan Cox ("vm_object_page_remove: illegal options for object %p", object)); 1966ecde4b32SAlan Cox if (object->resident_page_count == 0) 19677667839aSAlan Cox return; 1968d474eaaaSDoug Rabson vm_object_pip_add(object, 1); 1969bba52ecaSKonstantin Belousov TAILQ_INIT(&pgl); 197026f9a767SRodney W. Grimes again: 1971b382c10aSKonstantin Belousov p = vm_page_find_least(object, start); 197293c5d3a4SKonstantin Belousov mtx = NULL; 19732965a453SKip Macy 197475741c04SAlan Cox /* 19756bbee8e2SAlan Cox * Here, the variable "p" is either (1) the page with the least pindex 19766bbee8e2SAlan Cox * greater than or equal to the parameter "start" or (2) NULL. 197775741c04SAlan Cox */ 19786bbee8e2SAlan Cox for (; p != NULL && (p->pindex < end || end == 0); p = next) { 1979b18bfc3dSJohn Dyson next = TAILQ_NEXT(p, listq); 198075741c04SAlan Cox 198159677d3cSAlan Cox /* 19826bbee8e2SAlan Cox * If the page is wired for any reason besides the existence 19836bbee8e2SAlan Cox * of managed, wired mappings, then it cannot be freed. For 19846bbee8e2SAlan Cox * example, fictitious pages, which represent device memory, 19856bbee8e2SAlan Cox * are inherently wired and cannot be freed. They can, 19866bbee8e2SAlan Cox * however, be invalidated if the option OBJPR_CLEANONLY is 19876bbee8e2SAlan Cox * not specified. 198859677d3cSAlan Cox */ 198993c5d3a4SKonstantin Belousov vm_page_change_lock(p, &mtx); 19903aaea6efSKonstantin Belousov if (vm_page_xbusied(p)) { 19913aaea6efSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 19925975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopax", true); 19933aaea6efSKonstantin Belousov VM_OBJECT_WLOCK(object); 19943aaea6efSKonstantin Belousov goto again; 19953aaea6efSKonstantin Belousov } 19966195b24aSKonstantin Belousov if (p->wire_count != 0) { 1997cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && 1998cf060942SAlan Cox object->ref_count != 0) 19994fec79beSAlan Cox pmap_remove_all(p); 20006bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) == 0) { 2001bd7e5f99SJohn Dyson p->valid = 0; 2002a28042d1SAlan Cox vm_page_undirty(p); 2003a28042d1SAlan Cox } 200493c5d3a4SKonstantin Belousov continue; 20050d94caffSDavid Greenman } 2006c7aebda8SAttilio Rao if (vm_page_busied(p)) { 2007c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(object); 20085975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopar", false); 2009c7aebda8SAttilio Rao VM_OBJECT_WLOCK(object); 201026f9a767SRodney W. Grimes goto again; 2011c7aebda8SAttilio Rao } 201268855966SAlan Cox KASSERT((p->flags & PG_FICTITIOUS) == 0, 201368855966SAlan Cox ("vm_object_page_remove: page %p is fictitious", p)); 20146bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) { 2015cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && 2016cf060942SAlan Cox object->ref_count != 0) 201778985e42SAlan Cox pmap_remove_write(p); 2018cf060942SAlan Cox if (p->dirty != 0) 201993c5d3a4SKonstantin Belousov continue; 20202965a453SKip Macy } 2021cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) 20224fec79beSAlan Cox pmap_remove_all(p); 2023bba52ecaSKonstantin Belousov p->flags &= ~PG_ZERO; 2024bba52ecaSKonstantin Belousov if (vm_page_free_prep(p, false)) 2025bba52ecaSKonstantin Belousov TAILQ_INSERT_TAIL(&pgl, p, listq); 20262965a453SKip Macy } 202793c5d3a4SKonstantin Belousov if (mtx != NULL) 202893c5d3a4SKonstantin Belousov mtx_unlock(mtx); 2029bba52ecaSKonstantin Belousov vm_page_free_phys_pglist(&pgl); 2030f919ebdeSDavid Greenman vm_object_pip_wakeup(object); 2031c0503609SDavid Greenman } 2032df8bae1dSRodney W. Grimes 2033df8bae1dSRodney W. Grimes /* 20343138cd36SMark Johnston * vm_object_page_noreuse: 2035936c09acSJohn Baldwin * 20363138cd36SMark Johnston * For the given object, attempt to move the specified pages to 20373138cd36SMark Johnston * the head of the inactive queue. This bypasses regular LRU 20383138cd36SMark Johnston * operation and allows the pages to be reused quickly under memory 20393138cd36SMark Johnston * pressure. If a page is wired for any reason, then it will not 20403138cd36SMark Johnston * be queued. Pages are specified by the range ["start", "end"). 20413138cd36SMark Johnston * As a special case, if "end" is zero, then the range extends from 20423138cd36SMark Johnston * "start" to the end of the object. 2043936c09acSJohn Baldwin * 2044936c09acSJohn Baldwin * This operation should only be performed on objects that 204528634820SAlan Cox * contain non-fictitious, managed pages. 2046936c09acSJohn Baldwin * 2047936c09acSJohn Baldwin * The object must be locked. 2048936c09acSJohn Baldwin */ 2049936c09acSJohn Baldwin void 20503138cd36SMark Johnston vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2051936c09acSJohn Baldwin { 205293c5d3a4SKonstantin Belousov struct mtx *mtx; 2053936c09acSJohn Baldwin vm_page_t p, next; 2054936c09acSJohn Baldwin 205552d1addaSAlan Cox VM_OBJECT_ASSERT_LOCKED(object); 205628634820SAlan Cox KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 20573138cd36SMark Johnston ("vm_object_page_noreuse: illegal object %p", object)); 2058936c09acSJohn Baldwin if (object->resident_page_count == 0) 2059936c09acSJohn Baldwin return; 2060936c09acSJohn Baldwin p = vm_page_find_least(object, start); 2061936c09acSJohn Baldwin 2062936c09acSJohn Baldwin /* 2063936c09acSJohn Baldwin * Here, the variable "p" is either (1) the page with the least pindex 2064936c09acSJohn Baldwin * greater than or equal to the parameter "start" or (2) NULL. 2065936c09acSJohn Baldwin */ 2066936c09acSJohn Baldwin mtx = NULL; 2067936c09acSJohn Baldwin for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2068936c09acSJohn Baldwin next = TAILQ_NEXT(p, listq); 206993c5d3a4SKonstantin Belousov vm_page_change_lock(p, &mtx); 20703138cd36SMark Johnston vm_page_deactivate_noreuse(p); 2071936c09acSJohn Baldwin } 2072936c09acSJohn Baldwin if (mtx != NULL) 2073936c09acSJohn Baldwin mtx_unlock(mtx); 2074936c09acSJohn Baldwin } 2075936c09acSJohn Baldwin 2076936c09acSJohn Baldwin /* 2077387aabc5SAlan Cox * Populate the specified range of the object with valid pages. Returns 2078387aabc5SAlan Cox * TRUE if the range is successfully populated and FALSE otherwise. 2079387aabc5SAlan Cox * 2080387aabc5SAlan Cox * Note: This function should be optimized to pass a larger array of 2081387aabc5SAlan Cox * pages to vm_pager_get_pages() before it is applied to a non- 2082387aabc5SAlan Cox * OBJT_DEVICE object. 2083387aabc5SAlan Cox * 2084387aabc5SAlan Cox * The object must be locked. 2085387aabc5SAlan Cox */ 2086387aabc5SAlan Cox boolean_t 2087387aabc5SAlan Cox vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2088387aabc5SAlan Cox { 2089093c7f39SGleb Smirnoff vm_page_t m; 2090387aabc5SAlan Cox vm_pindex_t pindex; 2091387aabc5SAlan Cox int rv; 2092387aabc5SAlan Cox 209389f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2094387aabc5SAlan Cox for (pindex = start; pindex < end; pindex++) { 20955944de8eSKonstantin Belousov m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); 2096387aabc5SAlan Cox if (m->valid != VM_PAGE_BITS_ALL) { 2097b0cd2017SGleb Smirnoff rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); 2098387aabc5SAlan Cox if (rv != VM_PAGER_OK) { 20992965a453SKip Macy vm_page_lock(m); 2100387aabc5SAlan Cox vm_page_free(m); 21012965a453SKip Macy vm_page_unlock(m); 2102387aabc5SAlan Cox break; 2103387aabc5SAlan Cox } 2104387aabc5SAlan Cox } 2105387aabc5SAlan Cox /* 2106387aabc5SAlan Cox * Keep "m" busy because a subsequent iteration may unlock 2107387aabc5SAlan Cox * the object. 2108387aabc5SAlan Cox */ 2109387aabc5SAlan Cox } 2110387aabc5SAlan Cox if (pindex > start) { 2111387aabc5SAlan Cox m = vm_page_lookup(object, start); 2112387aabc5SAlan Cox while (m != NULL && m->pindex < pindex) { 2113c7aebda8SAttilio Rao vm_page_xunbusy(m); 2114387aabc5SAlan Cox m = TAILQ_NEXT(m, listq); 2115387aabc5SAlan Cox } 2116387aabc5SAlan Cox } 2117387aabc5SAlan Cox return (pindex == end); 2118387aabc5SAlan Cox } 2119387aabc5SAlan Cox 2120387aabc5SAlan Cox /* 2121df8bae1dSRodney W. Grimes * Routine: vm_object_coalesce 2122df8bae1dSRodney W. Grimes * Function: Coalesces two objects backing up adjoining 2123df8bae1dSRodney W. Grimes * regions of memory into a single object. 2124df8bae1dSRodney W. Grimes * 2125df8bae1dSRodney W. Grimes * returns TRUE if objects were combined. 2126df8bae1dSRodney W. Grimes * 2127df8bae1dSRodney W. Grimes * NOTE: Only works at the moment if the second object is NULL - 2128df8bae1dSRodney W. Grimes * if it's not, which object do we lock first? 2129df8bae1dSRodney W. Grimes * 2130df8bae1dSRodney W. Grimes * Parameters: 2131df8bae1dSRodney W. Grimes * prev_object First object to coalesce 2132df8bae1dSRodney W. Grimes * prev_offset Offset into prev_object 2133df8bae1dSRodney W. Grimes * prev_size Size of reference to prev_object 213457a21abaSAlan Cox * next_size Size of reference to the second object 21353364c323SKonstantin Belousov * reserved Indicator that extension region has 21363364c323SKonstantin Belousov * swap accounted for 2137df8bae1dSRodney W. Grimes * 2138df8bae1dSRodney W. Grimes * Conditions: 2139df8bae1dSRodney W. Grimes * The object must *not* be locked. 2140df8bae1dSRodney W. Grimes */ 21410d94caffSDavid Greenman boolean_t 214257a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 21433364c323SKonstantin Belousov vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) 2144df8bae1dSRodney W. Grimes { 2145ea41812fSAlan Cox vm_pindex_t next_pindex; 2146df8bae1dSRodney W. Grimes 214700e1854aSAlan Cox if (prev_object == NULL) 2148df8bae1dSRodney W. Grimes return (TRUE); 214989f6b863SAttilio Rao VM_OBJECT_WLOCK(prev_object); 21509ded9474SKonstantin Belousov if ((prev_object->type != OBJT_DEFAULT && 21519ded9474SKonstantin Belousov prev_object->type != OBJT_SWAP) || 2152f08f7dcaSKonstantin Belousov (prev_object->flags & OBJ_TMPFS_NODE) != 0) { 215389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 215430dcfc09SJohn Dyson return (FALSE); 215530dcfc09SJohn Dyson } 215630dcfc09SJohn Dyson 2157df8bae1dSRodney W. Grimes /* 2158df8bae1dSRodney W. Grimes * Try to collapse the object first 2159df8bae1dSRodney W. Grimes */ 2160df8bae1dSRodney W. Grimes vm_object_collapse(prev_object); 2161df8bae1dSRodney W. Grimes 2162df8bae1dSRodney W. Grimes /* 21630d94caffSDavid Greenman * Can't coalesce if: . more than one reference . paged out . shadows 21640d94caffSDavid Greenman * another object . has a copy elsewhere (any of which mean that the 21650d94caffSDavid Greenman * pages not mapped to prev_entry may be in use anyway) 2166df8bae1dSRodney W. Grimes */ 21678cc7e047SJohn Dyson if (prev_object->backing_object != NULL) { 216889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 2169df8bae1dSRodney W. Grimes return (FALSE); 2170df8bae1dSRodney W. Grimes } 2171a316d390SJohn Dyson 2172a316d390SJohn Dyson prev_size >>= PAGE_SHIFT; 2173a316d390SJohn Dyson next_size >>= PAGE_SHIFT; 217457a21abaSAlan Cox next_pindex = OFF_TO_IDX(prev_offset) + prev_size; 21758cc7e047SJohn Dyson 21768cc7e047SJohn Dyson if ((prev_object->ref_count > 1) && 2177ea41812fSAlan Cox (prev_object->size != next_pindex)) { 217889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 21798cc7e047SJohn Dyson return (FALSE); 21808cc7e047SJohn Dyson } 21818cc7e047SJohn Dyson 2182df8bae1dSRodney W. Grimes /* 21833364c323SKonstantin Belousov * Account for the charge. 21843364c323SKonstantin Belousov */ 2185ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) { 21863364c323SKonstantin Belousov 21873364c323SKonstantin Belousov /* 21883364c323SKonstantin Belousov * If prev_object was charged, then this mapping, 2189763df3ecSPedro F. Giffuni * although not charged now, may become writable 2190ef694c1aSEdward Tomasz Napierala * later. Non-NULL cred in the object would prevent 21913364c323SKonstantin Belousov * swap reservation during enabling of the write 21923364c323SKonstantin Belousov * access, so reserve swap now. Failed reservation 21933364c323SKonstantin Belousov * cause allocation of the separate object for the map 21943364c323SKonstantin Belousov * entry, and swap reservation for this entry is 21953364c323SKonstantin Belousov * managed in appropriate time. 21963364c323SKonstantin Belousov */ 2197ef694c1aSEdward Tomasz Napierala if (!reserved && !swap_reserve_by_cred(ptoa(next_size), 2198ef694c1aSEdward Tomasz Napierala prev_object->cred)) { 21999f790a17SKonstantin Belousov VM_OBJECT_WUNLOCK(prev_object); 22003364c323SKonstantin Belousov return (FALSE); 22013364c323SKonstantin Belousov } 22023364c323SKonstantin Belousov prev_object->charge += ptoa(next_size); 22033364c323SKonstantin Belousov } 22043364c323SKonstantin Belousov 22053364c323SKonstantin Belousov /* 22060d94caffSDavid Greenman * Remove any pages that may still be in the object from a previous 22070d94caffSDavid Greenman * deallocation. 2208df8bae1dSRodney W. Grimes */ 2209ea41812fSAlan Cox if (next_pindex < prev_object->size) { 22106bbee8e2SAlan Cox vm_object_page_remove(prev_object, next_pindex, next_pindex + 22116bbee8e2SAlan Cox next_size, 0); 2212ea41812fSAlan Cox if (prev_object->type == OBJT_SWAP) 2213ea41812fSAlan Cox swap_pager_freespace(prev_object, 2214ea41812fSAlan Cox next_pindex, next_size); 22153364c323SKonstantin Belousov #if 0 2216ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) { 22173364c323SKonstantin Belousov KASSERT(prev_object->charge >= 22183364c323SKonstantin Belousov ptoa(prev_object->size - next_pindex), 22193364c323SKonstantin Belousov ("object %p overcharged 1 %jx %jx", prev_object, 22203364c323SKonstantin Belousov (uintmax_t)next_pindex, (uintmax_t)next_size)); 22213364c323SKonstantin Belousov prev_object->charge -= ptoa(prev_object->size - 22223364c323SKonstantin Belousov next_pindex); 22233364c323SKonstantin Belousov } 22243364c323SKonstantin Belousov #endif 2225ea41812fSAlan Cox } 2226df8bae1dSRodney W. Grimes 2227df8bae1dSRodney W. Grimes /* 2228df8bae1dSRodney W. Grimes * Extend the object if necessary. 2229df8bae1dSRodney W. Grimes */ 2230ea41812fSAlan Cox if (next_pindex + next_size > prev_object->size) 2231ea41812fSAlan Cox prev_object->size = next_pindex + next_size; 2232df8bae1dSRodney W. Grimes 223389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 2234df8bae1dSRodney W. Grimes return (TRUE); 2235df8bae1dSRodney W. Grimes } 2236df8bae1dSRodney W. Grimes 22377a5a6352SMatthew Dillon void 22387a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object) 22397a5a6352SMatthew Dillon { 22407a5a6352SMatthew Dillon 224189f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2242f40cb1c6SKonstantin Belousov if (object->type != OBJT_VNODE) { 2243f40cb1c6SKonstantin Belousov if ((object->flags & OBJ_TMPFS_NODE) != 0) { 2244f40cb1c6SKonstantin Belousov KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs")); 2245f40cb1c6SKonstantin Belousov vm_object_set_flag(object, OBJ_TMPFS_DIRTY); 2246f40cb1c6SKonstantin Belousov } 22473280870dSKonstantin Belousov return; 2248f40cb1c6SKonstantin Belousov } 22493280870dSKonstantin Belousov object->generation++; 22503280870dSKonstantin Belousov if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) 2251ee39666aSJeff Roberson return; 2252af51d7bfSAlan Cox vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); 22537a5a6352SMatthew Dillon } 22547a5a6352SMatthew Dillon 225503462509SAlan Cox /* 225603462509SAlan Cox * vm_object_unwire: 225703462509SAlan Cox * 225803462509SAlan Cox * For each page offset within the specified range of the given object, 225903462509SAlan Cox * find the highest-level page in the shadow chain and unwire it. A page 226003462509SAlan Cox * must exist at every page offset, and the highest-level page must be 226103462509SAlan Cox * wired. 226203462509SAlan Cox */ 226303462509SAlan Cox void 226403462509SAlan Cox vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, 226503462509SAlan Cox uint8_t queue) 226603462509SAlan Cox { 226703462509SAlan Cox vm_object_t tobject; 226803462509SAlan Cox vm_page_t m, tm; 226903462509SAlan Cox vm_pindex_t end_pindex, pindex, tpindex; 227003462509SAlan Cox int depth, locked_depth; 227103462509SAlan Cox 227203462509SAlan Cox KASSERT((offset & PAGE_MASK) == 0, 227303462509SAlan Cox ("vm_object_unwire: offset is not page aligned")); 227403462509SAlan Cox KASSERT((length & PAGE_MASK) == 0, 227503462509SAlan Cox ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); 227603462509SAlan Cox /* The wired count of a fictitious page never changes. */ 227703462509SAlan Cox if ((object->flags & OBJ_FICTITIOUS) != 0) 227803462509SAlan Cox return; 227903462509SAlan Cox pindex = OFF_TO_IDX(offset); 228003462509SAlan Cox end_pindex = pindex + atop(length); 228103462509SAlan Cox locked_depth = 1; 228203462509SAlan Cox VM_OBJECT_RLOCK(object); 228303462509SAlan Cox m = vm_page_find_least(object, pindex); 228403462509SAlan Cox while (pindex < end_pindex) { 228503462509SAlan Cox if (m == NULL || pindex < m->pindex) { 228603462509SAlan Cox /* 228703462509SAlan Cox * The first object in the shadow chain doesn't 228803462509SAlan Cox * contain a page at the current index. Therefore, 228903462509SAlan Cox * the page must exist in a backing object. 229003462509SAlan Cox */ 229103462509SAlan Cox tobject = object; 229203462509SAlan Cox tpindex = pindex; 229303462509SAlan Cox depth = 0; 229403462509SAlan Cox do { 229503462509SAlan Cox tpindex += 229603462509SAlan Cox OFF_TO_IDX(tobject->backing_object_offset); 229703462509SAlan Cox tobject = tobject->backing_object; 229803462509SAlan Cox KASSERT(tobject != NULL, 229903462509SAlan Cox ("vm_object_unwire: missing page")); 230003462509SAlan Cox if ((tobject->flags & OBJ_FICTITIOUS) != 0) 230103462509SAlan Cox goto next_page; 230203462509SAlan Cox depth++; 230303462509SAlan Cox if (depth == locked_depth) { 230403462509SAlan Cox locked_depth++; 230503462509SAlan Cox VM_OBJECT_RLOCK(tobject); 230603462509SAlan Cox } 230703462509SAlan Cox } while ((tm = vm_page_lookup(tobject, tpindex)) == 230803462509SAlan Cox NULL); 230903462509SAlan Cox } else { 231003462509SAlan Cox tm = m; 231103462509SAlan Cox m = TAILQ_NEXT(m, listq); 231203462509SAlan Cox } 231303462509SAlan Cox vm_page_lock(tm); 231403462509SAlan Cox vm_page_unwire(tm, queue); 231503462509SAlan Cox vm_page_unlock(tm); 231603462509SAlan Cox next_page: 231703462509SAlan Cox pindex++; 231803462509SAlan Cox } 231903462509SAlan Cox /* Release the accumulated object locks. */ 232003462509SAlan Cox for (depth = 0; depth < locked_depth; depth++) { 232103462509SAlan Cox tobject = object->backing_object; 232203462509SAlan Cox VM_OBJECT_RUNLOCK(object); 232303462509SAlan Cox object = tobject; 232403462509SAlan Cox } 232503462509SAlan Cox } 232603462509SAlan Cox 232763e4c6cdSEric van Gyzen struct vnode * 232863e4c6cdSEric van Gyzen vm_object_vnode(vm_object_t object) 232963e4c6cdSEric van Gyzen { 233063e4c6cdSEric van Gyzen 233163e4c6cdSEric van Gyzen VM_OBJECT_ASSERT_LOCKED(object); 233263e4c6cdSEric van Gyzen if (object->type == OBJT_VNODE) 233363e4c6cdSEric van Gyzen return (object->handle); 233463e4c6cdSEric van Gyzen if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0) 233563e4c6cdSEric van Gyzen return (object->un_pager.swp.swp_tmpfs); 233663e4c6cdSEric van Gyzen return (NULL); 233763e4c6cdSEric van Gyzen } 233863e4c6cdSEric van Gyzen 2339ff87ae35SJohn Baldwin static int 2340ff87ae35SJohn Baldwin sysctl_vm_object_list(SYSCTL_HANDLER_ARGS) 2341ff87ae35SJohn Baldwin { 23420ecee546SKonstantin Belousov struct kinfo_vmobject *kvo; 2343ff87ae35SJohn Baldwin char *fullpath, *freepath; 2344ff87ae35SJohn Baldwin struct vnode *vp; 2345ff87ae35SJohn Baldwin struct vattr va; 2346ff87ae35SJohn Baldwin vm_object_t obj; 2347ff87ae35SJohn Baldwin vm_page_t m; 2348ff87ae35SJohn Baldwin int count, error; 2349ff87ae35SJohn Baldwin 2350ff87ae35SJohn Baldwin if (req->oldptr == NULL) { 2351ff87ae35SJohn Baldwin /* 2352ff87ae35SJohn Baldwin * If an old buffer has not been provided, generate an 2353ff87ae35SJohn Baldwin * estimate of the space needed for a subsequent call. 2354ff87ae35SJohn Baldwin */ 2355ff87ae35SJohn Baldwin mtx_lock(&vm_object_list_mtx); 2356ff87ae35SJohn Baldwin count = 0; 2357ff87ae35SJohn Baldwin TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2358ff87ae35SJohn Baldwin if (obj->type == OBJT_DEAD) 2359ff87ae35SJohn Baldwin continue; 2360ff87ae35SJohn Baldwin count++; 2361ff87ae35SJohn Baldwin } 2362ff87ae35SJohn Baldwin mtx_unlock(&vm_object_list_mtx); 2363ff87ae35SJohn Baldwin return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) * 2364ff87ae35SJohn Baldwin count * 11 / 10)); 2365ff87ae35SJohn Baldwin } 2366ff87ae35SJohn Baldwin 23670ecee546SKonstantin Belousov kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK); 2368ff87ae35SJohn Baldwin error = 0; 2369ff87ae35SJohn Baldwin 2370ff87ae35SJohn Baldwin /* 2371ff87ae35SJohn Baldwin * VM objects are type stable and are never removed from the 2372ff87ae35SJohn Baldwin * list once added. This allows us to safely read obj->object_list 2373ff87ae35SJohn Baldwin * after reacquiring the VM object lock. 2374ff87ae35SJohn Baldwin */ 2375ff87ae35SJohn Baldwin mtx_lock(&vm_object_list_mtx); 2376ff87ae35SJohn Baldwin TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2377ff87ae35SJohn Baldwin if (obj->type == OBJT_DEAD) 2378ff87ae35SJohn Baldwin continue; 2379ff87ae35SJohn Baldwin VM_OBJECT_RLOCK(obj); 2380ff87ae35SJohn Baldwin if (obj->type == OBJT_DEAD) { 2381ff87ae35SJohn Baldwin VM_OBJECT_RUNLOCK(obj); 2382ff87ae35SJohn Baldwin continue; 2383ff87ae35SJohn Baldwin } 2384ff87ae35SJohn Baldwin mtx_unlock(&vm_object_list_mtx); 23850ecee546SKonstantin Belousov kvo->kvo_size = ptoa(obj->size); 23860ecee546SKonstantin Belousov kvo->kvo_resident = obj->resident_page_count; 23870ecee546SKonstantin Belousov kvo->kvo_ref_count = obj->ref_count; 23880ecee546SKonstantin Belousov kvo->kvo_shadow_count = obj->shadow_count; 23890ecee546SKonstantin Belousov kvo->kvo_memattr = obj->memattr; 23900ecee546SKonstantin Belousov kvo->kvo_active = 0; 23910ecee546SKonstantin Belousov kvo->kvo_inactive = 0; 2392ff87ae35SJohn Baldwin TAILQ_FOREACH(m, &obj->memq, listq) { 2393ff87ae35SJohn Baldwin /* 2394ff87ae35SJohn Baldwin * A page may belong to the object but be 2395ff87ae35SJohn Baldwin * dequeued and set to PQ_NONE while the 2396ff87ae35SJohn Baldwin * object lock is not held. This makes the 2397ff87ae35SJohn Baldwin * reads of m->queue below racy, and we do not 2398ff87ae35SJohn Baldwin * count pages set to PQ_NONE. However, this 2399ff87ae35SJohn Baldwin * sysctl is only meant to give an 2400ff87ae35SJohn Baldwin * approximation of the system anyway. 2401ff87ae35SJohn Baldwin */ 2402ebcddc72SAlan Cox if (vm_page_active(m)) 24030ecee546SKonstantin Belousov kvo->kvo_active++; 2404ebcddc72SAlan Cox else if (vm_page_inactive(m)) 24050ecee546SKonstantin Belousov kvo->kvo_inactive++; 2406ff87ae35SJohn Baldwin } 2407ff87ae35SJohn Baldwin 24080ecee546SKonstantin Belousov kvo->kvo_vn_fileid = 0; 24090ecee546SKonstantin Belousov kvo->kvo_vn_fsid = 0; 24100ecee546SKonstantin Belousov kvo->kvo_vn_fsid_freebsd11 = 0; 2411ff87ae35SJohn Baldwin freepath = NULL; 2412ff87ae35SJohn Baldwin fullpath = ""; 2413ff87ae35SJohn Baldwin vp = NULL; 2414ff87ae35SJohn Baldwin switch (obj->type) { 2415ff87ae35SJohn Baldwin case OBJT_DEFAULT: 24160ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_DEFAULT; 2417ff87ae35SJohn Baldwin break; 2418ff87ae35SJohn Baldwin case OBJT_VNODE: 24190ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_VNODE; 2420ff87ae35SJohn Baldwin vp = obj->handle; 2421ff87ae35SJohn Baldwin vref(vp); 2422ff87ae35SJohn Baldwin break; 2423ff87ae35SJohn Baldwin case OBJT_SWAP: 24240ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_SWAP; 2425ff87ae35SJohn Baldwin break; 2426ff87ae35SJohn Baldwin case OBJT_DEVICE: 24270ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_DEVICE; 2428ff87ae35SJohn Baldwin break; 2429ff87ae35SJohn Baldwin case OBJT_PHYS: 24300ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_PHYS; 2431ff87ae35SJohn Baldwin break; 2432ff87ae35SJohn Baldwin case OBJT_DEAD: 24330ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_DEAD; 2434ff87ae35SJohn Baldwin break; 2435ff87ae35SJohn Baldwin case OBJT_SG: 24360ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_SG; 2437ff87ae35SJohn Baldwin break; 2438ff87ae35SJohn Baldwin case OBJT_MGTDEVICE: 24390ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_MGTDEVICE; 2440ff87ae35SJohn Baldwin break; 2441ff87ae35SJohn Baldwin default: 24420ecee546SKonstantin Belousov kvo->kvo_type = KVME_TYPE_UNKNOWN; 2443ff87ae35SJohn Baldwin break; 2444ff87ae35SJohn Baldwin } 2445ff87ae35SJohn Baldwin VM_OBJECT_RUNLOCK(obj); 2446ff87ae35SJohn Baldwin if (vp != NULL) { 2447ff87ae35SJohn Baldwin vn_fullpath(curthread, vp, &fullpath, &freepath); 2448ff87ae35SJohn Baldwin vn_lock(vp, LK_SHARED | LK_RETRY); 2449ff87ae35SJohn Baldwin if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { 24500ecee546SKonstantin Belousov kvo->kvo_vn_fileid = va.va_fileid; 24510ecee546SKonstantin Belousov kvo->kvo_vn_fsid = va.va_fsid; 24520ecee546SKonstantin Belousov kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; 245369921123SKonstantin Belousov /* truncate */ 2454ff87ae35SJohn Baldwin } 2455ff87ae35SJohn Baldwin vput(vp); 2456ff87ae35SJohn Baldwin } 2457ff87ae35SJohn Baldwin 24580ecee546SKonstantin Belousov strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); 2459ff87ae35SJohn Baldwin if (freepath != NULL) 2460ff87ae35SJohn Baldwin free(freepath, M_TEMP); 2461ff87ae35SJohn Baldwin 2462ff87ae35SJohn Baldwin /* Pack record size down */ 24630ecee546SKonstantin Belousov kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) 24640ecee546SKonstantin Belousov + strlen(kvo->kvo_path) + 1; 24650ecee546SKonstantin Belousov kvo->kvo_structsize = roundup(kvo->kvo_structsize, 2466ff87ae35SJohn Baldwin sizeof(uint64_t)); 24670ecee546SKonstantin Belousov error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); 2468ff87ae35SJohn Baldwin mtx_lock(&vm_object_list_mtx); 2469ff87ae35SJohn Baldwin if (error) 2470ff87ae35SJohn Baldwin break; 2471ff87ae35SJohn Baldwin } 2472ff87ae35SJohn Baldwin mtx_unlock(&vm_object_list_mtx); 24730ecee546SKonstantin Belousov free(kvo, M_TEMP); 2474ff87ae35SJohn Baldwin return (error); 2475ff87ae35SJohn Baldwin } 2476ff87ae35SJohn Baldwin SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | 2477ff87ae35SJohn Baldwin CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject", 2478ff87ae35SJohn Baldwin "List of VM objects"); 2479ff87ae35SJohn Baldwin 2480c7c34a24SBruce Evans #include "opt_ddb.h" 2481c3cb3e12SDavid Greenman #ifdef DDB 2482c7c34a24SBruce Evans #include <sys/kernel.h> 2483c7c34a24SBruce Evans 2484ce9edcf5SPoul-Henning Kamp #include <sys/cons.h> 2485c7c34a24SBruce Evans 2486c7c34a24SBruce Evans #include <ddb/ddb.h> 2487c7c34a24SBruce Evans 2488cac597e4SBruce Evans static int 24891b40f8c0SMatthew Dillon _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2490a1f6d91cSDavid Greenman { 2491a1f6d91cSDavid Greenman vm_map_t tmpm; 2492a1f6d91cSDavid Greenman vm_map_entry_t tmpe; 2493a1f6d91cSDavid Greenman vm_object_t obj; 2494a1f6d91cSDavid Greenman int entcount; 2495a1f6d91cSDavid Greenman 2496a1f6d91cSDavid Greenman if (map == 0) 2497a1f6d91cSDavid Greenman return 0; 2498a1f6d91cSDavid Greenman 2499a1f6d91cSDavid Greenman if (entry == 0) { 2500a1f6d91cSDavid Greenman tmpe = map->header.next; 2501a1f6d91cSDavid Greenman entcount = map->nentries; 2502a1f6d91cSDavid Greenman while (entcount-- && (tmpe != &map->header)) { 2503a1f6d91cSDavid Greenman if (_vm_object_in_map(map, object, tmpe)) { 2504a1f6d91cSDavid Greenman return 1; 2505a1f6d91cSDavid Greenman } 2506a1f6d91cSDavid Greenman tmpe = tmpe->next; 2507a1f6d91cSDavid Greenman } 25089fdfe602SMatthew Dillon } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 25099fdfe602SMatthew Dillon tmpm = entry->object.sub_map; 2510a1f6d91cSDavid Greenman tmpe = tmpm->header.next; 2511a1f6d91cSDavid Greenman entcount = tmpm->nentries; 2512a1f6d91cSDavid Greenman while (entcount-- && tmpe != &tmpm->header) { 2513a1f6d91cSDavid Greenman if (_vm_object_in_map(tmpm, object, tmpe)) { 2514a1f6d91cSDavid Greenman return 1; 2515a1f6d91cSDavid Greenman } 2516a1f6d91cSDavid Greenman tmpe = tmpe->next; 2517a1f6d91cSDavid Greenman } 25188aef1712SMatthew Dillon } else if ((obj = entry->object.vm_object) != NULL) { 251924a1cce3SDavid Greenman for (; obj; obj = obj->backing_object) 2520a1f6d91cSDavid Greenman if (obj == object) { 2521a1f6d91cSDavid Greenman return 1; 2522a1f6d91cSDavid Greenman } 2523a1f6d91cSDavid Greenman } 2524a1f6d91cSDavid Greenman return 0; 2525a1f6d91cSDavid Greenman } 2526a1f6d91cSDavid Greenman 2527cac597e4SBruce Evans static int 25281b40f8c0SMatthew Dillon vm_object_in_map(vm_object_t object) 2529a1f6d91cSDavid Greenman { 2530a1f6d91cSDavid Greenman struct proc *p; 25311005a129SJohn Baldwin 253260517fd1SJohn Baldwin /* sx_slock(&allproc_lock); */ 2533f67af5c9SXin LI FOREACH_PROC_IN_SYSTEM(p) { 2534a1f6d91cSDavid Greenman if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2535a1f6d91cSDavid Greenman continue; 2536553629ebSJake Burkholder if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 253760517fd1SJohn Baldwin /* sx_sunlock(&allproc_lock); */ 2538a1f6d91cSDavid Greenman return 1; 2539a1f6d91cSDavid Greenman } 2540553629ebSJake Burkholder } 254160517fd1SJohn Baldwin /* sx_sunlock(&allproc_lock); */ 2542a1f6d91cSDavid Greenman if (_vm_object_in_map(kernel_map, object, 0)) 2543a1f6d91cSDavid Greenman return 1; 2544a1f6d91cSDavid Greenman return 0; 2545a1f6d91cSDavid Greenman } 2546a1f6d91cSDavid Greenman 2547c7c34a24SBruce Evans DB_SHOW_COMMAND(vmochk, vm_object_check) 2548f708ef1bSPoul-Henning Kamp { 2549a1f6d91cSDavid Greenman vm_object_t object; 2550a1f6d91cSDavid Greenman 2551a1f6d91cSDavid Greenman /* 2552a1f6d91cSDavid Greenman * make sure that internal objs are in a map somewhere 2553a1f6d91cSDavid Greenman * and none have zero ref counts. 2554a1f6d91cSDavid Greenman */ 2555cc64b484SAlfred Perlstein TAILQ_FOREACH(object, &vm_object_list, object_list) { 255624a1cce3SDavid Greenman if (object->handle == NULL && 255724a1cce3SDavid Greenman (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2558a1f6d91cSDavid Greenman if (object->ref_count == 0) { 25593efc015bSPeter Wemm db_printf("vmochk: internal obj has zero ref count: %ld\n", 25603efc015bSPeter Wemm (long)object->size); 2561a1f6d91cSDavid Greenman } 2562a1f6d91cSDavid Greenman if (!vm_object_in_map(object)) { 2563fc62ef1fSBruce Evans db_printf( 2564fc62ef1fSBruce Evans "vmochk: internal obj is not in a map: " 2565fc62ef1fSBruce Evans "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2566fc62ef1fSBruce Evans object->ref_count, (u_long)object->size, 2567fc62ef1fSBruce Evans (u_long)object->size, 2568fc62ef1fSBruce Evans (void *)object->backing_object); 2569a1f6d91cSDavid Greenman } 2570a1f6d91cSDavid Greenman } 2571a1f6d91cSDavid Greenman } 2572a1f6d91cSDavid Greenman } 2573a1f6d91cSDavid Greenman 257426f9a767SRodney W. Grimes /* 2575df8bae1dSRodney W. Grimes * vm_object_print: [ debug ] 2576df8bae1dSRodney W. Grimes */ 2577c7c34a24SBruce Evans DB_SHOW_COMMAND(object, vm_object_print_static) 2578df8bae1dSRodney W. Grimes { 2579c7c34a24SBruce Evans /* XXX convert args. */ 2580c7c34a24SBruce Evans vm_object_t object = (vm_object_t)addr; 2581c7c34a24SBruce Evans boolean_t full = have_addr; 2582c7c34a24SBruce Evans 2583d031cff1SMatthew Dillon vm_page_t p; 2584df8bae1dSRodney W. Grimes 2585c7c34a24SBruce Evans /* XXX count is an (unused) arg. Avoid shadowing it. */ 2586c7c34a24SBruce Evans #define count was_count 2587c7c34a24SBruce Evans 2588d031cff1SMatthew Dillon int count; 2589df8bae1dSRodney W. Grimes 2590df8bae1dSRodney W. Grimes if (object == NULL) 2591df8bae1dSRodney W. Grimes return; 2592df8bae1dSRodney W. Grimes 2593eb95adefSBruce Evans db_iprintf( 2594ef694c1aSEdward Tomasz Napierala "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", 2595e47cd172SMaxime Henrion object, (int)object->type, (uintmax_t)object->size, 25963364c323SKonstantin Belousov object->resident_page_count, object->ref_count, object->flags, 2597ef694c1aSEdward Tomasz Napierala object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); 2598e47cd172SMaxime Henrion db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 25991c7c3c6aSMatthew Dillon object->shadow_count, 2600eb95adefSBruce Evans object->backing_object ? object->backing_object->ref_count : 0, 2601e47cd172SMaxime Henrion object->backing_object, (uintmax_t)object->backing_object_offset); 2602df8bae1dSRodney W. Grimes 2603df8bae1dSRodney W. Grimes if (!full) 2604df8bae1dSRodney W. Grimes return; 2605df8bae1dSRodney W. Grimes 2606c7c34a24SBruce Evans db_indent += 2; 2607df8bae1dSRodney W. Grimes count = 0; 2608fc2ffbe6SPoul-Henning Kamp TAILQ_FOREACH(p, &object->memq, listq) { 2609df8bae1dSRodney W. Grimes if (count == 0) 2610c7c34a24SBruce Evans db_iprintf("memory:="); 2611df8bae1dSRodney W. Grimes else if (count == 6) { 2612c7c34a24SBruce Evans db_printf("\n"); 2613c7c34a24SBruce Evans db_iprintf(" ..."); 2614df8bae1dSRodney W. Grimes count = 0; 2615df8bae1dSRodney W. Grimes } else 2616c7c34a24SBruce Evans db_printf(","); 2617df8bae1dSRodney W. Grimes count++; 2618df8bae1dSRodney W. Grimes 2619e47cd172SMaxime Henrion db_printf("(off=0x%jx,page=0x%jx)", 2620e47cd172SMaxime Henrion (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2621df8bae1dSRodney W. Grimes } 2622df8bae1dSRodney W. Grimes if (count != 0) 2623c7c34a24SBruce Evans db_printf("\n"); 2624c7c34a24SBruce Evans db_indent -= 2; 2625df8bae1dSRodney W. Grimes } 26265070c7f8SJohn Dyson 2627c7c34a24SBruce Evans /* XXX. */ 2628c7c34a24SBruce Evans #undef count 2629c7c34a24SBruce Evans 2630c7c34a24SBruce Evans /* XXX need this non-static entry for calling from vm_map_print. */ 26315070c7f8SJohn Dyson void 26321b40f8c0SMatthew Dillon vm_object_print( 26331b40f8c0SMatthew Dillon /* db_expr_t */ long addr, 26341b40f8c0SMatthew Dillon boolean_t have_addr, 26351b40f8c0SMatthew Dillon /* db_expr_t */ long count, 26361b40f8c0SMatthew Dillon char *modif) 2637c7c34a24SBruce Evans { 2638c7c34a24SBruce Evans vm_object_print_static(addr, have_addr, count, modif); 2639c7c34a24SBruce Evans } 2640c7c34a24SBruce Evans 2641c7c34a24SBruce Evans DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 26425070c7f8SJohn Dyson { 26435070c7f8SJohn Dyson vm_object_t object; 2644bb2ac86fSKonstantin Belousov vm_pindex_t fidx; 2645bb2ac86fSKonstantin Belousov vm_paddr_t pa; 2646bb2ac86fSKonstantin Belousov vm_page_t m, prev_m; 2647bb2ac86fSKonstantin Belousov int rcount, nl, c; 2648cc64b484SAlfred Perlstein 2649bb2ac86fSKonstantin Belousov nl = 0; 2650cc64b484SAlfred Perlstein TAILQ_FOREACH(object, &vm_object_list, object_list) { 2651fc62ef1fSBruce Evans db_printf("new object: %p\n", (void *)object); 26525070c7f8SJohn Dyson if (nl > 18) { 26535070c7f8SJohn Dyson c = cngetc(); 26545070c7f8SJohn Dyson if (c != ' ') 26555070c7f8SJohn Dyson return; 26565070c7f8SJohn Dyson nl = 0; 26575070c7f8SJohn Dyson } 26585070c7f8SJohn Dyson nl++; 26595070c7f8SJohn Dyson rcount = 0; 26605070c7f8SJohn Dyson fidx = 0; 2661bb2ac86fSKonstantin Belousov pa = -1; 2662bb2ac86fSKonstantin Belousov TAILQ_FOREACH(m, &object->memq, listq) { 2663bb2ac86fSKonstantin Belousov if (m->pindex > 128) 2664bb2ac86fSKonstantin Belousov break; 2665bb2ac86fSKonstantin Belousov if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && 2666bb2ac86fSKonstantin Belousov prev_m->pindex + 1 != m->pindex) { 26675070c7f8SJohn Dyson if (rcount) { 26683efc015bSPeter Wemm db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 26693efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 26705070c7f8SJohn Dyson if (nl > 18) { 26715070c7f8SJohn Dyson c = cngetc(); 26725070c7f8SJohn Dyson if (c != ' ') 26735070c7f8SJohn Dyson return; 26745070c7f8SJohn Dyson nl = 0; 26755070c7f8SJohn Dyson } 26765070c7f8SJohn Dyson nl++; 26775070c7f8SJohn Dyson rcount = 0; 26785070c7f8SJohn Dyson } 26795070c7f8SJohn Dyson } 26805070c7f8SJohn Dyson if (rcount && 26815070c7f8SJohn Dyson (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 26825070c7f8SJohn Dyson ++rcount; 26835070c7f8SJohn Dyson continue; 26845070c7f8SJohn Dyson } 26855070c7f8SJohn Dyson if (rcount) { 26862446e4f0SAlan Cox db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 26873efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 26885070c7f8SJohn Dyson if (nl > 18) { 26895070c7f8SJohn Dyson c = cngetc(); 26905070c7f8SJohn Dyson if (c != ' ') 26915070c7f8SJohn Dyson return; 26925070c7f8SJohn Dyson nl = 0; 26935070c7f8SJohn Dyson } 26945070c7f8SJohn Dyson nl++; 26955070c7f8SJohn Dyson } 2696bb2ac86fSKonstantin Belousov fidx = m->pindex; 26975070c7f8SJohn Dyson pa = VM_PAGE_TO_PHYS(m); 26985070c7f8SJohn Dyson rcount = 1; 26995070c7f8SJohn Dyson } 27005070c7f8SJohn Dyson if (rcount) { 27013efc015bSPeter Wemm db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 27023efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 27035070c7f8SJohn Dyson if (nl > 18) { 27045070c7f8SJohn Dyson c = cngetc(); 27055070c7f8SJohn Dyson if (c != ' ') 27065070c7f8SJohn Dyson return; 27075070c7f8SJohn Dyson nl = 0; 27085070c7f8SJohn Dyson } 27095070c7f8SJohn Dyson nl++; 27105070c7f8SJohn Dyson } 27115070c7f8SJohn Dyson } 27125070c7f8SJohn Dyson } 2713c3cb3e12SDavid Greenman #endif /* DDB */ 2714