/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/pctrie.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *clearobjflags,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *clearobjflags);
static void	vm_object_qcollapse(vm_object_t object);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
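
/*
 * Editor's note: the block below is an illustrative sketch, not part of the
 * original file, showing the reference model described in the comment above:
 * an object lives until every reference is given up.  The function name is
 * hypothetical, and the block is compiled out since it is documentation only.
 */
#if 0
static void
vm_object_lifecycle_example(void)
{
	vm_object_t obj;

	/* Create an anonymous object covering 16 pages (ref_count == 1). */
	obj = vm_object_allocate(OBJT_DEFAULT, 16);

	/* A second consumer takes its own reference (ref_count == 2). */
	vm_object_reference(obj);

	/* Each reference is dropped independently ... */
	vm_object_deallocate(obj);
	/* ... and the last drop lets vm_object_terminate() reclaim it. */
	vm_object_deallocate(obj);
}
#endif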

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
    "VM object stats");

static counter_u64_t object_collapses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses,
    "VM object collapses");

static counter_u64_t object_bypasses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses,
    "VM object bypasses");

static void
counter_startup(void)
{

	object_collapses = counter_u64_alloc(M_WAITOK);
	object_bypasses = counter_u64_alloc(M_WAITOK);
}
SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	object->ref_count = 0;
	vm_radix_init(&object->rtree);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->flags = OBJ_DEAD;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}

static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	if (type == OBJT_SWAP)
		pctrie_init(&object->un_pager.swp.swp_blks);

	/*
	 * Ensure that swap_pager_swapoff() iteration over object_list
	 * sees up to date type and pctrie head if it observed
	 * non-dead object.
	 */
	atomic_thread_fence_rel();

	switch (type) {
	case OBJT_DEAD:
		panic("_vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_DEFAULT:
	case OBJT_SWAP:
		object->flags = OBJ_ONEMAPPING;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		object->flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		object->flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		object->flags = 0;
		break;
	default:
		panic("_vm_object_allocate: type %d is undefined", type);
	}
	object->size = size;
	object->domain.dr_policy = NULL;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->cred = NULL;
	object->charge = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	vm_radix_zinit();
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}

/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_MGTDEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	default:
		panic("vm_object_set_memattr: object %p is of undefined type",
		    object);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}
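
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Per the comment above, vm_object_set_memattr() must run under the object
 * write lock and before any pages are resident.  The function name is
 * hypothetical and VM_MEMATTR_UNCACHEABLE is a machine-dependent attribute;
 * the block is compiled out.
 */
#if 0
static int
set_uncacheable_example(vm_object_t object)
{
	int rv;

	VM_OBJECT_WLOCK(object);
	/* Fails with KERN_FAILURE once the object already holds pages. */
	rv = vm_object_set_memattr(object, VM_MEMATTR_UNCACHEABLE);
	VM_OBJECT_WUNLOCK(object);
	return (rv);
}
#endif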

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
	}
}
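
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * of the paging-in-progress (pip) protocol above: the counter brackets an
 * operation during which the object lock is dropped, so that waiters such
 * as vm_object_terminate() block until it completes.  The function name is
 * hypothetical; the block is compiled out.
 */
#if 0
static void
pip_bracket_example(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_add(object, 1);
	VM_OBJECT_WUNLOCK(object);

	/* ... perform pager I/O with the object unlocked ... */

	VM_OBJECT_WLOCK(object);
	vm_object_pip_wakeup(object);	/* drop the count, wake waiters */
}
#endif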

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vn_printf(vp, "vm_object_vndeallocate ");
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
		umtx_shm_object_terminated(object);

	object->ref_count--;

	/* vrele may need the vnode lock. */
	VM_OBJECT_WUNLOCK(object);
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	struct vnode *vp;

	while (object != NULL) {
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_WUNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS) != 0) {
				vp = object->un_pager.swp.swp_tmpfs;
				vhold(vp);
				VM_OBJECT_WUNLOCK(object);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				if (object->type == OBJT_DEAD ||
				    object->ref_count != 1) {
					VM_OBJECT_WUNLOCK(object);
					VOP_UNLOCK(vp, 0);
					vdrop(vp);
					return;
				}
				if ((object->flags & OBJ_TMPFS) != 0)
					VOP_UNSET_TEXT(vp);
				VOP_UNLOCK(vp, 0);
				vdrop(vp);
			}
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS_NODE) == 0))) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
				    ("shadowed tmpfs v_object %p", object));
				if (!VM_OBJECT_TRYWLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_WUNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_WUNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_WUNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						VM_OBJECT_SLEEP(object, object,
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_WLOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_WUNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_WUNLOCK(object);
					continue;
				}
				VM_OBJECT_WUNLOCK(robject);
			}
			VM_OBJECT_WUNLOCK(object);
			return;
		}
doterm:
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			VM_OBJECT_WLOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			VM_OBJECT_WUNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_WUNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy removes the object from the global object list
 *	and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate_pages removes any remaining pageable pages
 *	from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	vm_page_t p, p_next;
	struct mtx *mtx;

	VM_OBJECT_ASSERT_WLOCKED(object);

	mtx = NULL;

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to an empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		if ((object->flags & OBJ_UNMANAGED) == 0)
			/*
			 * vm_page_free_prep() only needs the page
			 * lock for managed pages.
			 */
			vm_page_change_lock(p, &mtx);
		p->object = NULL;
		if (vm_page_wired(p))
			continue;
		VM_CNT_INC(v_pfree);
		vm_page_free(p);
	}
	if (mtx != NULL)
		mtx_unlock(mtx);

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
{

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
		*clearobjflags = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.  If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t clearobjflags, eio, res;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
	 * objects.  The check below prevents the function from
	 * operating on non-vnode objects.
	 */
	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
	    object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
	clearobjflags = tstart == 0 && tend >= object->size;
	res = TRUE;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (p->valid == 0)
			continue;
		if (vm_page_sleep_if_busy(p, "vpcwai")) {
			if (object->generation != curgeneration) {
				if ((flags & OBJPC_SYNC) != 0)
					goto rescan;
				else
					clearobjflags = FALSE;
			}
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
			continue;

		n = vm_object_page_collect_flush(object, p, pagerflags,
		    flags, &clearobjflags, &eio);
		if (eio) {
			res = FALSE;
			clearobjflags = FALSE;
		}
		if (object->generation != curgeneration) {
			if ((flags & OBJPC_SYNC) != 0)
				goto rescan;
			else
				clearobjflags = FALSE;
		}

		/*
		 * If the VOP_PUTPAGES() did a truncated write, so
		 * that even the first page of the run is not fully
		 * written, vm_pageout_flush() returns 0 as the run
		 * length.  Since the condition that caused truncated
		 * write may be permanent, e.g. exhausted free space,
		 * accepting n == 0 would cause an infinite loop.
		 *
		 * Forwarding the iterator leaves the unwritten page
		 * behind, but there is not much we can do there if
		 * filesystem refuses to write it.
		 */
		if (n == 0) {
			n = 1;
			clearobjflags = FALSE;
		}
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	if (clearobjflags)
		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
	return (res);
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *clearobjflags, boolean_t *eio)
{
	vm_page_t ma[vm_pageout_page_count], p_first, tp;
	int count, i, mreq, runlen;

	vm_page_lock_assert(p, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	count = 1;
	mreq = 0;

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
	}

	for (p_first = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(p_first);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
		p_first = tp;
		mreq++;
	}

	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
		ma[i] = tp;

	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
	return (runlen);
}
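
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * of the "start == end == 0 cleans everything" convention documented above.
 * It mirrors the synchronous whole-object flush that vm_object_terminate()
 * performs for vnode objects.  The function name is hypothetical; the block
 * is compiled out.
 */
#if 0
static boolean_t
flush_whole_object_example(vm_object_t object)
{
	boolean_t res;

	VM_OBJECT_WLOCK(object);
	/* Synchronously write back every dirty page of the object. */
	res = vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_WUNLOCK(object);
	return (res);
}
#endif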

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int error, flags, fsync_after;
	boolean_t res;

	if (object == NULL)
		return (TRUE);
	res = TRUE;
	error = 0;
	VM_OBJECT_WLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_WLOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_WUNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0 &&
	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
		VM_OBJECT_WUNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (syncio && !invalidate && offset == 0 &&
		    atop(size) == object->size) {
			/*
			 * If syncing the whole mapping of the file,
			 * it is faster to schedule all the writes in
			 * async mode, also allowing the clustering,
			 * and then wait for i/o to complete.
			 */
			flags = 0;
			fsync_after = TRUE;
		} else {
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
			fsync_after = FALSE;
		}
		VM_OBJECT_WLOCK(object);
		res = vm_object_page_clean(object, offset, offset + size,
		    flags);
		VM_OBJECT_WUNLOCK(object);
		if (fsync_after)
			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error != 0)
			res = FALSE;
		VM_OBJECT_WLOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_WUNLOCK(object);
	return (res);
}
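
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * showing an msync(MS_SYNC | MS_INVALIDATE)-style use of vm_object_sync(),
 * which locks the object internally.  The function name is hypothetical;
 * the block is compiled out.
 */
#if 0
static boolean_t
msync_region_example(vm_object_t object, vm_ooffset_t offset, vm_size_t size)
{

	/* Write pages back synchronously and discard the cached copies. */
	return (vm_object_sync(object, offset, size, TRUE, TRUE));
}
#endif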
1127193b9358SAlan Cox * 1128193b9358SAlan Cox * MADV_DONTNEED (any object) 1129193b9358SAlan Cox * 1130193b9358SAlan Cox * Deactivate the specified pages if they are resident. 1131193b9358SAlan Cox * 1132193b9358SAlan Cox * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 1133193b9358SAlan Cox * OBJ_ONEMAPPING only) 1134193b9358SAlan Cox * 1135193b9358SAlan Cox * Deactivate and clean the specified pages if they are 1136193b9358SAlan Cox * resident. This permits the process to reuse the pages 1137193b9358SAlan Cox * without faulting or the kernel to reclaim the pages 1138193b9358SAlan Cox * without I/O. 1139867a482dSJohn Dyson */ 1140867a482dSJohn Dyson void 114192a59946SJohn Baldwin vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, 1142c2655a40SMark Johnston int advice) 1143867a482dSJohn Dyson { 114492a59946SJohn Baldwin vm_pindex_t tpindex; 114534567de7SAlan Cox vm_object_t backing_object, tobject; 1146aa3650eaSMark Johnston vm_page_t m, tm; 1147867a482dSJohn Dyson 1148867a482dSJohn Dyson if (object == NULL) 1149867a482dSJohn Dyson return; 1150c2655a40SMark Johnston 11516e20a165SJohn Dyson relookup: 1152aa3650eaSMark Johnston VM_OBJECT_WLOCK(object); 1153aa3650eaSMark Johnston if (!vm_object_advice_applies(object, advice)) { 1154aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1155aa3650eaSMark Johnston return; 11566e20a165SJohn Dyson } 1157aa3650eaSMark Johnston for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { 1158aa3650eaSMark Johnston tobject = object; 1159c2655a40SMark Johnston 11601ce137beSMatthew Dillon /* 1161aa3650eaSMark Johnston * If the next page isn't resident in the top-level object, we 1162aa3650eaSMark Johnston * need to search the shadow chain. When applying MADV_FREE, we 1163aa3650eaSMark Johnston * take care to release any swap space used to store 1164aa3650eaSMark Johnston * non-resident pages. 1165aa3650eaSMark Johnston */ 1166aa3650eaSMark Johnston if (m == NULL || pindex < m->pindex) { 1167aa3650eaSMark Johnston /* 1168aa3650eaSMark Johnston * Optimize a common case: if the top-level object has 1169aa3650eaSMark Johnston * no backing object, we can skip over the non-resident 1170aa3650eaSMark Johnston * range in constant time. 11711ce137beSMatthew Dillon */ 1172c2655a40SMark Johnston if (object->backing_object == NULL) { 1173c2655a40SMark Johnston tpindex = (m != NULL && m->pindex < end) ? 1174c2655a40SMark Johnston m->pindex : end; 1175aa3650eaSMark Johnston vm_object_madvise_freespace(object, advice, 1176aa3650eaSMark Johnston pindex, tpindex - pindex); 1177c2655a40SMark Johnston if ((pindex = tpindex) == end) 1178c2655a40SMark Johnston break; 1179aa3650eaSMark Johnston goto next_page; 1180aa3650eaSMark Johnston } 1181aa3650eaSMark Johnston 1182aa3650eaSMark Johnston tpindex = pindex; 1183aa3650eaSMark Johnston do { 1184aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, 1185aa3650eaSMark Johnston tpindex, 1); 11861ce137beSMatthew Dillon /* 1187aa3650eaSMark Johnston * Prepare to search the next object in the 1188aa3650eaSMark Johnston * chain. 
11891ce137beSMatthew Dillon */ 119034567de7SAlan Cox backing_object = tobject->backing_object; 119134567de7SAlan Cox if (backing_object == NULL) 1192aa3650eaSMark Johnston goto next_pindex; 119389f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object); 1194aa3650eaSMark Johnston tpindex += 1195aa3650eaSMark Johnston OFF_TO_IDX(tobject->backing_object_offset); 11969b98b796SAlan Cox if (tobject != object) 119789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 119834567de7SAlan Cox tobject = backing_object; 1199aa3650eaSMark Johnston if (!vm_object_advice_applies(tobject, advice)) 1200aa3650eaSMark Johnston goto next_pindex; 1201aa3650eaSMark Johnston } while ((tm = vm_page_lookup(tobject, tpindex)) == 1202aa3650eaSMark Johnston NULL); 1203aa3650eaSMark Johnston } else { 1204aa3650eaSMark Johnston next_page: 1205aa3650eaSMark Johnston tm = m; 1206aa3650eaSMark Johnston m = TAILQ_NEXT(m, listq); 1207c2655a40SMark Johnston } 1208c2655a40SMark Johnston 1209867a482dSJohn Dyson /* 12106a2a3d73SAlan Cox * If the page is not in a normal state, skip it. 1211867a482dSJohn Dyson */ 1212aa3650eaSMark Johnston if (tm->valid != VM_PAGE_BITS_ALL) 1213aa3650eaSMark Johnston goto next_pindex; 1214aa3650eaSMark Johnston vm_page_lock(tm); 12151d3a1bcfSMark Johnston if (vm_page_held(tm)) { 1216aa3650eaSMark Johnston vm_page_unlock(tm); 1217aa3650eaSMark Johnston goto next_pindex; 12186e20a165SJohn Dyson } 1219aa3650eaSMark Johnston KASSERT((tm->flags & PG_FICTITIOUS) == 0, 1220aa3650eaSMark Johnston ("vm_object_madvise: page %p is fictitious", tm)); 1221aa3650eaSMark Johnston KASSERT((tm->oflags & VPO_UNMANAGED) == 0, 1222aa3650eaSMark Johnston ("vm_object_madvise: page %p is not managed", tm)); 1223aa3650eaSMark Johnston if (vm_page_busied(tm)) { 1224aa3650eaSMark Johnston if (object != tobject) 1225aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(tobject); 1226aa3650eaSMark Johnston VM_OBJECT_WUNLOCK(object); 1227c2655a40SMark Johnston if (advice == MADV_WILLNEED) { 1228b11b56b5SAlan Cox /* 1229b11b56b5SAlan Cox * Reference the page before unlocking and 1230b11b56b5SAlan Cox * sleeping so that the page daemon is less 1231b11b56b5SAlan Cox * likely to reclaim it. 1232b11b56b5SAlan Cox */ 1233aa3650eaSMark Johnston vm_page_aflag_set(tm, PGA_REFERENCED); 1234567e51e1SAlan Cox } 1235aa3650eaSMark Johnston vm_page_busy_sleep(tm, "madvpo", false); 12366e20a165SJohn Dyson goto relookup; 123734567de7SAlan Cox } 1238aa3650eaSMark Johnston vm_page_advise(tm, advice); 1239aa3650eaSMark Johnston vm_page_unlock(tm); 1240aa3650eaSMark Johnston vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); 1241aa3650eaSMark Johnston next_pindex: 12429b98b796SAlan Cox if (tobject != object) 124389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(tobject); 1244867a482dSJohn Dyson } 124589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1246867a482dSJohn Dyson } 1247867a482dSJohn Dyson 1248867a482dSJohn Dyson /* 1249df8bae1dSRodney W. Grimes * vm_object_shadow: 1250df8bae1dSRodney W. Grimes * 1251df8bae1dSRodney W. Grimes * Create a new object which is backed by the 1252df8bae1dSRodney W. Grimes * specified existing object range. The source 1253df8bae1dSRodney W. Grimes * object reference is deallocated. 1254df8bae1dSRodney W. Grimes * 1255df8bae1dSRodney W. Grimes * The new object and offset into that object 1256df8bae1dSRodney W. Grimes * are returned in the source parameters. 1257df8bae1dSRodney W. Grimes */ 125826f9a767SRodney W. 
Grimes void
12591b40f8c0SMatthew Dillon vm_object_shadow(
12601b40f8c0SMatthew Dillon vm_object_t *object, /* IN/OUT */
12611b40f8c0SMatthew Dillon vm_ooffset_t *offset, /* IN/OUT */
12621b40f8c0SMatthew Dillon vm_size_t length)
1263df8bae1dSRodney W. Grimes {
1264d031cff1SMatthew Dillon vm_object_t source;
1265d031cff1SMatthew Dillon vm_object_t result;
1266df8bae1dSRodney W. Grimes
1267df8bae1dSRodney W. Grimes source = *object;
1268df8bae1dSRodney W. Grimes
1269df8bae1dSRodney W. Grimes /*
12709a2f6362SAlan Cox * Don't create the new object if the old object isn't shared.
12719a2f6362SAlan Cox */
1272570a2f4aSAlan Cox if (source != NULL) {
127389f6b863SAttilio Rao VM_OBJECT_WLOCK(source);
1274570a2f4aSAlan Cox if (source->ref_count == 1 &&
12759a2f6362SAlan Cox source->handle == NULL &&
12769a2f6362SAlan Cox (source->type == OBJT_DEFAULT ||
12779917e010SAlan Cox source->type == OBJT_SWAP)) {
127889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source);
12799a2f6362SAlan Cox return;
12809917e010SAlan Cox }
128189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source);
1282570a2f4aSAlan Cox }
12839a2f6362SAlan Cox
12849a2f6362SAlan Cox /*
1285570a2f4aSAlan Cox * Allocate a new object with the given length.
1286df8bae1dSRodney W. Grimes */
12870cc74f14SAlan Cox result = vm_object_allocate(OBJT_DEFAULT, atop(length));
1288df8bae1dSRodney W. Grimes
1289df8bae1dSRodney W. Grimes /*
12900d94caffSDavid Greenman * The new object shadows the source object, adding a reference to it.
12910d94caffSDavid Greenman * Our caller changes its reference to point to the new object,
12920d94caffSDavid Greenman * removing a reference to the source object. Net result: no change
12930d94caffSDavid Greenman * of reference count.
12949b09fe24SMatthew Dillon *
12959b09fe24SMatthew Dillon * Try to optimize the result object's page color when shadowing
1296956f3135SPhilippe Charnier * in order to maintain page coloring consistency in the combined
12979b09fe24SMatthew Dillon * shadowed object.
1298df8bae1dSRodney W. Grimes */
129924a1cce3SDavid Greenman result->backing_object = source;
13009174ca7bSTor Egge /*
13019174ca7bSTor Egge * Store the offset into the source object, and fix up the offset into
13029174ca7bSTor Egge * the new object.
13039174ca7bSTor Egge */
13049174ca7bSTor Egge result->backing_object_offset = *offset;
1305570a2f4aSAlan Cox if (source != NULL) {
130689f6b863SAttilio Rao VM_OBJECT_WLOCK(source);
13073f289c3fSJeff Roberson result->domain = source->domain;
13081c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1309eaf13dd7SJohn Dyson source->shadow_count++;
1310f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
13117b54b1a9SAlan Cox result->flags |= source->flags & OBJ_COLORED;
1312f8a47341SAlan Cox result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
1313f8a47341SAlan Cox ((1 << (VM_NFREEORDER - 1)) - 1);
1314f8a47341SAlan Cox #endif
131589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source);
1316de5f6a77SJohn Dyson }
1317df8bae1dSRodney W. Grimes
1318df8bae1dSRodney W. Grimes
1319df8bae1dSRodney W. Grimes /*
1320df8bae1dSRodney W. Grimes * Return the new object and offset to the caller.
1321df8bae1dSRodney W. Grimes */
1322df8bae1dSRodney W. Grimes *offset = 0;
1323df8bae1dSRodney W. Grimes *object = result;
1324df8bae1dSRodney W. Grimes }
1325df8bae1dSRodney W. Grimes
1326c5aaa06dSAlan Cox /*
1327c5aaa06dSAlan Cox * vm_object_split:
1328c5aaa06dSAlan Cox *
1329c5aaa06dSAlan Cox * Split the pages in a map entry into a new object.
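 * (An illustrative caller sketch follows; this comment continues
 * after it.)
 */

/*
 * Illustrative sketch of a hypothetical caller of vm_object_shadow()
 * above, in the style of copy-on-write setup on a map entry.  The
 * function name "cow_shadow_example" is invented for illustration;
 * this is a sketch, not a copy of the real vm_map code.
 */
static void
cow_shadow_example(vm_map_entry_t entry)
{
	vm_object_t obj;
	vm_ooffset_t off;

	obj = entry->object.vm_object;
	off = entry->offset;

	/*
	 * vm_object_shadow() consumes the caller's reference to the
	 * old object (it becomes the shadow's backing reference) and
	 * leaves a reference to the new shadow object in its place.
	 */
	vm_object_shadow(&obj, &off, entry->end - entry->start);
	entry->object.vm_object = obj;
	entry->offset = off;
}

/*
 * vm_object_split (continued):
 *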
This affords 1330c5aaa06dSAlan Cox * easier removal of unused pages, and keeps object inheritance from 1331c5aaa06dSAlan Cox * being a negative impact on memory usage. 1332c5aaa06dSAlan Cox */ 1333c5aaa06dSAlan Cox void 1334c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry) 1335c5aaa06dSAlan Cox { 133673000556SAlan Cox vm_page_t m, m_next; 1337c5aaa06dSAlan Cox vm_object_t orig_object, new_object, source; 133873000556SAlan Cox vm_pindex_t idx, offidxstart; 133973000556SAlan Cox vm_size_t size; 1340c5aaa06dSAlan Cox 1341c5aaa06dSAlan Cox orig_object = entry->object.vm_object; 1342c5aaa06dSAlan Cox if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 1343c5aaa06dSAlan Cox return; 1344c5aaa06dSAlan Cox if (orig_object->ref_count <= 1) 1345c5aaa06dSAlan Cox return; 134689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 1347c5aaa06dSAlan Cox 13484da9f125SAlan Cox offidxstart = OFF_TO_IDX(entry->offset); 134995442adfSAlan Cox size = atop(entry->end - entry->start); 1350c5aaa06dSAlan Cox 13514da9f125SAlan Cox /* 13524da9f125SAlan Cox * If swap_pager_copy() is later called, it will convert new_object 13534da9f125SAlan Cox * into a swap object. 13544da9f125SAlan Cox */ 13554da9f125SAlan Cox new_object = vm_object_allocate(OBJT_DEFAULT, size); 1356c5aaa06dSAlan Cox 1357c5474b8fSAlan Cox /* 1358c5474b8fSAlan Cox * At this point, the new object is still private, so the order in 1359c5474b8fSAlan Cox * which the original and new objects are locked does not matter. 1360c5474b8fSAlan Cox */ 136189f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 136289f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 13633f289c3fSJeff Roberson new_object->domain = orig_object->domain; 1364c5aaa06dSAlan Cox source = orig_object->backing_object; 1365c5aaa06dSAlan Cox if (source != NULL) { 136689f6b863SAttilio Rao VM_OBJECT_WLOCK(source); 136719c244d0SAlan Cox if ((source->flags & OBJ_DEAD) != 0) { 136889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 136989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 137089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 137119c244d0SAlan Cox vm_object_deallocate(new_object); 137289f6b863SAttilio Rao VM_OBJECT_WLOCK(orig_object); 137319c244d0SAlan Cox return; 137419c244d0SAlan Cox } 13751c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, 1376c5aaa06dSAlan Cox new_object, shadow_list); 13778e3a76fbSAlan Cox source->shadow_count++; 1378b921a12bSAlan Cox vm_object_reference_locked(source); /* for new_object */ 1379c5aaa06dSAlan Cox vm_object_clear_flag(source, OBJ_ONEMAPPING); 138089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(source); 1381c5aaa06dSAlan Cox new_object->backing_object_offset = 13824da9f125SAlan Cox orig_object->backing_object_offset + entry->offset; 1383c5aaa06dSAlan Cox new_object->backing_object = source; 1384c5aaa06dSAlan Cox } 1385ef694c1aSEdward Tomasz Napierala if (orig_object->cred != NULL) { 1386ef694c1aSEdward Tomasz Napierala new_object->cred = orig_object->cred; 1387ef694c1aSEdward Tomasz Napierala crhold(orig_object->cred); 13883364c323SKonstantin Belousov new_object->charge = ptoa(size); 13893364c323SKonstantin Belousov KASSERT(orig_object->charge >= ptoa(size), 13903364c323SKonstantin Belousov ("orig_object->charge < 0")); 13913364c323SKonstantin Belousov orig_object->charge -= ptoa(size); 13923364c323SKonstantin Belousov } 1393c5aaa06dSAlan Cox retry: 1394b382c10aSKonstantin Belousov m = vm_page_find_least(orig_object, offidxstart); 139573000556SAlan Cox for (; m != NULL && (idx = m->pindex - offidxstart) < size; 139673000556SAlan Cox m 
= m_next) { 139773000556SAlan Cox m_next = TAILQ_NEXT(m, listq); 1398c5aaa06dSAlan Cox 1399c5aaa06dSAlan Cox /* 1400c5aaa06dSAlan Cox * We must wait for pending I/O to complete before we can 1401c5aaa06dSAlan Cox * rename the page. 1402c5aaa06dSAlan Cox * 1403c5aaa06dSAlan Cox * We do not have to VM_PROT_NONE the page as mappings should 1404c5aaa06dSAlan Cox * not be changed by this operation. 1405c5aaa06dSAlan Cox */ 1406c7aebda8SAttilio Rao if (vm_page_busied(m)) { 140789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1408c7aebda8SAttilio Rao vm_page_lock(m); 1409c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 14105975e53dSKonstantin Belousov vm_page_busy_sleep(m, "spltwt", false); 1411c7aebda8SAttilio Rao VM_OBJECT_WLOCK(orig_object); 141289f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1413c5aaa06dSAlan Cox goto retry; 1414de33beddSAlan Cox } 1415e946b949SAttilio Rao 14163453bca8SAlan Cox /* vm_page_rename() will dirty the page. */ 1417e946b949SAttilio Rao if (vm_page_rename(m, new_object, idx)) { 1418e946b949SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1419e946b949SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 14208d6fbbb8SJeff Roberson vm_radix_wait(); 1421e946b949SAttilio Rao VM_OBJECT_WLOCK(orig_object); 1422e946b949SAttilio Rao VM_OBJECT_WLOCK(new_object); 1423e946b949SAttilio Rao goto retry; 1424e946b949SAttilio Rao } 1425b5f359b7SAlan Cox #if VM_NRESERVLEVEL > 0 1426b5f359b7SAlan Cox /* 1427b5f359b7SAlan Cox * If some of the reservation's allocated pages remain with 1428b5f359b7SAlan Cox * the original object, then transferring the reservation to 1429b5f359b7SAlan Cox * the new object is neither particularly beneficial nor 1430b5f359b7SAlan Cox * particularly harmful as compared to leaving the reservation 1431b5f359b7SAlan Cox * with the original object. If, however, all of the 1432b5f359b7SAlan Cox * reservation's allocated pages are transferred to the new 1433b5f359b7SAlan Cox * object, then transferring the reservation is typically 1434b5f359b7SAlan Cox * beneficial. Determining which of these two cases applies 1435b5f359b7SAlan Cox * would be more costly than unconditionally renaming the 1436b5f359b7SAlan Cox * reservation. 1437b5f359b7SAlan Cox */ 1438b5f359b7SAlan Cox vm_reserv_rename(m, new_object, orig_object, offidxstart); 1439b5f359b7SAlan Cox #endif 1440dfd55c0cSAttilio Rao if (orig_object->type == OBJT_SWAP) 1441c7aebda8SAttilio Rao vm_page_xbusy(m); 1442c5aaa06dSAlan Cox } 1443d7a013c3SAlan Cox if (orig_object->type == OBJT_SWAP) { 1444c5aaa06dSAlan Cox /* 1445c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case the orig_object's 1446c7c8dd7eSAlan Cox * and new_object's locks are released and reacquired. 
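 * The pages moved into new_object above were left exclusive-busied
 * so that they cannot be accessed until the swap copy completes.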
1447c5aaa06dSAlan Cox */ 1448c5aaa06dSAlan Cox swap_pager_copy(orig_object, new_object, offidxstart, 0); 1449dfd55c0cSAttilio Rao TAILQ_FOREACH(m, &new_object->memq, listq) 1450c7aebda8SAttilio Rao vm_page_xunbusy(m); 1451c5aaa06dSAlan Cox } 145289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(orig_object); 145389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_object); 1454c5aaa06dSAlan Cox entry->object.vm_object = new_object; 1455c5aaa06dSAlan Cox entry->offset = 0LL; 1456c5aaa06dSAlan Cox vm_object_deallocate(orig_object); 145789f6b863SAttilio Rao VM_OBJECT_WLOCK(new_object); 1458c5aaa06dSAlan Cox } 1459c5aaa06dSAlan Cox 14602ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_NOWAIT 0x0002 14612ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_WAIT 0x0004 14622ad1a3f7SMatthew Dillon 146399a1570aSKonstantin Belousov static vm_page_t 14644cc8daf7SConrad Meyer vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next, 146599a1570aSKonstantin Belousov int op) 146699a1570aSKonstantin Belousov { 146799a1570aSKonstantin Belousov vm_object_t backing_object; 146899a1570aSKonstantin Belousov 146999a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(object); 147099a1570aSKonstantin Belousov backing_object = object->backing_object; 147199a1570aSKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(backing_object); 147299a1570aSKonstantin Belousov 147399a1570aSKonstantin Belousov KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p)); 147499a1570aSKonstantin Belousov KASSERT(p == NULL || p->object == object || p->object == backing_object, 147599a1570aSKonstantin Belousov ("invalid ownership %p %p %p", p, object, backing_object)); 147699a1570aSKonstantin Belousov if ((op & OBSC_COLLAPSE_NOWAIT) != 0) 147799a1570aSKonstantin Belousov return (next); 147899a1570aSKonstantin Belousov if (p != NULL) 147999a1570aSKonstantin Belousov vm_page_lock(p); 148099a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 148199a1570aSKonstantin Belousov VM_OBJECT_WUNLOCK(backing_object); 14828d6fbbb8SJeff Roberson /* The page is only NULL when rename fails. 
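 * vm_page_rename() fails only when the radix trie cannot allocate a
 * node, so sleep until the allocator can satisfy the insertion and
 * then retry.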
*/ 148399a1570aSKonstantin Belousov if (p == NULL) 14848d6fbbb8SJeff Roberson vm_radix_wait(); 148599a1570aSKonstantin Belousov else 14865975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmocol", false); 148799a1570aSKonstantin Belousov VM_OBJECT_WLOCK(object); 148899a1570aSKonstantin Belousov VM_OBJECT_WLOCK(backing_object); 148999a1570aSKonstantin Belousov return (TAILQ_FIRST(&backing_object->memq)); 149099a1570aSKonstantin Belousov } 149199a1570aSKonstantin Belousov 149299a1570aSKonstantin Belousov static bool 14934cc8daf7SConrad Meyer vm_object_scan_all_shadowed(vm_object_t object) 14944cc8daf7SConrad Meyer { 14954cc8daf7SConrad Meyer vm_object_t backing_object; 14964cc8daf7SConrad Meyer vm_page_t p, pp; 149777d6fd97SKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex, pi, ps; 14984cc8daf7SConrad Meyer 14994cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object); 15004cc8daf7SConrad Meyer VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 15014cc8daf7SConrad Meyer 15024cc8daf7SConrad Meyer backing_object = object->backing_object; 15034cc8daf7SConrad Meyer 150477d6fd97SKonstantin Belousov if (backing_object->type != OBJT_DEFAULT && 150577d6fd97SKonstantin Belousov backing_object->type != OBJT_SWAP) 15064cc8daf7SConrad Meyer return (false); 15074cc8daf7SConrad Meyer 150877d6fd97SKonstantin Belousov pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 150977d6fd97SKonstantin Belousov p = vm_page_find_least(backing_object, pi); 151077d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 15114cc8daf7SConrad Meyer 15124cc8daf7SConrad Meyer /* 151377d6fd97SKonstantin Belousov * Only check pages inside the parent object's range and 151477d6fd97SKonstantin Belousov * inside the parent object's mapping of the backing object. 15154cc8daf7SConrad Meyer */ 151677d6fd97SKonstantin Belousov for (;; pi++) { 151777d6fd97SKonstantin Belousov if (p != NULL && p->pindex < pi) 151877d6fd97SKonstantin Belousov p = TAILQ_NEXT(p, listq); 151977d6fd97SKonstantin Belousov if (ps < pi) 152077d6fd97SKonstantin Belousov ps = swap_pager_find_least(backing_object, pi); 152177d6fd97SKonstantin Belousov if (p == NULL && ps >= backing_object->size) 152277d6fd97SKonstantin Belousov break; 152377d6fd97SKonstantin Belousov else if (p == NULL) 152477d6fd97SKonstantin Belousov pi = ps; 152577d6fd97SKonstantin Belousov else 152677d6fd97SKonstantin Belousov pi = MIN(p->pindex, ps); 152777d6fd97SKonstantin Belousov 152877d6fd97SKonstantin Belousov new_pindex = pi - backing_offset_index; 152977d6fd97SKonstantin Belousov if (new_pindex >= object->size) 153077d6fd97SKonstantin Belousov break; 15314cc8daf7SConrad Meyer 15324cc8daf7SConrad Meyer /* 15334cc8daf7SConrad Meyer * See if the parent has the page or if the parent's object 15344cc8daf7SConrad Meyer * pager has the page. If the parent has the page but the page 15354cc8daf7SConrad Meyer * is not valid, the parent's object pager must have the page. 15364cc8daf7SConrad Meyer * 15374cc8daf7SConrad Meyer * If this fails, the parent does not completely shadow the 15384cc8daf7SConrad Meyer * object and we might as well give up now. 
15394cc8daf7SConrad Meyer */ 15404cc8daf7SConrad Meyer pp = vm_page_lookup(object, new_pindex); 15414cc8daf7SConrad Meyer if ((pp == NULL || pp->valid == 0) && 15424cc8daf7SConrad Meyer !vm_pager_has_page(object, new_pindex, NULL, NULL)) 15434cc8daf7SConrad Meyer return (false); 15444cc8daf7SConrad Meyer } 15454cc8daf7SConrad Meyer return (true); 15464cc8daf7SConrad Meyer } 15474cc8daf7SConrad Meyer 15484cc8daf7SConrad Meyer static bool 15494cc8daf7SConrad Meyer vm_object_collapse_scan(vm_object_t object, int op) 15502ad1a3f7SMatthew Dillon { 15512ad1a3f7SMatthew Dillon vm_object_t backing_object; 155299a1570aSKonstantin Belousov vm_page_t next, p, pp; 155399a1570aSKonstantin Belousov vm_pindex_t backing_offset_index, new_pindex; 15542ad1a3f7SMatthew Dillon 155589f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 155689f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 15572ad1a3f7SMatthew Dillon 15582ad1a3f7SMatthew Dillon backing_object = object->backing_object; 15592ad1a3f7SMatthew Dillon backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 15602ad1a3f7SMatthew Dillon 15612ad1a3f7SMatthew Dillon /* 15622ad1a3f7SMatthew Dillon * Initial conditions 15632ad1a3f7SMatthew Dillon */ 15644cc8daf7SConrad Meyer if ((op & OBSC_COLLAPSE_WAIT) != 0) 15652ad1a3f7SMatthew Dillon vm_object_set_flag(backing_object, OBJ_DEAD); 15662ad1a3f7SMatthew Dillon 15672ad1a3f7SMatthew Dillon /* 15682ad1a3f7SMatthew Dillon * Our scan 15692ad1a3f7SMatthew Dillon */ 15704cc8daf7SConrad Meyer for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) { 157199a1570aSKonstantin Belousov next = TAILQ_NEXT(p, listq); 157299a1570aSKonstantin Belousov new_pindex = p->pindex - backing_offset_index; 15732ad1a3f7SMatthew Dillon 15742ad1a3f7SMatthew Dillon /* 15752ad1a3f7SMatthew Dillon * Check for busy page 15762ad1a3f7SMatthew Dillon */ 1577c7aebda8SAttilio Rao if (vm_page_busied(p)) { 15784cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, p, next, op); 15792ad1a3f7SMatthew Dillon continue; 15802ad1a3f7SMatthew Dillon } 15812ad1a3f7SMatthew Dillon 158299a1570aSKonstantin Belousov KASSERT(p->object == backing_object, 15834cc8daf7SConrad Meyer ("vm_object_collapse_scan: object mismatch")); 15842ad1a3f7SMatthew Dillon 158599a1570aSKonstantin Belousov if (p->pindex < backing_offset_index || 158699a1570aSKonstantin Belousov new_pindex >= object->size) { 1587e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 15884cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 15894cc8daf7SConrad Meyer 1); 1590e946b949SAttilio Rao 15912ad1a3f7SMatthew Dillon /* 15924cc8daf7SConrad Meyer * Page is out of the parent object's range, we can 15934cc8daf7SConrad Meyer * simply destroy it. 15942ad1a3f7SMatthew Dillon */ 15952965a453SKip Macy vm_page_lock(p); 1596f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1597f6d89838SAlan Cox ("freeing mapped page %p", p)); 1598*d842aa51SMark Johnston if (!vm_page_wired(p)) 15992ad1a3f7SMatthew Dillon vm_page_free(p); 1600f6d89838SAlan Cox else 1601f6d89838SAlan Cox vm_page_remove(p); 16022965a453SKip Macy vm_page_unlock(p); 16032ad1a3f7SMatthew Dillon continue; 16042ad1a3f7SMatthew Dillon } 16052ad1a3f7SMatthew Dillon 16062ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 160799a1570aSKonstantin Belousov if (pp != NULL && vm_page_busied(pp)) { 1608e18cc7bfSMax Laier /* 16094cc8daf7SConrad Meyer * The page in the parent is busy and possibly not 16104cc8daf7SConrad Meyer * (yet) valid. 
Until its state is finalized by the 16114cc8daf7SConrad Meyer * busy bit owner, we can't tell whether it shadows the 16124cc8daf7SConrad Meyer * original page. Therefore, we must either skip it 16134cc8daf7SConrad Meyer * and the original (backing_object) page or wait for 16144cc8daf7SConrad Meyer * its state to be finalized. 1615e18cc7bfSMax Laier * 16164cc8daf7SConrad Meyer * This is due to a race with vm_fault() where we must 16174cc8daf7SConrad Meyer * unbusy the original (backing_obj) page before we can 16184cc8daf7SConrad Meyer * (re)lock the parent. Hence we can get here. 1619e18cc7bfSMax Laier */ 16204cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, pp, next, 16214cc8daf7SConrad Meyer op); 1622e18cc7bfSMax Laier continue; 1623e18cc7bfSMax Laier } 162499a1570aSKonstantin Belousov 162599a1570aSKonstantin Belousov KASSERT(pp == NULL || pp->valid != 0, 162699a1570aSKonstantin Belousov ("unbusy invalid page %p", pp)); 162799a1570aSKonstantin Belousov 16284cc8daf7SConrad Meyer if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, 16294cc8daf7SConrad Meyer NULL)) { 163099a1570aSKonstantin Belousov /* 16314cc8daf7SConrad Meyer * The page already exists in the parent OR swap exists 16324cc8daf7SConrad Meyer * for this location in the parent. Leave the parent's 16334cc8daf7SConrad Meyer * page alone. Destroy the original page from the 16344cc8daf7SConrad Meyer * backing object. 163599a1570aSKonstantin Belousov */ 1636e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 16374cc8daf7SConrad Meyer swap_pager_freespace(backing_object, p->pindex, 16384cc8daf7SConrad Meyer 1); 16392965a453SKip Macy vm_page_lock(p); 1640f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1641f6d89838SAlan Cox ("freeing mapped page %p", p)); 1642*d842aa51SMark Johnston if (!vm_page_wired(p)) 16432ad1a3f7SMatthew Dillon vm_page_free(p); 1644f6d89838SAlan Cox else 1645f6d89838SAlan Cox vm_page_remove(p); 16462965a453SKip Macy vm_page_unlock(p); 16472ad1a3f7SMatthew Dillon continue; 16482ad1a3f7SMatthew Dillon } 16492ad1a3f7SMatthew Dillon 1650e946b949SAttilio Rao /* 16514cc8daf7SConrad Meyer * Page does not exist in parent, rename the page from the 16524cc8daf7SConrad Meyer * backing object to the main object. 1653e946b949SAttilio Rao * 16544cc8daf7SConrad Meyer * If the page was mapped to a process, it can remain mapped 16553453bca8SAlan Cox * through the rename. vm_page_rename() will dirty the page. 1656e946b949SAttilio Rao */ 1657e946b949SAttilio Rao if (vm_page_rename(p, object, new_pindex)) { 16584cc8daf7SConrad Meyer next = vm_object_collapse_scan_wait(object, NULL, next, 16594cc8daf7SConrad Meyer op); 1660e946b949SAttilio Rao continue; 1661e946b949SAttilio Rao } 166214a5dc17SAttilio Rao 166314a5dc17SAttilio Rao /* Use the old pindex to free the right page. */ 1664e946b949SAttilio Rao if (backing_object->type == OBJT_SWAP) 166514a5dc17SAttilio Rao swap_pager_freespace(backing_object, 166614a5dc17SAttilio Rao new_pindex + backing_offset_index, 1); 1667e946b949SAttilio Rao 1668f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1669f8a47341SAlan Cox /* 1670f8a47341SAlan Cox * Rename the reservation. 1671f8a47341SAlan Cox */ 1672f8a47341SAlan Cox vm_reserv_rename(p, object, backing_object, 1673f8a47341SAlan Cox backing_offset_index); 1674f8a47341SAlan Cox #endif 16752ad1a3f7SMatthew Dillon } 167699a1570aSKonstantin Belousov return (true); 16772ad1a3f7SMatthew Dillon } 16782ad1a3f7SMatthew Dillon 1679df8bae1dSRodney W. Grimes 1680df8bae1dSRodney W. 
Grimes /*
16812fe6e4d7SDavid Greenman * A lightweight version of collapse that may be applied earlier, even
16822fe6e4d7SDavid Greenman * while paging_in_progress is nonzero for the object. It is not a
16832fe6e4d7SDavid Greenman * complete operation, but it plugs most of the remaining leaks.
16842fe6e4d7SDavid Greenman */
16852fe6e4d7SDavid Greenman static void
16861b40f8c0SMatthew Dillon vm_object_qcollapse(vm_object_t object)
16872fe6e4d7SDavid Greenman {
16882ad1a3f7SMatthew Dillon vm_object_t backing_object = object->backing_object;
16892fe6e4d7SDavid Greenman
169089f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object);
169189f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(backing_object);
16921b40f8c0SMatthew Dillon
16932fe6e4d7SDavid Greenman if (backing_object->ref_count != 1)
16942fe6e4d7SDavid Greenman return;
16952fe6e4d7SDavid Greenman
16964cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT);
16972fe6e4d7SDavid Greenman }
16982fe6e4d7SDavid Greenman
1699df8bae1dSRodney W. Grimes /*
1700df8bae1dSRodney W. Grimes * vm_object_collapse:
1701df8bae1dSRodney W. Grimes *
1702df8bae1dSRodney W. Grimes * Collapse an object with the object backing it.
1703df8bae1dSRodney W. Grimes * Pages in the backing object are moved into the
1704df8bae1dSRodney W. Grimes * parent, and the backing object is deallocated.
1705df8bae1dSRodney W. Grimes */
170626f9a767SRodney W. Grimes void
17071b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object)
1708df8bae1dSRodney W. Grimes {
170998f139daSKonstantin Belousov vm_object_t backing_object, new_backing_object;
171098f139daSKonstantin Belousov
171189f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object);
171223955314SAlfred Perlstein
1713df8bae1dSRodney W. Grimes while (TRUE) {
1714df8bae1dSRodney W. Grimes /*
1715df8bae1dSRodney W. Grimes * Verify that the conditions are right for collapse:
1716df8bae1dSRodney W. Grimes *
17172ad1a3f7SMatthew Dillon * The object exists and the backing object exists.
1718df8bae1dSRodney W. Grimes */
171924a1cce3SDavid Greenman if ((backing_object = object->backing_object) == NULL)
17202ad1a3f7SMatthew Dillon break;
1721df8bae1dSRodney W. Grimes
1722f919ebdeSDavid Greenman /*
1723f919ebdeSDavid Greenman * We check the backing object first, because it is most likely
172424a1cce3SDavid Greenman * not collapsible.
1725f919ebdeSDavid Greenman */
172689f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object);
172724a1cce3SDavid Greenman if (backing_object->handle != NULL ||
172824a1cce3SDavid Greenman (backing_object->type != OBJT_DEFAULT &&
172912487941SKonstantin Belousov backing_object->type != OBJT_SWAP) ||
173012487941SKonstantin Belousov (backing_object->flags & (OBJ_DEAD | OBJ_NOSPLIT)) != 0 ||
173124a1cce3SDavid Greenman object->handle != NULL ||
173224a1cce3SDavid Greenman (object->type != OBJT_DEFAULT &&
173324a1cce3SDavid Greenman object->type != OBJT_SWAP) ||
173424a1cce3SDavid Greenman (object->flags & OBJ_DEAD)) {
173589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object);
17362ad1a3f7SMatthew Dillon break;
173724a1cce3SDavid Greenman }
17389b4814bbSDavid Greenman
173998f139daSKonstantin Belousov if (object->paging_in_progress != 0 ||
174098f139daSKonstantin Belousov backing_object->paging_in_progress != 0) {
1741b9921222SDavid Greenman vm_object_qcollapse(object);
174289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object);
17432ad1a3f7SMatthew Dillon break;
1744df8bae1dSRodney W. Grimes }
174598f139daSKonstantin Belousov
174626f9a767SRodney W.
Grimes /* 17470d94caffSDavid Greenman * We know that we can either collapse the backing object (if 17482ad1a3f7SMatthew Dillon * the parent is the only reference to it) or (perhaps) have 17492ad1a3f7SMatthew Dillon * the parent bypass the object if the parent happens to shadow 17502ad1a3f7SMatthew Dillon * all the resident pages in the entire backing object. 17512ad1a3f7SMatthew Dillon * 17522ad1a3f7SMatthew Dillon * This is ignoring pager-backed pages such as swap pages. 17534cc8daf7SConrad Meyer * vm_object_collapse_scan fails the shadowing test in this 17542ad1a3f7SMatthew Dillon * case. 1755df8bae1dSRodney W. Grimes */ 1756df8bae1dSRodney W. Grimes if (backing_object->ref_count == 1) { 1757aa9bc3b1SKonstantin Belousov vm_object_pip_add(object, 1); 1758aa9bc3b1SKonstantin Belousov vm_object_pip_add(backing_object, 1); 1759aa9bc3b1SKonstantin Belousov 1760df8bae1dSRodney W. Grimes /* 17612ad1a3f7SMatthew Dillon * If there is exactly one reference to the backing 17622ad1a3f7SMatthew Dillon * object, we can collapse it into the parent. 1763df8bae1dSRodney W. Grimes */ 17644cc8daf7SConrad Meyer vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT); 1765df8bae1dSRodney W. Grimes 1766f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1767f8a47341SAlan Cox /* 1768f8a47341SAlan Cox * Break any reservations from backing_object. 1769f8a47341SAlan Cox */ 1770f8a47341SAlan Cox if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) 1771f8a47341SAlan Cox vm_reserv_break_all(backing_object); 1772f8a47341SAlan Cox #endif 1773f8a47341SAlan Cox 1774df8bae1dSRodney W. Grimes /* 1775df8bae1dSRodney W. Grimes * Move the pager from backing_object to object. 1776df8bae1dSRodney W. Grimes */ 17776be36525SAlan Cox if (backing_object->type == OBJT_SWAP) { 177824a1cce3SDavid Greenman /* 1779c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case 1780c7c8dd7eSAlan Cox * the backing_object's and object's locks are 1781c7c8dd7eSAlan Cox * released and reacquired. 1782571a1e92SAttilio Rao * Since swap_pager_copy() is being asked to 1783571a1e92SAttilio Rao * destroy the source, it will change the 1784571a1e92SAttilio Rao * backing_object's type to OBJT_DEFAULT. 178524a1cce3SDavid Greenman */ 17861c7c3c6aSMatthew Dillon swap_pager_copy( 17871c7c3c6aSMatthew Dillon backing_object, 17881c7c3c6aSMatthew Dillon object, 17891c7c3c6aSMatthew Dillon OFF_TO_IDX(object->backing_object_offset), TRUE); 1790c0503609SDavid Greenman } 1791df8bae1dSRodney W. Grimes /* 1792df8bae1dSRodney W. Grimes * Object now shadows whatever backing_object did. 17932ad1a3f7SMatthew Dillon * Note that the reference to 17942ad1a3f7SMatthew Dillon * backing_object->backing_object moves from within 17952ad1a3f7SMatthew Dillon * backing_object to within object. 1796df8bae1dSRodney W. Grimes */ 17971c500307SAlan Cox LIST_REMOVE(object, shadow_list); 17984f7c7f6eSAlan Cox backing_object->shadow_count--; 1799de5f6a77SJohn Dyson if (backing_object->backing_object) { 180089f6b863SAttilio Rao VM_OBJECT_WLOCK(backing_object->backing_object); 18011c500307SAlan Cox LIST_REMOVE(backing_object, shadow_list); 180243186e53SAlan Cox LIST_INSERT_HEAD( 180343186e53SAlan Cox &backing_object->backing_object->shadow_head, 180443186e53SAlan Cox object, shadow_list); 180543186e53SAlan Cox /* 180643186e53SAlan Cox * The shadow_count has not changed. 
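 * backing_object's parent loses one shadow (backing_object
 * itself) and gains another (object).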
180743186e53SAlan Cox */ 180889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object->backing_object); 1809de5f6a77SJohn Dyson } 181024a1cce3SDavid Greenman object->backing_object = backing_object->backing_object; 18112ad1a3f7SMatthew Dillon object->backing_object_offset += 18122ad1a3f7SMatthew Dillon backing_object->backing_object_offset; 18132ad1a3f7SMatthew Dillon 1814df8bae1dSRodney W. Grimes /* 1815df8bae1dSRodney W. Grimes * Discard backing_object. 1816df8bae1dSRodney W. Grimes * 18170d94caffSDavid Greenman * Since the backing object has no pages, no pager left, 18180d94caffSDavid Greenman * and no object references within it, all that is 18190d94caffSDavid Greenman * necessary is to dispose of it. 1820df8bae1dSRodney W. Grimes */ 18219b4d473aSKonstantin Belousov KASSERT(backing_object->ref_count == 1, ( 18229b4d473aSKonstantin Belousov "backing_object %p was somehow re-referenced during collapse!", 18239b4d473aSKonstantin Belousov backing_object)); 1824aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(backing_object); 1825e735691bSJohn Baldwin backing_object->type = OBJT_DEAD; 1826e735691bSJohn Baldwin backing_object->ref_count = 0; 182789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18289b4d473aSKonstantin Belousov vm_object_destroy(backing_object); 1829df8bae1dSRodney W. Grimes 1830aa9bc3b1SKonstantin Belousov vm_object_pip_wakeup(object); 183111542376SAlan Cox counter_u64_add(object_collapses, 1); 18320d94caffSDavid Greenman } else { 1833df8bae1dSRodney W. Grimes /* 18342ad1a3f7SMatthew Dillon * If we do not entirely shadow the backing object, 18352ad1a3f7SMatthew Dillon * there is nothing we can do so we give up. 1836df8bae1dSRodney W. Grimes */ 1837df59a0feSJeff Roberson if (object->resident_page_count != object->size && 18384cc8daf7SConrad Meyer !vm_object_scan_all_shadowed(object)) { 183989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 18402ad1a3f7SMatthew Dillon break; 184124a1cce3SDavid Greenman } 1842df8bae1dSRodney W. Grimes 1843df8bae1dSRodney W. Grimes /* 18440d94caffSDavid Greenman * Make the parent shadow the next object in the 18450d94caffSDavid Greenman * chain. Deallocating backing_object will not remove 18460d94caffSDavid Greenman * it, since its reference count is at least 2. 1847df8bae1dSRodney W. Grimes */ 18481c500307SAlan Cox LIST_REMOVE(object, shadow_list); 1849eaf13dd7SJohn Dyson backing_object->shadow_count--; 185095e5e988SJohn Dyson 185195e5e988SJohn Dyson new_backing_object = backing_object->backing_object; 18528aef1712SMatthew Dillon if ((object->backing_object = new_backing_object) != NULL) { 185389f6b863SAttilio Rao VM_OBJECT_WLOCK(new_backing_object); 18541c500307SAlan Cox LIST_INSERT_HEAD( 18552ad1a3f7SMatthew Dillon &new_backing_object->shadow_head, 18562ad1a3f7SMatthew Dillon object, 18572ad1a3f7SMatthew Dillon shadow_list 18582ad1a3f7SMatthew Dillon ); 1859eaf13dd7SJohn Dyson new_backing_object->shadow_count++; 1860b921a12bSAlan Cox vm_object_reference_locked(new_backing_object); 186189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(new_backing_object); 186295e5e988SJohn Dyson object->backing_object_offset += 186395e5e988SJohn Dyson backing_object->backing_object_offset; 1864de5f6a77SJohn Dyson } 1865df8bae1dSRodney W. Grimes 1866df8bae1dSRodney W. Grimes /* 18670d94caffSDavid Greenman * Drop the reference count on backing_object. Since 186822ec553fSAlan Cox * its ref_count was at least 2, it will not vanish. 1869df8bae1dSRodney W. 
Grimes */ 187022ec553fSAlan Cox backing_object->ref_count--; 187189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(backing_object); 187211542376SAlan Cox counter_u64_add(object_bypasses, 1); 1873df8bae1dSRodney W. Grimes } 1874df8bae1dSRodney W. Grimes 1875df8bae1dSRodney W. Grimes /* 1876df8bae1dSRodney W. Grimes * Try again with this object's new backing object. 1877df8bae1dSRodney W. Grimes */ 1878df8bae1dSRodney W. Grimes } 1879df8bae1dSRodney W. Grimes } 1880df8bae1dSRodney W. Grimes 1881df8bae1dSRodney W. Grimes /* 1882bff99f0dSAlan Cox * vm_object_page_remove: 1883df8bae1dSRodney W. Grimes * 188468855966SAlan Cox * For the given object, either frees or invalidates each of the 18856bbee8e2SAlan Cox * specified pages. In general, a page is freed. However, if a page is 18866bbee8e2SAlan Cox * wired for any reason other than the existence of a managed, wired 18876bbee8e2SAlan Cox * mapping, then it may be invalidated but not removed from the object. 18886bbee8e2SAlan Cox * Pages are specified by the given range ["start", "end") and the option 18896bbee8e2SAlan Cox * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range 18906bbee8e2SAlan Cox * extends from "start" to the end of the object. If the option 18916bbee8e2SAlan Cox * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the 18926bbee8e2SAlan Cox * specified range are affected. If the option OBJPR_NOTMAPPED is 18936bbee8e2SAlan Cox * specified, then the pages within the specified range must have no 18946bbee8e2SAlan Cox * mappings. Otherwise, if this option is not specified, any mappings to 18956bbee8e2SAlan Cox * the specified pages are removed before the pages are freed or 18966bbee8e2SAlan Cox * invalidated. 189768855966SAlan Cox * 18986bbee8e2SAlan Cox * In general, this operation should only be performed on objects that 18996bbee8e2SAlan Cox * contain managed pages. There are, however, two exceptions. First, it 19006bbee8e2SAlan Cox * is performed on the kernel and kmem objects by vm_map_entry_delete(). 19016bbee8e2SAlan Cox * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device- 19026bbee8e2SAlan Cox * backed pages. In both of these cases, the option OBJPR_CLEANONLY must 19036bbee8e2SAlan Cox * not be specified and the option OBJPR_NOTMAPPED must be specified. 1904df8bae1dSRodney W. Grimes * 1905df8bae1dSRodney W. Grimes * The object must be locked. 1906df8bae1dSRodney W. Grimes */ 190726f9a767SRodney W. Grimes void 1908ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 19096bbee8e2SAlan Cox int options) 1910df8bae1dSRodney W. Grimes { 1911d031cff1SMatthew Dillon vm_page_t p, next; 191293c5d3a4SKonstantin Belousov struct mtx *mtx; 1913df8bae1dSRodney W. Grimes 191489f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 191528634820SAlan Cox KASSERT((object->flags & OBJ_UNMANAGED) == 0 || 19166bbee8e2SAlan Cox (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, 19176bbee8e2SAlan Cox ("vm_object_page_remove: illegal options for object %p", object)); 1918ecde4b32SAlan Cox if (object->resident_page_count == 0) 19197667839aSAlan Cox return; 1920d474eaaaSDoug Rabson vm_object_pip_add(object, 1); 192126f9a767SRodney W. 
Grimes again: 1922b382c10aSKonstantin Belousov p = vm_page_find_least(object, start); 192393c5d3a4SKonstantin Belousov mtx = NULL; 19242965a453SKip Macy 192575741c04SAlan Cox /* 19266bbee8e2SAlan Cox * Here, the variable "p" is either (1) the page with the least pindex 19276bbee8e2SAlan Cox * greater than or equal to the parameter "start" or (2) NULL. 192875741c04SAlan Cox */ 19296bbee8e2SAlan Cox for (; p != NULL && (p->pindex < end || end == 0); p = next) { 1930b18bfc3dSJohn Dyson next = TAILQ_NEXT(p, listq); 193175741c04SAlan Cox 193259677d3cSAlan Cox /* 19336bbee8e2SAlan Cox * If the page is wired for any reason besides the existence 19346bbee8e2SAlan Cox * of managed, wired mappings, then it cannot be freed. For 19356bbee8e2SAlan Cox * example, fictitious pages, which represent device memory, 19366bbee8e2SAlan Cox * are inherently wired and cannot be freed. They can, 19376bbee8e2SAlan Cox * however, be invalidated if the option OBJPR_CLEANONLY is 19386bbee8e2SAlan Cox * not specified. 193959677d3cSAlan Cox */ 194093c5d3a4SKonstantin Belousov vm_page_change_lock(p, &mtx); 19413aaea6efSKonstantin Belousov if (vm_page_xbusied(p)) { 19423aaea6efSKonstantin Belousov VM_OBJECT_WUNLOCK(object); 19435975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopax", true); 19443aaea6efSKonstantin Belousov VM_OBJECT_WLOCK(object); 19453aaea6efSKonstantin Belousov goto again; 19463aaea6efSKonstantin Belousov } 1947*d842aa51SMark Johnston if (vm_page_wired(p)) { 1948cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && 1949cf060942SAlan Cox object->ref_count != 0) 19504fec79beSAlan Cox pmap_remove_all(p); 19516bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) == 0) { 1952bd7e5f99SJohn Dyson p->valid = 0; 1953a28042d1SAlan Cox vm_page_undirty(p); 1954a28042d1SAlan Cox } 195593c5d3a4SKonstantin Belousov continue; 19560d94caffSDavid Greenman } 1957c7aebda8SAttilio Rao if (vm_page_busied(p)) { 1958c7aebda8SAttilio Rao VM_OBJECT_WUNLOCK(object); 19595975e53dSKonstantin Belousov vm_page_busy_sleep(p, "vmopar", false); 1960c7aebda8SAttilio Rao VM_OBJECT_WLOCK(object); 196126f9a767SRodney W. Grimes goto again; 1962c7aebda8SAttilio Rao } 196368855966SAlan Cox KASSERT((p->flags & PG_FICTITIOUS) == 0, 196468855966SAlan Cox ("vm_object_page_remove: page %p is fictitious", p)); 19656bbee8e2SAlan Cox if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) { 1966cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && 1967cf060942SAlan Cox object->ref_count != 0) 196878985e42SAlan Cox pmap_remove_write(p); 1969cf060942SAlan Cox if (p->dirty != 0) 197093c5d3a4SKonstantin Belousov continue; 19712965a453SKip Macy } 1972cf060942SAlan Cox if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) 19734fec79beSAlan Cox pmap_remove_all(p); 19745cd29d0fSMark Johnston vm_page_free(p); 19752965a453SKip Macy } 197693c5d3a4SKonstantin Belousov if (mtx != NULL) 197793c5d3a4SKonstantin Belousov mtx_unlock(mtx); 1978f919ebdeSDavid Greenman vm_object_pip_wakeup(object); 1979c0503609SDavid Greenman } 1980df8bae1dSRodney W. Grimes 1981df8bae1dSRodney W. Grimes /* 19823138cd36SMark Johnston * vm_object_page_noreuse: 1983936c09acSJohn Baldwin * 19843138cd36SMark Johnston * For the given object, attempt to move the specified pages to 19853138cd36SMark Johnston * the head of the inactive queue. This bypasses regular LRU 19863138cd36SMark Johnston * operation and allows the pages to be reused quickly under memory 19873138cd36SMark Johnston * pressure. 
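 * (This is the backing primitive for POSIX_FADV_DONTNEED.)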
If a page is wired for any reason, then it will not 19883138cd36SMark Johnston * be queued. Pages are specified by the range ["start", "end"). 19893138cd36SMark Johnston * As a special case, if "end" is zero, then the range extends from 19903138cd36SMark Johnston * "start" to the end of the object. 1991936c09acSJohn Baldwin * 1992936c09acSJohn Baldwin * This operation should only be performed on objects that 199328634820SAlan Cox * contain non-fictitious, managed pages. 1994936c09acSJohn Baldwin * 1995936c09acSJohn Baldwin * The object must be locked. 1996936c09acSJohn Baldwin */ 1997936c09acSJohn Baldwin void 19983138cd36SMark Johnston vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1999936c09acSJohn Baldwin { 200093c5d3a4SKonstantin Belousov struct mtx *mtx; 2001936c09acSJohn Baldwin vm_page_t p, next; 2002936c09acSJohn Baldwin 200352d1addaSAlan Cox VM_OBJECT_ASSERT_LOCKED(object); 200428634820SAlan Cox KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 20053138cd36SMark Johnston ("vm_object_page_noreuse: illegal object %p", object)); 2006936c09acSJohn Baldwin if (object->resident_page_count == 0) 2007936c09acSJohn Baldwin return; 2008936c09acSJohn Baldwin p = vm_page_find_least(object, start); 2009936c09acSJohn Baldwin 2010936c09acSJohn Baldwin /* 2011936c09acSJohn Baldwin * Here, the variable "p" is either (1) the page with the least pindex 2012936c09acSJohn Baldwin * greater than or equal to the parameter "start" or (2) NULL. 2013936c09acSJohn Baldwin */ 2014936c09acSJohn Baldwin mtx = NULL; 2015936c09acSJohn Baldwin for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2016936c09acSJohn Baldwin next = TAILQ_NEXT(p, listq); 201793c5d3a4SKonstantin Belousov vm_page_change_lock(p, &mtx); 20183138cd36SMark Johnston vm_page_deactivate_noreuse(p); 2019936c09acSJohn Baldwin } 2020936c09acSJohn Baldwin if (mtx != NULL) 2021936c09acSJohn Baldwin mtx_unlock(mtx); 2022936c09acSJohn Baldwin } 2023936c09acSJohn Baldwin 2024936c09acSJohn Baldwin /* 2025387aabc5SAlan Cox * Populate the specified range of the object with valid pages. Returns 2026387aabc5SAlan Cox * TRUE if the range is successfully populated and FALSE otherwise. 2027387aabc5SAlan Cox * 2028387aabc5SAlan Cox * Note: This function should be optimized to pass a larger array of 2029387aabc5SAlan Cox * pages to vm_pager_get_pages() before it is applied to a non- 2030387aabc5SAlan Cox * OBJT_DEVICE object. 2031387aabc5SAlan Cox * 2032387aabc5SAlan Cox * The object must be locked. 
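 * Pages are grabbed exclusive-busied during the scan and are
 * unbusied before the function returns.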
2033387aabc5SAlan Cox */ 2034387aabc5SAlan Cox boolean_t 2035387aabc5SAlan Cox vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2036387aabc5SAlan Cox { 2037093c7f39SGleb Smirnoff vm_page_t m; 2038387aabc5SAlan Cox vm_pindex_t pindex; 2039387aabc5SAlan Cox int rv; 2040387aabc5SAlan Cox 204189f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2042387aabc5SAlan Cox for (pindex = start; pindex < end; pindex++) { 20435944de8eSKonstantin Belousov m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); 2044387aabc5SAlan Cox if (m->valid != VM_PAGE_BITS_ALL) { 2045b0cd2017SGleb Smirnoff rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); 2046387aabc5SAlan Cox if (rv != VM_PAGER_OK) { 20472965a453SKip Macy vm_page_lock(m); 2048387aabc5SAlan Cox vm_page_free(m); 20492965a453SKip Macy vm_page_unlock(m); 2050387aabc5SAlan Cox break; 2051387aabc5SAlan Cox } 2052387aabc5SAlan Cox } 2053387aabc5SAlan Cox /* 2054387aabc5SAlan Cox * Keep "m" busy because a subsequent iteration may unlock 2055387aabc5SAlan Cox * the object. 2056387aabc5SAlan Cox */ 2057387aabc5SAlan Cox } 2058387aabc5SAlan Cox if (pindex > start) { 2059387aabc5SAlan Cox m = vm_page_lookup(object, start); 2060387aabc5SAlan Cox while (m != NULL && m->pindex < pindex) { 2061c7aebda8SAttilio Rao vm_page_xunbusy(m); 2062387aabc5SAlan Cox m = TAILQ_NEXT(m, listq); 2063387aabc5SAlan Cox } 2064387aabc5SAlan Cox } 2065387aabc5SAlan Cox return (pindex == end); 2066387aabc5SAlan Cox } 2067387aabc5SAlan Cox 2068387aabc5SAlan Cox /* 2069df8bae1dSRodney W. Grimes * Routine: vm_object_coalesce 2070df8bae1dSRodney W. Grimes * Function: Coalesces two objects backing up adjoining 2071df8bae1dSRodney W. Grimes * regions of memory into a single object. 2072df8bae1dSRodney W. Grimes * 2073df8bae1dSRodney W. Grimes * returns TRUE if objects were combined. 2074df8bae1dSRodney W. Grimes * 2075df8bae1dSRodney W. Grimes * NOTE: Only works at the moment if the second object is NULL - 2076df8bae1dSRodney W. Grimes * if it's not, which object do we lock first? 2077df8bae1dSRodney W. Grimes * 2078df8bae1dSRodney W. Grimes * Parameters: 2079df8bae1dSRodney W. Grimes * prev_object First object to coalesce 2080df8bae1dSRodney W. Grimes * prev_offset Offset into prev_object 2081df8bae1dSRodney W. Grimes * prev_size Size of reference to prev_object 208257a21abaSAlan Cox * next_size Size of reference to the second object 20833364c323SKonstantin Belousov * reserved Indicator that extension region has 20843364c323SKonstantin Belousov * swap accounted for 2085df8bae1dSRodney W. Grimes * 2086df8bae1dSRodney W. Grimes * Conditions: 2087df8bae1dSRodney W. Grimes * The object must *not* be locked. 2088df8bae1dSRodney W. Grimes */ 20890d94caffSDavid Greenman boolean_t 209057a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 20913364c323SKonstantin Belousov vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) 2092df8bae1dSRodney W. Grimes { 2093ea41812fSAlan Cox vm_pindex_t next_pindex; 2094df8bae1dSRodney W. Grimes 209500e1854aSAlan Cox if (prev_object == NULL) 2096df8bae1dSRodney W. 
Grimes return (TRUE);
209789f6b863SAttilio Rao VM_OBJECT_WLOCK(prev_object);
20989ded9474SKonstantin Belousov if ((prev_object->type != OBJT_DEFAULT &&
20999ded9474SKonstantin Belousov prev_object->type != OBJT_SWAP) ||
2100f08f7dcaSKonstantin Belousov (prev_object->flags & OBJ_TMPFS_NODE) != 0) {
210189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object);
210230dcfc09SJohn Dyson return (FALSE);
210330dcfc09SJohn Dyson }
210430dcfc09SJohn Dyson
2105df8bae1dSRodney W. Grimes /*
2106df8bae1dSRodney W. Grimes * Try to collapse the object first
2107df8bae1dSRodney W. Grimes */
2108df8bae1dSRodney W. Grimes vm_object_collapse(prev_object);
2109df8bae1dSRodney W. Grimes
2110df8bae1dSRodney W. Grimes /*
21110d94caffSDavid Greenman * Can't coalesce if the object has more than one reference, is paged
21120d94caffSDavid Greenman * out, shadows another object, or has a copy elsewhere (any of which
21130d94caffSDavid Greenman * mean that the pages not mapped to prev_entry may be in use anyway).
2114df8bae1dSRodney W. Grimes */
21158cc7e047SJohn Dyson if (prev_object->backing_object != NULL) {
211689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object);
2117df8bae1dSRodney W. Grimes return (FALSE);
2118df8bae1dSRodney W. Grimes }
2119a316d390SJohn Dyson
2120a316d390SJohn Dyson prev_size >>= PAGE_SHIFT;
2121a316d390SJohn Dyson next_size >>= PAGE_SHIFT;
212257a21abaSAlan Cox next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
21238cc7e047SJohn Dyson
21240e48e068SMark Johnston if (prev_object->ref_count > 1 &&
21250e48e068SMark Johnston prev_object->size != next_pindex &&
21260e48e068SMark Johnston (prev_object->flags & OBJ_ONEMAPPING) == 0) {
212789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object);
21288cc7e047SJohn Dyson return (FALSE);
21298cc7e047SJohn Dyson }
21308cc7e047SJohn Dyson
2131df8bae1dSRodney W. Grimes /*
21323364c323SKonstantin Belousov * Account for the charge.
21333364c323SKonstantin Belousov */
2134ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) {
21353364c323SKonstantin Belousov
21363364c323SKonstantin Belousov /*
21373364c323SKonstantin Belousov * If prev_object was charged, then this mapping,
2138763df3ecSPedro F. Giffuni * although not charged now, may become writable
2139ef694c1aSEdward Tomasz Napierala * later. A non-NULL cred in the object would prevent
21403364c323SKonstantin Belousov * swap reservation when write access is enabled, so
21413364c323SKonstantin Belousov * reserve swap now. A failed reservation causes
21423364c323SKonstantin Belousov * allocation of a separate object for the map
21433364c323SKonstantin Belousov * entry, and swap reservation for that entry is
21443364c323SKonstantin Belousov * managed at the appropriate time.
21453364c323SKonstantin Belousov */
2146ef694c1aSEdward Tomasz Napierala if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2147ef694c1aSEdward Tomasz Napierala prev_object->cred)) {
21489f790a17SKonstantin Belousov VM_OBJECT_WUNLOCK(prev_object);
21493364c323SKonstantin Belousov return (FALSE);
21503364c323SKonstantin Belousov }
21513364c323SKonstantin Belousov prev_object->charge += ptoa(next_size);
21523364c323SKonstantin Belousov }
21533364c323SKonstantin Belousov
21543364c323SKonstantin Belousov /*
21550d94caffSDavid Greenman * Remove any pages that may still be in the object from a previous
21560d94caffSDavid Greenman * deallocation.
2157df8bae1dSRodney W.
Grimes */ 2158ea41812fSAlan Cox if (next_pindex < prev_object->size) { 21596bbee8e2SAlan Cox vm_object_page_remove(prev_object, next_pindex, next_pindex + 21606bbee8e2SAlan Cox next_size, 0); 2161ea41812fSAlan Cox if (prev_object->type == OBJT_SWAP) 2162ea41812fSAlan Cox swap_pager_freespace(prev_object, 2163ea41812fSAlan Cox next_pindex, next_size); 21643364c323SKonstantin Belousov #if 0 2165ef694c1aSEdward Tomasz Napierala if (prev_object->cred != NULL) { 21663364c323SKonstantin Belousov KASSERT(prev_object->charge >= 21673364c323SKonstantin Belousov ptoa(prev_object->size - next_pindex), 21683364c323SKonstantin Belousov ("object %p overcharged 1 %jx %jx", prev_object, 21693364c323SKonstantin Belousov (uintmax_t)next_pindex, (uintmax_t)next_size)); 21703364c323SKonstantin Belousov prev_object->charge -= ptoa(prev_object->size - 21713364c323SKonstantin Belousov next_pindex); 21723364c323SKonstantin Belousov } 21733364c323SKonstantin Belousov #endif 2174ea41812fSAlan Cox } 2175df8bae1dSRodney W. Grimes 2176df8bae1dSRodney W. Grimes /* 2177df8bae1dSRodney W. Grimes * Extend the object if necessary. 2178df8bae1dSRodney W. Grimes */ 2179ea41812fSAlan Cox if (next_pindex + next_size > prev_object->size) 2180ea41812fSAlan Cox prev_object->size = next_pindex + next_size; 2181df8bae1dSRodney W. Grimes 218289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(prev_object); 2183df8bae1dSRodney W. Grimes return (TRUE); 2184df8bae1dSRodney W. Grimes } 2185df8bae1dSRodney W. Grimes 21867a5a6352SMatthew Dillon void 21877a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object) 21887a5a6352SMatthew Dillon { 21897a5a6352SMatthew Dillon 219089f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 2191f40cb1c6SKonstantin Belousov if (object->type != OBJT_VNODE) { 2192f40cb1c6SKonstantin Belousov if ((object->flags & OBJ_TMPFS_NODE) != 0) { 2193f40cb1c6SKonstantin Belousov KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs")); 2194f40cb1c6SKonstantin Belousov vm_object_set_flag(object, OBJ_TMPFS_DIRTY); 2195f40cb1c6SKonstantin Belousov } 21963280870dSKonstantin Belousov return; 2197f40cb1c6SKonstantin Belousov } 21983280870dSKonstantin Belousov object->generation++; 21993280870dSKonstantin Belousov if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) 2200ee39666aSJeff Roberson return; 2201af51d7bfSAlan Cox vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); 22027a5a6352SMatthew Dillon } 22037a5a6352SMatthew Dillon 220403462509SAlan Cox /* 220503462509SAlan Cox * vm_object_unwire: 220603462509SAlan Cox * 220703462509SAlan Cox * For each page offset within the specified range of the given object, 220803462509SAlan Cox * find the highest-level page in the shadow chain and unwire it. A page 220903462509SAlan Cox * must exist at every page offset, and the highest-level page must be 221003462509SAlan Cox * wired. 
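 * (A hypothetical caller sketch follows this comment.)
 */

/*
 * Illustrative sketch of a hypothetical caller of vm_object_unwire()
 * below: unwiring the pages backing a map entry and placing them in
 * the inactive queue.  The name "unwire_entry_example" is invented
 * for illustration.
 */
static void
unwire_entry_example(vm_map_entry_t entry)
{

	vm_object_unwire(entry->object.vm_object, entry->offset,
	    entry->end - entry->start, PQ_INACTIVE);
}

/*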

/*
 * vm_object_unwire:
 *
 *	For each page offset within the specified range of the given object,
 *	find the highest-level page in the shadow chain and unwire it.  A
 *	page must exist at every page offset, and the highest-level page must
 *	be wired.
 */
void
vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
    uint8_t queue)
{
    vm_object_t tobject, t1object;
    vm_page_t m, tm;
    vm_pindex_t end_pindex, pindex, tpindex;
    int depth, locked_depth;

    KASSERT((offset & PAGE_MASK) == 0,
        ("vm_object_unwire: offset is not page aligned"));
    KASSERT((length & PAGE_MASK) == 0,
        ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
    /* The wired count of a fictitious page never changes. */
    if ((object->flags & OBJ_FICTITIOUS) != 0)
        return;
    pindex = OFF_TO_IDX(offset);
    end_pindex = pindex + atop(length);
again:
    locked_depth = 1;
    VM_OBJECT_RLOCK(object);
    m = vm_page_find_least(object, pindex);
    while (pindex < end_pindex) {
        if (m == NULL || pindex < m->pindex) {
            /*
             * The first object in the shadow chain doesn't
             * contain a page at the current index.  Therefore,
             * the page must exist in a backing object.
             */
            tobject = object;
            tpindex = pindex;
            depth = 0;
            do {
                tpindex +=
                    OFF_TO_IDX(tobject->backing_object_offset);
                tobject = tobject->backing_object;
                KASSERT(tobject != NULL,
                    ("vm_object_unwire: missing page"));
                if ((tobject->flags & OBJ_FICTITIOUS) != 0)
                    goto next_page;
                depth++;
                if (depth == locked_depth) {
                    locked_depth++;
                    VM_OBJECT_RLOCK(tobject);
                }
            } while ((tm = vm_page_lookup(tobject, tpindex)) ==
                NULL);
        } else {
            tm = m;
            m = TAILQ_NEXT(m, listq);
        }
        vm_page_lock(tm);
        if (vm_page_xbusied(tm)) {
            for (tobject = object; locked_depth >= 1;
                locked_depth--) {
                t1object = tobject->backing_object;
                VM_OBJECT_RUNLOCK(tobject);
                tobject = t1object;
            }
            vm_page_busy_sleep(tm, "unwbo", true);
            goto again;
        }
        vm_page_unwire(tm, queue);
        vm_page_unlock(tm);
next_page:
        pindex++;
    }
    /* Release the accumulated object locks. */
    for (tobject = object; locked_depth >= 1; locked_depth--) {
        t1object = tobject->backing_object;
        VM_OBJECT_RUNLOCK(tobject);
        tobject = t1object;
    }
}
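
/*
 * Editor's sketch (hypothetical call site, not part of this file): the map
 * wiring code would undo the wiring of an entry's pages roughly like this;
 * "entry" is an illustrative, already-wired map entry.
 */
#if 0
    vm_object_unwire(entry->object.vm_object, entry->offset,
        entry->end - entry->start, PQ_ACTIVE);
#endif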

/*
 * Return the vnode for the given object, or NULL if none exists.
 * For tmpfs objects, the function may return NULL if there is
 * no vnode allocated at the time of the call.
 */
struct vnode *
vm_object_vnode(vm_object_t object)
{
    struct vnode *vp;

    VM_OBJECT_ASSERT_LOCKED(object);
    if (object->type == OBJT_VNODE) {
        vp = object->handle;
        KASSERT(vp != NULL, ("%s: OBJT_VNODE has no vnode", __func__));
    } else if (object->type == OBJT_SWAP &&
        (object->flags & OBJ_TMPFS) != 0) {
        vp = object->un_pager.swp.swp_tmpfs;
        KASSERT(vp != NULL, ("%s: OBJT_TMPFS has no vnode", __func__));
    } else {
        vp = NULL;
    }
    return (vp);
}
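
/*
 * Editor's sketch (hypothetical helper, not part of this file): callers of
 * vm_object_vnode() must hold the object lock and tolerate a NULL return;
 * taking a reference before dropping the lock keeps the vnode alive, the
 * same pattern the sysctl handler below uses.
 */
#if 0
static struct vnode *
object_vnode_ref(vm_object_t obj)
{
    struct vnode *vp;

    VM_OBJECT_RLOCK(obj);
    vp = vm_object_vnode(obj);
    if (vp != NULL)
        vref(vp);
    VM_OBJECT_RUNLOCK(obj);
    return (vp);
}
#endif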

/*
 * Return the kvme type of the given object.
 * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
 */
int
vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
{

    VM_OBJECT_ASSERT_LOCKED(object);
    if (vpp != NULL)
        *vpp = vm_object_vnode(object);
    switch (object->type) {
    case OBJT_DEFAULT:
        return (KVME_TYPE_DEFAULT);
    case OBJT_VNODE:
        return (KVME_TYPE_VNODE);
    case OBJT_SWAP:
        if ((object->flags & OBJ_TMPFS_NODE) != 0)
            return (KVME_TYPE_VNODE);
        return (KVME_TYPE_SWAP);
    case OBJT_DEVICE:
        return (KVME_TYPE_DEVICE);
    case OBJT_PHYS:
        return (KVME_TYPE_PHYS);
    case OBJT_DEAD:
        return (KVME_TYPE_DEAD);
    case OBJT_SG:
        return (KVME_TYPE_SG);
    case OBJT_MGTDEVICE:
        return (KVME_TYPE_MGTDEVICE);
    default:
        return (KVME_TYPE_UNKNOWN);
    }
}
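
/*
 * Editor's note (illustration, not part of this file): a tmpfs node is an
 * OBJT_SWAP object with OBJ_TMPFS_NODE set, so it is deliberately reported
 * as KVME_TYPE_VNODE above, and kinfo consumers see tmpfs mappings as file
 * mappings rather than anonymous swap.  A hypothetical caller follows the
 * usual lock-then-classify pattern:
 */
#if 0
    /* "obj", "vp" and "kvme_type" are illustrative names. */
    struct vnode *vp;
    int kvme_type;

    VM_OBJECT_RLOCK(obj);
    kvme_type = vm_object_kvme_type(obj, &vp);
    VM_OBJECT_RUNLOCK(obj);
#endif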

static int
sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
{
    struct kinfo_vmobject *kvo;
    char *fullpath, *freepath;
    struct vnode *vp;
    struct vattr va;
    vm_object_t obj;
    vm_page_t m;
    int count, error;

    if (req->oldptr == NULL) {
        /*
         * If an old buffer has not been provided, generate an
         * estimate of the space needed for a subsequent call.
         */
        mtx_lock(&vm_object_list_mtx);
        count = 0;
        TAILQ_FOREACH(obj, &vm_object_list, object_list) {
            if (obj->type == OBJT_DEAD)
                continue;
            count++;
        }
        mtx_unlock(&vm_object_list_mtx);
        return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
            count * 11 / 10));
    }

    kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);
    error = 0;

    /*
     * VM objects are type stable and are never removed from the
     * list once added.  This allows us to safely read obj->object_list
     * after reacquiring the VM object lock.
     */
    mtx_lock(&vm_object_list_mtx);
    TAILQ_FOREACH(obj, &vm_object_list, object_list) {
        if (obj->type == OBJT_DEAD)
            continue;
        VM_OBJECT_RLOCK(obj);
        if (obj->type == OBJT_DEAD) {
            VM_OBJECT_RUNLOCK(obj);
            continue;
        }
        mtx_unlock(&vm_object_list_mtx);
        kvo->kvo_size = ptoa(obj->size);
        kvo->kvo_resident = obj->resident_page_count;
        kvo->kvo_ref_count = obj->ref_count;
        kvo->kvo_shadow_count = obj->shadow_count;
        kvo->kvo_memattr = obj->memattr;
        kvo->kvo_active = 0;
        kvo->kvo_inactive = 0;
        TAILQ_FOREACH(m, &obj->memq, listq) {
            /*
             * A page may belong to the object but be
             * dequeued and set to PQ_NONE while the
             * object lock is not held.  This makes the
             * reads of m->queue below racy, and we do not
             * count pages set to PQ_NONE.  However, this
             * sysctl is only meant to give an
             * approximation of the system anyway.
             */
            if (m->queue == PQ_ACTIVE)
                kvo->kvo_active++;
            else if (m->queue == PQ_INACTIVE)
                kvo->kvo_inactive++;
        }

        kvo->kvo_vn_fileid = 0;
        kvo->kvo_vn_fsid = 0;
        kvo->kvo_vn_fsid_freebsd11 = 0;
        freepath = NULL;
        fullpath = "";
        kvo->kvo_type = vm_object_kvme_type(obj, &vp);
        if (vp != NULL)
            vref(vp);
        VM_OBJECT_RUNLOCK(obj);
        if (vp != NULL) {
            vn_fullpath(curthread, vp, &fullpath, &freepath);
            vn_lock(vp, LK_SHARED | LK_RETRY);
            if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
                kvo->kvo_vn_fileid = va.va_fileid;
                kvo->kvo_vn_fsid = va.va_fsid;
                kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
                /* truncate */
            }
            vput(vp);
        }

        strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
        if (freepath != NULL)
            free(freepath, M_TEMP);

        /* Pack record size down. */
        kvo->kvo_structsize = offsetof(struct kinfo_vmobject,
            kvo_path) + strlen(kvo->kvo_path) + 1;
        kvo->kvo_structsize = roundup(kvo->kvo_structsize,
            sizeof(uint64_t));
        error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
        mtx_lock(&vm_object_list_mtx);
        if (error)
            break;
    }
    mtx_unlock(&vm_object_list_mtx);
    free(kvo, M_TEMP);
    return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW |
    CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list,
    "S,kinfo_vmobject", "List of VM objects");
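
/*
 * Editor's sketch (userland consumer, hypothetical, not part of this file):
 * the handler above emits variable-length records, so a reader steps through
 * the buffer kvo_structsize bytes at a time.  Error handling is elided; the
 * 11/10 slack in the size estimate covers modest growth between the two
 * sysctl calls.
 */
#if 0
    /* Userland code, shown here only for illustration: */
    size_t len;
    char *buf, *bp;
    struct kinfo_vmobject *kvo;

    sysctlbyname("vm.objects", NULL, &len, NULL, 0);
    buf = malloc(len);
    sysctlbyname("vm.objects", buf, &len, NULL, 0);
    for (bp = buf; bp < buf + len; bp += kvo->kvo_structsize) {
        kvo = (struct kinfo_vmobject *)(void *)bp;
        printf("type %d, %ju resident pages, path \"%s\"\n",
            kvo->kvo_type, (uintmax_t)kvo->kvo_resident,
            kvo->kvo_path);
    }
    free(buf);
#endif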

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
    vm_map_t tmpm;
    vm_map_entry_t tmpe;
    vm_object_t obj;
    int entcount;

    if (map == NULL)
        return (0);

    if (entry == NULL) {
        tmpe = map->header.next;
        entcount = map->nentries;
        while (entcount-- && (tmpe != &map->header)) {
            if (_vm_object_in_map(map, object, tmpe))
                return (1);
            tmpe = tmpe->next;
        }
    } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
        tmpm = entry->object.sub_map;
        tmpe = tmpm->header.next;
        entcount = tmpm->nentries;
        while (entcount-- && tmpe != &tmpm->header) {
            if (_vm_object_in_map(tmpm, object, tmpe))
                return (1);
            tmpe = tmpe->next;
        }
    } else if ((obj = entry->object.vm_object) != NULL) {
        for (; obj; obj = obj->backing_object)
            if (obj == object)
                return (1);
    }
    return (0);
}

static int
vm_object_in_map(vm_object_t object)
{
    struct proc *p;

    /* sx_slock(&allproc_lock); */
    FOREACH_PROC_IN_SYSTEM(p) {
        if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
            continue;
        if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL)) {
            /* sx_sunlock(&allproc_lock); */
            return (1);
        }
    }
    /* sx_sunlock(&allproc_lock); */
    if (_vm_object_in_map(kernel_map, object, NULL))
        return (1);
    return (0);
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
    vm_object_t object;

    /*
     * Make sure that internal objects are in a map somewhere
     * and none have zero ref counts.
     */
    TAILQ_FOREACH(object, &vm_object_list, object_list) {
        if (object->handle == NULL &&
            (object->type == OBJT_DEFAULT ||
            object->type == OBJT_SWAP)) {
            if (object->ref_count == 0) {
                db_printf(
            "vmochk: internal obj has zero ref count: %ld\n",
                    (long)object->size);
            }
            if (!vm_object_in_map(object)) {
                db_printf(
            "vmochk: internal obj is not in a map: "
            "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
                    object->ref_count, (u_long)object->size,
                    (u_long)object->size,
                    (void *)object->backing_object);
            }
        }
    }
}

/*
 * vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
    /* XXX convert args. */
    vm_object_t object = (vm_object_t)addr;
    boolean_t full = have_addr;

    vm_page_t p;

    /* XXX count is an (unused) arg.  Avoid shadowing it. */
#define count was_count

    int count;

    if (object == NULL)
        return;

    db_iprintf(
        "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
        object, (int)object->type, (uintmax_t)object->size,
        object->resident_page_count, object->ref_count, object->flags,
        object->cred ? object->cred->cr_ruid : -1,
        (uintmax_t)object->charge);
    db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
        object->shadow_count,
        object->backing_object ? object->backing_object->ref_count : 0,
        object->backing_object,
        (uintmax_t)object->backing_object_offset);

    if (!full)
        return;

    db_indent += 2;
    count = 0;
    TAILQ_FOREACH(p, &object->memq, listq) {
        if (count == 0)
            db_iprintf("memory:=");
        else if (count == 6) {
            db_printf("\n");
            db_iprintf(" ...");
            count = 0;
        } else
            db_printf(",");
        count++;

        db_printf("(off=0x%jx,page=0x%jx)",
            (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
    }
    if (count != 0)
        db_printf("\n");
    db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
    /* db_expr_t */ long addr,
    boolean_t have_addr,
    /* db_expr_t */ long count,
    char *modif)
{
    vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
    vm_object_t object;
    vm_pindex_t fidx;
    vm_paddr_t pa;
    vm_page_t m, prev_m;
    int rcount, nl, c;

    nl = 0;
    TAILQ_FOREACH(object, &vm_object_list, object_list) {
        db_printf("new object: %p\n", (void *)object);
        if (nl > 18) {
            c = cngetc();
            if (c != ' ')
                return;
            nl = 0;
        }
        nl++;
        rcount = 0;
        fidx = 0;
        pa = -1;
        TAILQ_FOREACH(m, &object->memq, listq) {
            if (m->pindex > 128)
                break;
            if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
                prev_m->pindex + 1 != m->pindex) {
                if (rcount) {
                    db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
                        (long)fidx, rcount, (long)pa);
                    if (nl > 18) {
                        c = cngetc();
                        if (c != ' ')
                            return;
                        nl = 0;
                    }
                    nl++;
                    rcount = 0;
                }
            }
            if (rcount &&
                (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
                ++rcount;
                continue;
            }
            if (rcount) {
                db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
                    (long)fidx, rcount, (long)pa);
                if (nl > 18) {
                    c = cngetc();
                    if (c != ' ')
                        return;
                    nl = 0;
                }
                nl++;
            }
            fidx = m->pindex;
            pa = VM_PAGE_TO_PHYS(m);
            rcount = 1;
        }
        if (rcount) {
            db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
                (long)fidx, rcount, (long)pa);
            if (nl > 18) {
                c = cngetc();
                if (c != ' ')
                    return;
                nl = 0;
            }
            nl++;
        }
    }
}
#endif /* DDB */
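
/*
 * Editor's note (illustrative usage, not part of this file): on a kernel
 * built with DDB, the commands defined above are run from the debugger
 * prompt, e.g.:
 *
 *	db> show vmochk
 *	db> show object 0xfffff80003123400
 *	db> show vmopag
 *
 * The object address passed to "show object" is hypothetical; supplying an
 * address selects the full per-page listing (full = have_addr).
 */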