/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>
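
/*
 * Editorial note on the constant below (illustrative, not part of the
 * original source): EASY_SCAN_FACTOR feeds the msync hard-sequential
 * optimization in vm_object_page_clean(), where the scan-abort limit is
 * computed as resident_page_count / EASY_SCAN_FACTOR (but never less
 * than 16).
 */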
#define EASY_SCAN_FACTOR	8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags, CTLFLAG_RW, &msync_flush_flags, 0,
    "Enable sequential iteration optimization");

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;
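
/*
 * A minimal sketch of the usual object lifecycle, assuming an anonymous
 * (OBJT_DEFAULT) consumer; it is illustrative only and not part of any
 * interface defined here:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
 *	vm_object_reference(obj);	(an additional mapping takes a ref)
 *	...
 *	vm_object_deallocate(obj);	(drop the extra reference)
 *	vm_object_deallocate(obj);	(last ref; object is terminated)
 */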
SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->cache == NULL,
	    ("object %p has cached pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object, "standard object");

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	return (0);
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	object->pg_color = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	object->cache = NULL;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VFS_ASSERT_GIANT(vp->v_mount);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		int vfslocked;

		vfslocked = 0;
	restart:
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			struct vnode *vp = (struct vnode *) object->handle;

			/*
			 * Conditionally acquire Giant for a vnode-backed
			 * object.  We have to be careful since the type of
			 * a vnode object can change while the object is
			 * unlocked.
			 */
			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
				vfslocked = 1;
				if (!mtx_trylock(&Giant)) {
					VM_OBJECT_UNLOCK(object);
					mtx_lock(&Giant);
					goto restart;
				}
			}
			vm_object_vndeallocate(object);
			VFS_UNLOCK_GIANT(vfslocked);
			return;
		} else
			/*
			 * This is to handle the case that the object
			 * changed type while we dropped its lock to
			 * obtain Giant.
			 */
			VFS_UNLOCK_GIANT(vfslocked);

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_UNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
}

/*
 * vm_object_destroy removes the object from the global object list
 * and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags));
		if (p->wire_count == 0) {
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, 0, 0);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	vm_object_destroy(object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if ((p->dirty & p->valid) == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		p->oflags |= VPO_CLEANCHK;
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
			clearobjflags = 0;
		else
			pmap_remove_write(p);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if ((p->oflags & VPO_CLEANCHK) == 0 ||
		    (pi < tstart) || (pi >= tend) ||
		    p->valid == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
		    curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			return(0);
		}
	}
	maxf = 0;
	for(i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->oflags & VPO_BUSY) ||
			    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
			    (tp->oflags & VPO_CLEANCHK) == 0) ||
			    (tp->busy != 0))
				break;
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				tp->oflags &= ~VPO_CLEANCHK;
				break;
			}
			maf[ i - 1 ] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count -  maxf;
	if (chkb) {
		for(i = 1; i < chkb;i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->oflags & VPO_BUSY) ||
				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				    (tp->oflags & VPO_CLEANCHK) == 0) ||
				    (tp->busy != 0))
					break;
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->oflags &= ~VPO_CLEANCHK;
					break;
				}
				mab[ i - 1 ] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for(i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	p->oflags &= ~VPO_CLEANCHK;
	ma[maxb] = p;
	for(i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			pmap_remove_write(ma[i]);
			ma[i]->oflags |= VPO_CLEANCHK;

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return(maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		int vfslocked;
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		vn_finished_write(mp);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		boolean_t purge;
		purge = old_msync || (object->type == OBJT_DEVICE);
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    purge ? FALSE : TRUE);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	Deactivate and clean the specified pages if they are
 *	resident.  This permits the process to reuse the pages
 *	without faulting or the kernel to reclaim the pages
 *	without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			    tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		}
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL && advise == MADV_WILLNEED) {
			/*
			 * If the page is cached, reactivate it.
			 */
			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
			    VM_ALLOC_NOBUSY);
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object)
				VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			goto shadowlookup;
		}
		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
1159867a482dSJohn Dyson */ 116032585dd6SAlan Cox vm_page_lock_queues(); 116132585dd6SAlan Cox if (m->hold_count || 11621c7c3c6aSMatthew Dillon m->wire_count || 11638b03c8edSMatthew Dillon (m->flags & PG_UNMANAGED) || 116432585dd6SAlan Cox m->valid != VM_PAGE_BITS_ALL) { 116532585dd6SAlan Cox vm_page_unlock_queues(); 116634567de7SAlan Cox goto unlock_tobject; 11676e20a165SJohn Dyson } 11689af80719SAlan Cox if ((m->oflags & VPO_BUSY) || m->busy) { 11695786be7cSAlan Cox vm_page_flag_set(m, PG_REFERENCED); 117091449ce9SAlan Cox vm_page_unlock_queues(); 11719b98b796SAlan Cox if (object != tobject) 11729b98b796SAlan Cox VM_OBJECT_UNLOCK(object); 11735786be7cSAlan Cox m->oflags |= VPO_WANTED; 117491449ce9SAlan Cox msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0); 11759b98b796SAlan Cox VM_OBJECT_LOCK(object); 11766e20a165SJohn Dyson goto relookup; 117734567de7SAlan Cox } 1178867a482dSJohn Dyson if (advise == MADV_WILLNEED) { 1179867a482dSJohn Dyson vm_page_activate(m); 11806e20a165SJohn Dyson } else if (advise == MADV_DONTNEED) { 1181479112dfSMatthew Dillon vm_page_dontneed(m); 11820a47b48bSJohn Dyson } else if (advise == MADV_FREE) { 11831c7c3c6aSMatthew Dillon /* 11842aaeadf8SMatthew Dillon * Mark the page clean. This will allow the page 11852aaeadf8SMatthew Dillon * to be freed up by the system. However, such pages 11862aaeadf8SMatthew Dillon * are often reused quickly by malloc()/free() 11872aaeadf8SMatthew Dillon * so we do not do anything that would cause 11882aaeadf8SMatthew Dillon * a page fault if we can help it. 11892aaeadf8SMatthew Dillon * 11902aaeadf8SMatthew Dillon * Specifically, we do not try to actually free 11912aaeadf8SMatthew Dillon * the page now nor do we try to put it in the 11922aaeadf8SMatthew Dillon * cache (which would cause a page fault on reuse). 119341c67e12SMatthew Dillon * 119441c67e12SMatthew Dillon * But we do make the page as freeable as we 119541c67e12SMatthew Dillon * can without actually taking the step of unmapping 119641c67e12SMatthew Dillon * it. 11971c7c3c6aSMatthew Dillon */ 11980385347cSPeter Wemm pmap_clear_modify(m); 11996e20a165SJohn Dyson m->dirty = 0; 120041c67e12SMatthew Dillon m->act_count = 0; 1201479112dfSMatthew Dillon vm_page_dontneed(m); 1202867a482dSJohn Dyson } 12032999e9faSAlan Cox vm_page_unlock_queues(); 12042999e9faSAlan Cox if (advise == MADV_FREE && tobject->type == OBJT_SWAP) 12052999e9faSAlan Cox swap_pager_freespace(tobject, tpindex, 1); 120634567de7SAlan Cox unlock_tobject: 12079b98b796SAlan Cox if (tobject != object) 120834567de7SAlan Cox VM_OBJECT_UNLOCK(tobject); 1209867a482dSJohn Dyson } 12109b98b796SAlan Cox VM_OBJECT_UNLOCK(object); 1211867a482dSJohn Dyson } 1212867a482dSJohn Dyson 1213867a482dSJohn Dyson /* 1214df8bae1dSRodney W. Grimes * vm_object_shadow: 1215df8bae1dSRodney W. Grimes * 1216df8bae1dSRodney W. Grimes * Create a new object which is backed by the 1217df8bae1dSRodney W. Grimes * specified existing object range. The source 1218df8bae1dSRodney W. Grimes * object reference is deallocated. 1219df8bae1dSRodney W. Grimes * 1220df8bae1dSRodney W. Grimes * The new object and offset into that object 1221df8bae1dSRodney W. Grimes * are returned in the source parameters. 1222df8bae1dSRodney W. Grimes */ 122326f9a767SRodney W. Grimes void 12241b40f8c0SMatthew Dillon vm_object_shadow( 12251b40f8c0SMatthew Dillon vm_object_t *object, /* IN/OUT */ 12261b40f8c0SMatthew Dillon vm_ooffset_t *offset, /* IN/OUT */ 12271b40f8c0SMatthew Dillon vm_size_t length) 1228df8bae1dSRodney W.
Grimes { 1229d031cff1SMatthew Dillon vm_object_t source; 1230d031cff1SMatthew Dillon vm_object_t result; 1231df8bae1dSRodney W. Grimes 1232df8bae1dSRodney W. Grimes source = *object; 1233df8bae1dSRodney W. Grimes 1234df8bae1dSRodney W. Grimes /* 12359a2f6362SAlan Cox * Don't create the new object if the old object isn't shared. 12369a2f6362SAlan Cox */ 1237570a2f4aSAlan Cox if (source != NULL) { 1238570a2f4aSAlan Cox VM_OBJECT_LOCK(source); 1239570a2f4aSAlan Cox if (source->ref_count == 1 && 12409a2f6362SAlan Cox source->handle == NULL && 12419a2f6362SAlan Cox (source->type == OBJT_DEFAULT || 12429917e010SAlan Cox source->type == OBJT_SWAP)) { 1243570a2f4aSAlan Cox VM_OBJECT_UNLOCK(source); 12449a2f6362SAlan Cox return; 12459917e010SAlan Cox } 1246570a2f4aSAlan Cox VM_OBJECT_UNLOCK(source); 1247570a2f4aSAlan Cox } 12489a2f6362SAlan Cox 12499a2f6362SAlan Cox /* 1250570a2f4aSAlan Cox * Allocate a new object with the given length. 1251df8bae1dSRodney W. Grimes */ 1252971dd342SAlfred Perlstein result = vm_object_allocate(OBJT_DEFAULT, length); 1253df8bae1dSRodney W. Grimes 1254df8bae1dSRodney W. Grimes /* 12550d94caffSDavid Greenman * The new object shadows the source object, adding a reference to it. 12560d94caffSDavid Greenman * Our caller changes his reference to point to the new object, 12570d94caffSDavid Greenman * removing a reference to the source object. Net result: no change 12580d94caffSDavid Greenman * of reference count. 12599b09fe24SMatthew Dillon * 12609b09fe24SMatthew Dillon * Try to optimize the result object's page color when shadowing 1261956f3135SPhilippe Charnier * in order to maintain page coloring consistency in the combined 12629b09fe24SMatthew Dillon * shadowed object. 1263df8bae1dSRodney W. Grimes */ 126424a1cce3SDavid Greenman result->backing_object = source; 12659174ca7bSTor Egge /* 12669174ca7bSTor Egge * Store the offset into the source object, and fix up the offset into 12679174ca7bSTor Egge * the new object. 12689174ca7bSTor Egge */ 12699174ca7bSTor Egge result->backing_object_offset = *offset; 1270570a2f4aSAlan Cox if (source != NULL) { 1271570a2f4aSAlan Cox VM_OBJECT_LOCK(source); 12721c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); 1273eaf13dd7SJohn Dyson source->shadow_count++; 1274eaf13dd7SJohn Dyson source->generation++; 1275f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 12767b54b1a9SAlan Cox result->flags |= source->flags & OBJ_COLORED; 1277f8a47341SAlan Cox result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & 1278f8a47341SAlan Cox ((1 << (VM_NFREEORDER - 1)) - 1); 1279f8a47341SAlan Cox #endif 1280570a2f4aSAlan Cox VM_OBJECT_UNLOCK(source); 1281de5f6a77SJohn Dyson } 1282df8bae1dSRodney W. Grimes 1283df8bae1dSRodney W. Grimes 1284df8bae1dSRodney W. Grimes /* 1285df8bae1dSRodney W. Grimes * Return the new things 1286df8bae1dSRodney W. Grimes */ 1287df8bae1dSRodney W. Grimes *offset = 0; 1288df8bae1dSRodney W. Grimes *object = result; 1289df8bae1dSRodney W. Grimes } 1290df8bae1dSRodney W. Grimes 1291c5aaa06dSAlan Cox /* 1292c5aaa06dSAlan Cox * vm_object_split: 1293c5aaa06dSAlan Cox * 1294c5aaa06dSAlan Cox * Split the pages in a map entry into a new object. This affords 1295c5aaa06dSAlan Cox * easier removal of unused pages, and keeps object inheritance from 1296c5aaa06dSAlan Cox * being a negative impact on memory usage. 
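 *
 *	Locking sketch (an illustration, not taken from the original
 *	source): the function expects the entry's object to be locked on
 *	entry and returns with entry->object.vm_object (possibly a newly
 *	allocated object) locked, so a hypothetical caller would look
 *	like
 *
 *		VM_OBJECT_LOCK(entry->object.vm_object);
 *		vm_object_split(entry);
 *		VM_OBJECT_UNLOCK(entry->object.vm_object);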
1297c5aaa06dSAlan Cox */ 1298c5aaa06dSAlan Cox void 1299c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry) 1300c5aaa06dSAlan Cox { 130173000556SAlan Cox vm_page_t m, m_next; 1302c5aaa06dSAlan Cox vm_object_t orig_object, new_object, source; 130373000556SAlan Cox vm_pindex_t idx, offidxstart; 130473000556SAlan Cox vm_size_t size; 1305c5aaa06dSAlan Cox 1306c5aaa06dSAlan Cox orig_object = entry->object.vm_object; 1307c5aaa06dSAlan Cox if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 1308c5aaa06dSAlan Cox return; 1309c5aaa06dSAlan Cox if (orig_object->ref_count <= 1) 1310c5aaa06dSAlan Cox return; 13114da9f125SAlan Cox VM_OBJECT_UNLOCK(orig_object); 1312c5aaa06dSAlan Cox 13134da9f125SAlan Cox offidxstart = OFF_TO_IDX(entry->offset); 131495442adfSAlan Cox size = atop(entry->end - entry->start); 1315c5aaa06dSAlan Cox 13164da9f125SAlan Cox /* 13174da9f125SAlan Cox * If swap_pager_copy() is later called, it will convert new_object 13184da9f125SAlan Cox * into a swap object. 13194da9f125SAlan Cox */ 13204da9f125SAlan Cox new_object = vm_object_allocate(OBJT_DEFAULT, size); 1321c5aaa06dSAlan Cox 1322c5474b8fSAlan Cox /* 1323c5474b8fSAlan Cox * At this point, the new object is still private, so the order in 1324c5474b8fSAlan Cox * which the original and new objects are locked does not matter. 1325c5474b8fSAlan Cox */ 132663f6cefcSAlan Cox VM_OBJECT_LOCK(new_object); 132763f6cefcSAlan Cox VM_OBJECT_LOCK(orig_object); 1328c5aaa06dSAlan Cox source = orig_object->backing_object; 1329c5aaa06dSAlan Cox if (source != NULL) { 13308e3a76fbSAlan Cox VM_OBJECT_LOCK(source); 133119c244d0SAlan Cox if ((source->flags & OBJ_DEAD) != 0) { 133219c244d0SAlan Cox VM_OBJECT_UNLOCK(source); 133319c244d0SAlan Cox VM_OBJECT_UNLOCK(orig_object); 133419c244d0SAlan Cox VM_OBJECT_UNLOCK(new_object); 133519c244d0SAlan Cox vm_object_deallocate(new_object); 133619c244d0SAlan Cox VM_OBJECT_LOCK(orig_object); 133719c244d0SAlan Cox return; 133819c244d0SAlan Cox } 13391c500307SAlan Cox LIST_INSERT_HEAD(&source->shadow_head, 1340c5aaa06dSAlan Cox new_object, shadow_list); 13418e3a76fbSAlan Cox source->shadow_count++; 13428e3a76fbSAlan Cox source->generation++; 1343b921a12bSAlan Cox vm_object_reference_locked(source); /* for new_object */ 1344c5aaa06dSAlan Cox vm_object_clear_flag(source, OBJ_ONEMAPPING); 1345e2479b4fSAlan Cox VM_OBJECT_UNLOCK(source); 1346c5aaa06dSAlan Cox new_object->backing_object_offset = 13474da9f125SAlan Cox orig_object->backing_object_offset + entry->offset; 1348c5aaa06dSAlan Cox new_object->backing_object = source; 1349c5aaa06dSAlan Cox } 1350c5aaa06dSAlan Cox retry: 135173000556SAlan Cox if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) { 135273000556SAlan Cox if (m->pindex < offidxstart) { 135373000556SAlan Cox m = vm_page_splay(offidxstart, orig_object->root); 135473000556SAlan Cox if ((orig_object->root = m)->pindex < offidxstart) 135573000556SAlan Cox m = TAILQ_NEXT(m, listq); 135673000556SAlan Cox } 135773000556SAlan Cox } 135873000556SAlan Cox vm_page_lock_queues(); 135973000556SAlan Cox for (; m != NULL && (idx = m->pindex - offidxstart) < size; 136073000556SAlan Cox m = m_next) { 136173000556SAlan Cox m_next = TAILQ_NEXT(m, listq); 1362c5aaa06dSAlan Cox 1363c5aaa06dSAlan Cox /* 1364c5aaa06dSAlan Cox * We must wait for pending I/O to complete before we can 1365c5aaa06dSAlan Cox * rename the page. 1366c5aaa06dSAlan Cox * 1367c5aaa06dSAlan Cox * We do not have to VM_PROT_NONE the page as mappings should 1368c5aaa06dSAlan Cox * not be changed by this operation. 
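 *
 * "Pending I/O" is detected via VPO_BUSY/m->busy below; such a
 * page is flagged referenced and wanted, the new object's lock is
 * dropped, the thread sleeps on the page (msleep() releases the
 * original object's lock for the duration of the sleep), and the
 * copy loop is restarted from "retry".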
1369c5aaa06dSAlan Cox */ 13709af80719SAlan Cox if ((m->oflags & VPO_BUSY) || m->busy) { 13715786be7cSAlan Cox vm_page_flag_set(m, PG_REFERENCED); 137291449ce9SAlan Cox vm_page_unlock_queues(); 1373de33beddSAlan Cox VM_OBJECT_UNLOCK(new_object); 13745786be7cSAlan Cox m->oflags |= VPO_WANTED; 1375c5474b8fSAlan Cox msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0); 1376de33beddSAlan Cox VM_OBJECT_LOCK(new_object); 1377c5aaa06dSAlan Cox goto retry; 1378de33beddSAlan Cox } 1379c5aaa06dSAlan Cox vm_page_rename(m, new_object, idx); 1380c5aaa06dSAlan Cox /* page automatically made dirty by rename and cache handled */ 1381c5aaa06dSAlan Cox vm_page_busy(m); 1382c5aaa06dSAlan Cox } 13835ba514bcSAlan Cox vm_page_unlock_queues(); 1384d7a013c3SAlan Cox if (orig_object->type == OBJT_SWAP) { 1385c5aaa06dSAlan Cox /* 1386c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case the orig_object's 1387c7c8dd7eSAlan Cox * and new_object's locks are released and reacquired. 1388c5aaa06dSAlan Cox */ 1389c5aaa06dSAlan Cox swap_pager_copy(orig_object, new_object, offidxstart, 0); 13907bfda801SAlan Cox 13917bfda801SAlan Cox /* 13927bfda801SAlan Cox * Transfer any cached pages from orig_object to new_object. 13937bfda801SAlan Cox */ 13947bfda801SAlan Cox if (__predict_false(orig_object->cache != NULL)) 13957bfda801SAlan Cox vm_page_cache_transfer(orig_object, offidxstart, 13967bfda801SAlan Cox new_object); 1397c5aaa06dSAlan Cox } 1398d7a013c3SAlan Cox VM_OBJECT_UNLOCK(orig_object); 1399c7118ed6SAlan Cox TAILQ_FOREACH(m, &new_object->memq, listq) 1400c5aaa06dSAlan Cox vm_page_wakeup(m); 1401c7c8dd7eSAlan Cox VM_OBJECT_UNLOCK(new_object); 1402c5aaa06dSAlan Cox entry->object.vm_object = new_object; 1403c5aaa06dSAlan Cox entry->offset = 0LL; 1404c5aaa06dSAlan Cox vm_object_deallocate(orig_object); 14054da9f125SAlan Cox VM_OBJECT_LOCK(new_object); 1406c5aaa06dSAlan Cox } 1407c5aaa06dSAlan Cox 14082ad1a3f7SMatthew Dillon #define OBSC_TEST_ALL_SHADOWED 0x0001 14092ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_NOWAIT 0x0002 14102ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_WAIT 0x0004 14112ad1a3f7SMatthew Dillon 1412b4ae4780SPoul-Henning Kamp static int 14132ad1a3f7SMatthew Dillon vm_object_backing_scan(vm_object_t object, int op) 14142ad1a3f7SMatthew Dillon { 14152ad1a3f7SMatthew Dillon int r = 1; 14162ad1a3f7SMatthew Dillon vm_page_t p; 14172ad1a3f7SMatthew Dillon vm_object_t backing_object; 14182ad1a3f7SMatthew Dillon vm_pindex_t backing_offset_index; 14192ad1a3f7SMatthew Dillon 14207ca33ad1SAlan Cox VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 14217ca33ad1SAlan Cox VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED); 14222ad1a3f7SMatthew Dillon 14232ad1a3f7SMatthew Dillon backing_object = object->backing_object; 14242ad1a3f7SMatthew Dillon backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 14252ad1a3f7SMatthew Dillon 14262ad1a3f7SMatthew Dillon /* 14272ad1a3f7SMatthew Dillon * Initial conditions 14282ad1a3f7SMatthew Dillon */ 14292ad1a3f7SMatthew Dillon if (op & OBSC_TEST_ALL_SHADOWED) { 14302ad1a3f7SMatthew Dillon /* 14317bfda801SAlan Cox * We do not want to have to test for the existence of cache 14327bfda801SAlan Cox * or swap pages in the backing object. XXX but with the 14332ad1a3f7SMatthew Dillon * new swapper this would be pretty easy to do. 14342ad1a3f7SMatthew Dillon * 14352ad1a3f7SMatthew Dillon * XXX what about anonymous MAP_SHARED memory that hasn't 14362ad1a3f7SMatthew Dillon * been ZFOD faulted yet? 
If we do not test for this, the 14372ad1a3f7SMatthew Dillon * shadow test may succeed! XXX 14382ad1a3f7SMatthew Dillon */ 14392ad1a3f7SMatthew Dillon if (backing_object->type != OBJT_DEFAULT) { 14402ad1a3f7SMatthew Dillon return (0); 14412ad1a3f7SMatthew Dillon } 14422ad1a3f7SMatthew Dillon } 14432ad1a3f7SMatthew Dillon if (op & OBSC_COLLAPSE_WAIT) { 14442ad1a3f7SMatthew Dillon vm_object_set_flag(backing_object, OBJ_DEAD); 14452ad1a3f7SMatthew Dillon } 14462ad1a3f7SMatthew Dillon 14472ad1a3f7SMatthew Dillon /* 14482ad1a3f7SMatthew Dillon * Our scan 14492ad1a3f7SMatthew Dillon */ 14502ad1a3f7SMatthew Dillon p = TAILQ_FIRST(&backing_object->memq); 14512ad1a3f7SMatthew Dillon while (p) { 14522ad1a3f7SMatthew Dillon vm_page_t next = TAILQ_NEXT(p, listq); 14532ad1a3f7SMatthew Dillon vm_pindex_t new_pindex = p->pindex - backing_offset_index; 14542ad1a3f7SMatthew Dillon 14552ad1a3f7SMatthew Dillon if (op & OBSC_TEST_ALL_SHADOWED) { 14562ad1a3f7SMatthew Dillon vm_page_t pp; 14572ad1a3f7SMatthew Dillon 14582ad1a3f7SMatthew Dillon /* 14592ad1a3f7SMatthew Dillon * Ignore pages outside the parent object's range 14602ad1a3f7SMatthew Dillon * and outside the parent object's mapping of the 14612ad1a3f7SMatthew Dillon * backing object. 14622ad1a3f7SMatthew Dillon * 14632ad1a3f7SMatthew Dillon * note that we do not busy the backing object's 14642ad1a3f7SMatthew Dillon * page. 14652ad1a3f7SMatthew Dillon */ 14662ad1a3f7SMatthew Dillon if ( 14672ad1a3f7SMatthew Dillon p->pindex < backing_offset_index || 14682ad1a3f7SMatthew Dillon new_pindex >= object->size 14692ad1a3f7SMatthew Dillon ) { 14702ad1a3f7SMatthew Dillon p = next; 14712ad1a3f7SMatthew Dillon continue; 14722ad1a3f7SMatthew Dillon } 14732ad1a3f7SMatthew Dillon 14742ad1a3f7SMatthew Dillon /* 14752ad1a3f7SMatthew Dillon * See if the parent has the page or if the parent's 14762ad1a3f7SMatthew Dillon * object pager has the page. If the parent has the 14772ad1a3f7SMatthew Dillon * page but the page is not valid, the parent's 14782ad1a3f7SMatthew Dillon * object pager must have the page. 14792ad1a3f7SMatthew Dillon * 14802ad1a3f7SMatthew Dillon * If this fails, the parent does not completely shadow 14812ad1a3f7SMatthew Dillon * the object and we might as well give up now. 
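 *
 * In other words, the check below counts a backing page as
 * shadowed only if vm_page_lookup() finds a valid page at
 * new_pindex in the parent or vm_pager_has_page() reports backing
 * store for that index; otherwise the scan returns 0.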
14822ad1a3f7SMatthew Dillon */ 14832ad1a3f7SMatthew Dillon 14842ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 14852ad1a3f7SMatthew Dillon if ( 14862ad1a3f7SMatthew Dillon (pp == NULL || pp->valid == 0) && 14872ad1a3f7SMatthew Dillon !vm_pager_has_page(object, new_pindex, NULL, NULL) 14882ad1a3f7SMatthew Dillon ) { 14892ad1a3f7SMatthew Dillon r = 0; 14902ad1a3f7SMatthew Dillon break; 14912ad1a3f7SMatthew Dillon } 14922ad1a3f7SMatthew Dillon } 14932ad1a3f7SMatthew Dillon 14942ad1a3f7SMatthew Dillon /* 14952ad1a3f7SMatthew Dillon * Check for busy page 14962ad1a3f7SMatthew Dillon */ 14972ad1a3f7SMatthew Dillon if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { 14982ad1a3f7SMatthew Dillon vm_page_t pp; 14992ad1a3f7SMatthew Dillon 15002ad1a3f7SMatthew Dillon if (op & OBSC_COLLAPSE_NOWAIT) { 15019af80719SAlan Cox if ((p->oflags & VPO_BUSY) || 15022ad1a3f7SMatthew Dillon !p->valid || 150300f9e8b4SAlan Cox p->busy) { 15042ad1a3f7SMatthew Dillon p = next; 15052ad1a3f7SMatthew Dillon continue; 15062ad1a3f7SMatthew Dillon } 15072ad1a3f7SMatthew Dillon } else if (op & OBSC_COLLAPSE_WAIT) { 15089af80719SAlan Cox if ((p->oflags & VPO_BUSY) || p->busy) { 1509c6ec6a7cSAlan Cox vm_page_lock_queues(); 15105786be7cSAlan Cox vm_page_flag_set(p, PG_REFERENCED); 151191449ce9SAlan Cox vm_page_unlock_queues(); 15127ca33ad1SAlan Cox VM_OBJECT_UNLOCK(object); 15135786be7cSAlan Cox p->oflags |= VPO_WANTED; 151491449ce9SAlan Cox msleep(p, VM_OBJECT_MTX(backing_object), 15157ca33ad1SAlan Cox PDROP | PVM, "vmocol", 0); 15167ca33ad1SAlan Cox VM_OBJECT_LOCK(object); 15177ca33ad1SAlan Cox VM_OBJECT_LOCK(backing_object); 15182ad1a3f7SMatthew Dillon /* 15192ad1a3f7SMatthew Dillon * If we slept, anything could have 15202ad1a3f7SMatthew Dillon * happened. Since the object is 15212ad1a3f7SMatthew Dillon * marked dead, the backing offset 15222ad1a3f7SMatthew Dillon * should not have changed so we 15232ad1a3f7SMatthew Dillon * just restart our scan. 15242ad1a3f7SMatthew Dillon */ 15252ad1a3f7SMatthew Dillon p = TAILQ_FIRST(&backing_object->memq); 15262ad1a3f7SMatthew Dillon continue; 15272ad1a3f7SMatthew Dillon } 15282ad1a3f7SMatthew Dillon } 15292ad1a3f7SMatthew Dillon 15302ad1a3f7SMatthew Dillon KASSERT( 15312ad1a3f7SMatthew Dillon p->object == backing_object, 15328e99783bSAlan Cox ("vm_object_backing_scan: object mismatch") 15332ad1a3f7SMatthew Dillon ); 15342ad1a3f7SMatthew Dillon 15352ad1a3f7SMatthew Dillon /* 15362ad1a3f7SMatthew Dillon * Destroy any associated swap 15372ad1a3f7SMatthew Dillon */ 15382ad1a3f7SMatthew Dillon if (backing_object->type == OBJT_SWAP) { 15392ad1a3f7SMatthew Dillon swap_pager_freespace( 15402ad1a3f7SMatthew Dillon backing_object, 15412ad1a3f7SMatthew Dillon p->pindex, 15422ad1a3f7SMatthew Dillon 1 15432ad1a3f7SMatthew Dillon ); 15442ad1a3f7SMatthew Dillon } 15452ad1a3f7SMatthew Dillon 15462ad1a3f7SMatthew Dillon if ( 15472ad1a3f7SMatthew Dillon p->pindex < backing_offset_index || 15482ad1a3f7SMatthew Dillon new_pindex >= object->size 15492ad1a3f7SMatthew Dillon ) { 15502ad1a3f7SMatthew Dillon /* 15512ad1a3f7SMatthew Dillon * Page is out of the parent object's range, we 15522ad1a3f7SMatthew Dillon * can simply destroy it. 
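 *
 * "Destroy" means vm_page_free() for unwired pages; a wired page
 * is only removed from the backing object, leaving its wiring
 * (and eventual release) to whoever wired it.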
15532ad1a3f7SMatthew Dillon */ 15546a684ecfSAlan Cox vm_page_lock_queues(); 1555f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1556f6d89838SAlan Cox ("freeing mapped page %p", p)); 1557f6d89838SAlan Cox if (p->wire_count == 0) 15582ad1a3f7SMatthew Dillon vm_page_free(p); 1559f6d89838SAlan Cox else 1560f6d89838SAlan Cox vm_page_remove(p); 15616a684ecfSAlan Cox vm_page_unlock_queues(); 15622ad1a3f7SMatthew Dillon p = next; 15632ad1a3f7SMatthew Dillon continue; 15642ad1a3f7SMatthew Dillon } 15652ad1a3f7SMatthew Dillon 15662ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 15672ad1a3f7SMatthew Dillon if ( 15682ad1a3f7SMatthew Dillon pp != NULL || 15692ad1a3f7SMatthew Dillon vm_pager_has_page(object, new_pindex, NULL, NULL) 15702ad1a3f7SMatthew Dillon ) { 15712ad1a3f7SMatthew Dillon /* 15722ad1a3f7SMatthew Dillon * page already exists in parent OR swap exists 15732ad1a3f7SMatthew Dillon * for this location in the parent. Destroy 15742ad1a3f7SMatthew Dillon * the original page from the backing object. 15752ad1a3f7SMatthew Dillon * 15762ad1a3f7SMatthew Dillon * Leave the parent's page alone 15772ad1a3f7SMatthew Dillon */ 15786a684ecfSAlan Cox vm_page_lock_queues(); 1579f6d89838SAlan Cox KASSERT(!pmap_page_is_mapped(p), 1580f6d89838SAlan Cox ("freeing mapped page %p", p)); 1581f6d89838SAlan Cox if (p->wire_count == 0) 15822ad1a3f7SMatthew Dillon vm_page_free(p); 1583f6d89838SAlan Cox else 1584f6d89838SAlan Cox vm_page_remove(p); 15856a684ecfSAlan Cox vm_page_unlock_queues(); 15862ad1a3f7SMatthew Dillon p = next; 15872ad1a3f7SMatthew Dillon continue; 15882ad1a3f7SMatthew Dillon } 15892ad1a3f7SMatthew Dillon 1590f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1591f8a47341SAlan Cox /* 1592f8a47341SAlan Cox * Rename the reservation. 1593f8a47341SAlan Cox */ 1594f8a47341SAlan Cox vm_reserv_rename(p, object, backing_object, 1595f8a47341SAlan Cox backing_offset_index); 1596f8a47341SAlan Cox #endif 1597f8a47341SAlan Cox 15982ad1a3f7SMatthew Dillon /* 15992ad1a3f7SMatthew Dillon * Page does not exist in parent, rename the 16002ad1a3f7SMatthew Dillon * page from the backing object to the main object. 1601d1bf5d56SMatthew Dillon * 1602d1bf5d56SMatthew Dillon * If the page was mapped to a process, it can remain 1603d1bf5d56SMatthew Dillon * mapped through the rename. 16042ad1a3f7SMatthew Dillon */ 1605a28cc55eSAlan Cox vm_page_lock_queues(); 16062ad1a3f7SMatthew Dillon vm_page_rename(p, object, new_pindex); 1607a28cc55eSAlan Cox vm_page_unlock_queues(); 16082ad1a3f7SMatthew Dillon /* page automatically made dirty by rename */ 16092ad1a3f7SMatthew Dillon } 16102ad1a3f7SMatthew Dillon p = next; 16112ad1a3f7SMatthew Dillon } 16122ad1a3f7SMatthew Dillon return (r); 16132ad1a3f7SMatthew Dillon } 16142ad1a3f7SMatthew Dillon 1615df8bae1dSRodney W. Grimes 1616df8bae1dSRodney W. Grimes /* 16172fe6e4d7SDavid Greenman * this version of collapse allows the operation to occur earlier and 16182fe6e4d7SDavid Greenman * when paging_in_progress is true for an object... This is not a complete 16192fe6e4d7SDavid Greenman * operation, but should plug 99.9% of the rest of the leaks. 
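 *
 * It requires both the object and its backing object to be locked,
 * only acts when the backing object has a single reference, and
 * performs just the non-blocking OBSC_COLLAPSE_NOWAIT scan; a full
 * collapse is still left to vm_object_collapse().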
16202fe6e4d7SDavid Greenman */ 16212fe6e4d7SDavid Greenman static void 16221b40f8c0SMatthew Dillon vm_object_qcollapse(vm_object_t object) 16232fe6e4d7SDavid Greenman { 16242ad1a3f7SMatthew Dillon vm_object_t backing_object = object->backing_object; 16252fe6e4d7SDavid Greenman 162606ecade7SAlan Cox VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 162706ecade7SAlan Cox VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED); 16281b40f8c0SMatthew Dillon 16292fe6e4d7SDavid Greenman if (backing_object->ref_count != 1) 16302fe6e4d7SDavid Greenman return; 16312fe6e4d7SDavid Greenman 16322ad1a3f7SMatthew Dillon vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT); 16332fe6e4d7SDavid Greenman } 16342fe6e4d7SDavid Greenman 1635df8bae1dSRodney W. Grimes /* 1636df8bae1dSRodney W. Grimes * vm_object_collapse: 1637df8bae1dSRodney W. Grimes * 1638df8bae1dSRodney W. Grimes * Collapse an object with the object backing it. 1639df8bae1dSRodney W. Grimes * Pages in the backing object are moved into the 1640df8bae1dSRodney W. Grimes * parent, and the backing object is deallocated. 1641df8bae1dSRodney W. Grimes */ 164226f9a767SRodney W. Grimes void 16431b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object) 1644df8bae1dSRodney W. Grimes { 1645d7fc2210SAlan Cox VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 164623955314SAlfred Perlstein 1647df8bae1dSRodney W. Grimes while (TRUE) { 16482ad1a3f7SMatthew Dillon vm_object_t backing_object; 16492ad1a3f7SMatthew Dillon 1650df8bae1dSRodney W. Grimes /* 1651df8bae1dSRodney W. Grimes * Verify that the conditions are right for collapse: 1652df8bae1dSRodney W. Grimes * 16532ad1a3f7SMatthew Dillon * The object exists and the backing object exists. 1654df8bae1dSRodney W. Grimes */ 165524a1cce3SDavid Greenman if ((backing_object = object->backing_object) == NULL) 16562ad1a3f7SMatthew Dillon break; 1657df8bae1dSRodney W. Grimes 1658f919ebdeSDavid Greenman /* 1659f919ebdeSDavid Greenman * we check the backing object first, because it is most likely 166024a1cce3SDavid Greenman * not collapsable. 1661f919ebdeSDavid Greenman */ 166240b808a8SAlan Cox VM_OBJECT_LOCK(backing_object); 166324a1cce3SDavid Greenman if (backing_object->handle != NULL || 166424a1cce3SDavid Greenman (backing_object->type != OBJT_DEFAULT && 166524a1cce3SDavid Greenman backing_object->type != OBJT_SWAP) || 1666f919ebdeSDavid Greenman (backing_object->flags & OBJ_DEAD) || 166724a1cce3SDavid Greenman object->handle != NULL || 166824a1cce3SDavid Greenman (object->type != OBJT_DEFAULT && 166924a1cce3SDavid Greenman object->type != OBJT_SWAP) || 167024a1cce3SDavid Greenman (object->flags & OBJ_DEAD)) { 167140b808a8SAlan Cox VM_OBJECT_UNLOCK(backing_object); 16722ad1a3f7SMatthew Dillon break; 167324a1cce3SDavid Greenman } 16749b4814bbSDavid Greenman 16752ad1a3f7SMatthew Dillon if ( 16762ad1a3f7SMatthew Dillon object->paging_in_progress != 0 || 16772ad1a3f7SMatthew Dillon backing_object->paging_in_progress != 0 16782ad1a3f7SMatthew Dillon ) { 1679b9921222SDavid Greenman vm_object_qcollapse(object); 168040b808a8SAlan Cox VM_OBJECT_UNLOCK(backing_object); 16812ad1a3f7SMatthew Dillon break; 1682df8bae1dSRodney W. Grimes } 168326f9a767SRodney W. Grimes /* 16840d94caffSDavid Greenman * We know that we can either collapse the backing object (if 16852ad1a3f7SMatthew Dillon * the parent is the only reference to it) or (perhaps) have 16862ad1a3f7SMatthew Dillon * the parent bypass the object if the parent happens to shadow 16872ad1a3f7SMatthew Dillon * all the resident pages in the entire backing object. 
16882ad1a3f7SMatthew Dillon * 16892ad1a3f7SMatthew Dillon * This is ignoring pager-backed pages such as swap pages. 16902ad1a3f7SMatthew Dillon * vm_object_backing_scan fails the shadowing test in this 16912ad1a3f7SMatthew Dillon * case. 1692df8bae1dSRodney W. Grimes */ 1693df8bae1dSRodney W. Grimes if (backing_object->ref_count == 1) { 1694df8bae1dSRodney W. Grimes /* 16952ad1a3f7SMatthew Dillon * If there is exactly one reference to the backing 16962ad1a3f7SMatthew Dillon * object, we can collapse it into the parent. 1697df8bae1dSRodney W. Grimes */ 16982ad1a3f7SMatthew Dillon vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT); 1699df8bae1dSRodney W. Grimes 1700f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1701f8a47341SAlan Cox /* 1702f8a47341SAlan Cox * Break any reservations from backing_object. 1703f8a47341SAlan Cox */ 1704f8a47341SAlan Cox if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) 1705f8a47341SAlan Cox vm_reserv_break_all(backing_object); 1706f8a47341SAlan Cox #endif 1707f8a47341SAlan Cox 1708df8bae1dSRodney W. Grimes /* 1709df8bae1dSRodney W. Grimes * Move the pager from backing_object to object. 1710df8bae1dSRodney W. Grimes */ 17116be36525SAlan Cox if (backing_object->type == OBJT_SWAP) { 171224a1cce3SDavid Greenman /* 1713c7c8dd7eSAlan Cox * swap_pager_copy() can sleep, in which case 1714c7c8dd7eSAlan Cox * the backing_object's and object's locks are 1715c7c8dd7eSAlan Cox * released and reacquired. 171624a1cce3SDavid Greenman */ 17171c7c3c6aSMatthew Dillon swap_pager_copy( 17181c7c3c6aSMatthew Dillon backing_object, 17191c7c3c6aSMatthew Dillon object, 17201c7c3c6aSMatthew Dillon OFF_TO_IDX(object->backing_object_offset), TRUE); 17217bfda801SAlan Cox 17227bfda801SAlan Cox /* 17237bfda801SAlan Cox * Free any cached pages from backing_object. 17247bfda801SAlan Cox */ 17257bfda801SAlan Cox if (__predict_false(backing_object->cache != NULL)) 1726c9444914SAlan Cox vm_page_cache_free(backing_object, 0, 0); 1727c0503609SDavid Greenman } 1728df8bae1dSRodney W. Grimes /* 1729df8bae1dSRodney W. Grimes * Object now shadows whatever backing_object did. 17302ad1a3f7SMatthew Dillon * Note that the reference to 17312ad1a3f7SMatthew Dillon * backing_object->backing_object moves from within 17322ad1a3f7SMatthew Dillon * backing_object to within object. 1733df8bae1dSRodney W. Grimes */ 17341c500307SAlan Cox LIST_REMOVE(object, shadow_list); 17354f7c7f6eSAlan Cox backing_object->shadow_count--; 17364f7c7f6eSAlan Cox backing_object->generation++; 1737de5f6a77SJohn Dyson if (backing_object->backing_object) { 17386be36525SAlan Cox VM_OBJECT_LOCK(backing_object->backing_object); 17391c500307SAlan Cox LIST_REMOVE(backing_object, shadow_list); 174043186e53SAlan Cox LIST_INSERT_HEAD( 174143186e53SAlan Cox &backing_object->backing_object->shadow_head, 174243186e53SAlan Cox object, shadow_list); 174343186e53SAlan Cox /* 174443186e53SAlan Cox * The shadow_count has not changed. 174543186e53SAlan Cox */ 1746eaf13dd7SJohn Dyson backing_object->backing_object->generation++; 17476be36525SAlan Cox VM_OBJECT_UNLOCK(backing_object->backing_object); 1748de5f6a77SJohn Dyson } 174924a1cce3SDavid Greenman object->backing_object = backing_object->backing_object; 17502ad1a3f7SMatthew Dillon object->backing_object_offset += 17512ad1a3f7SMatthew Dillon backing_object->backing_object_offset; 17522ad1a3f7SMatthew Dillon 1753df8bae1dSRodney W. Grimes /* 1754df8bae1dSRodney W. Grimes * Discard backing_object. 1755df8bae1dSRodney W. 
Grimes * 17560d94caffSDavid Greenman * Since the backing object has no pages, no pager left, 17570d94caffSDavid Greenman * and no object references within it, all that is 17580d94caffSDavid Greenman * necessary is to dispose of it. 1759df8bae1dSRodney W. Grimes */ 1760245df27cSMatthew Dillon KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object)); 17616be36525SAlan Cox VM_OBJECT_UNLOCK(backing_object); 1762df8bae1dSRodney W. Grimes 1763a5698387SAlan Cox mtx_lock(&vm_object_list_mtx); 17642ad1a3f7SMatthew Dillon TAILQ_REMOVE( 17652ad1a3f7SMatthew Dillon &vm_object_list, 17662ad1a3f7SMatthew Dillon backing_object, 17672ad1a3f7SMatthew Dillon object_list 17682ad1a3f7SMatthew Dillon ); 1769a5698387SAlan Cox mtx_unlock(&vm_object_list_mtx); 1770df8bae1dSRodney W. Grimes 1771670d17b5SJeff Roberson uma_zfree(obj_zone, backing_object); 1772df8bae1dSRodney W. Grimes 1773df8bae1dSRodney W. Grimes object_collapses++; 17740d94caffSDavid Greenman } else { 177595e5e988SJohn Dyson vm_object_t new_backing_object; 1776df8bae1dSRodney W. Grimes 1777df8bae1dSRodney W. Grimes /* 17782ad1a3f7SMatthew Dillon * If we do not entirely shadow the backing object, 17792ad1a3f7SMatthew Dillon * there is nothing we can do so we give up. 1780df8bae1dSRodney W. Grimes */ 1781df59a0feSJeff Roberson if (object->resident_page_count != object->size && 1782df59a0feSJeff Roberson vm_object_backing_scan(object, 1783df59a0feSJeff Roberson OBSC_TEST_ALL_SHADOWED) == 0) { 178440b808a8SAlan Cox VM_OBJECT_UNLOCK(backing_object); 17852ad1a3f7SMatthew Dillon break; 178624a1cce3SDavid Greenman } 1787df8bae1dSRodney W. Grimes 1788df8bae1dSRodney W. Grimes /* 17890d94caffSDavid Greenman * Make the parent shadow the next object in the 17900d94caffSDavid Greenman * chain. Deallocating backing_object will not remove 17910d94caffSDavid Greenman * it, since its reference count is at least 2. 1792df8bae1dSRodney W. Grimes */ 17931c500307SAlan Cox LIST_REMOVE(object, shadow_list); 1794eaf13dd7SJohn Dyson backing_object->shadow_count--; 1795eaf13dd7SJohn Dyson backing_object->generation++; 179695e5e988SJohn Dyson 179795e5e988SJohn Dyson new_backing_object = backing_object->backing_object; 17988aef1712SMatthew Dillon if ((object->backing_object = new_backing_object) != NULL) { 17996be36525SAlan Cox VM_OBJECT_LOCK(new_backing_object); 18001c500307SAlan Cox LIST_INSERT_HEAD( 18012ad1a3f7SMatthew Dillon &new_backing_object->shadow_head, 18022ad1a3f7SMatthew Dillon object, 18032ad1a3f7SMatthew Dillon shadow_list 18042ad1a3f7SMatthew Dillon ); 1805eaf13dd7SJohn Dyson new_backing_object->shadow_count++; 1806eaf13dd7SJohn Dyson new_backing_object->generation++; 1807b921a12bSAlan Cox vm_object_reference_locked(new_backing_object); 18086be36525SAlan Cox VM_OBJECT_UNLOCK(new_backing_object); 180995e5e988SJohn Dyson object->backing_object_offset += 181095e5e988SJohn Dyson backing_object->backing_object_offset; 1811de5f6a77SJohn Dyson } 1812df8bae1dSRodney W. Grimes 1813df8bae1dSRodney W. Grimes /* 18140d94caffSDavid Greenman * Drop the reference count on backing_object. Since 181522ec553fSAlan Cox * its ref_count was at least 2, it will not vanish. 1816df8bae1dSRodney W. Grimes */ 181722ec553fSAlan Cox backing_object->ref_count--; 181822ec553fSAlan Cox VM_OBJECT_UNLOCK(backing_object); 1819df8bae1dSRodney W. Grimes object_bypasses++; 1820df8bae1dSRodney W. Grimes } 1821df8bae1dSRodney W. Grimes 1822df8bae1dSRodney W. Grimes /* 1823df8bae1dSRodney W. 
Grimes * Try again with this object's new backing object. 1824df8bae1dSRodney W. Grimes */ 1825df8bae1dSRodney W. Grimes } 1826df8bae1dSRodney W. Grimes } 1827df8bae1dSRodney W. Grimes 1828df8bae1dSRodney W. Grimes /* 1829bff99f0dSAlan Cox * vm_object_page_remove: 1830df8bae1dSRodney W. Grimes * 183168855966SAlan Cox * For the given object, either frees or invalidates each of the 183268855966SAlan Cox * specified pages. In general, a page is freed. However, if a 183368855966SAlan Cox * page is wired for any reason other than the existence of a 183468855966SAlan Cox * managed, wired mapping, then it may be invalidated but not 183568855966SAlan Cox * removed from the object. Pages are specified by the given 183668855966SAlan Cox * range ["start", "end") and Boolean "clean_only". As a 183768855966SAlan Cox * special case, if "end" is zero, then the range extends from 183868855966SAlan Cox * "start" to the end of the object. If "clean_only" is TRUE, 183968855966SAlan Cox * then only the non-dirty pages within the specified range are 184068855966SAlan Cox * affected. 184168855966SAlan Cox * 184268855966SAlan Cox * In general, this operation should only be performed on objects 184368855966SAlan Cox * that contain managed pages. There are two exceptions. First, 184468855966SAlan Cox * it may be performed on the kernel and kmem objects. Second, 184568855966SAlan Cox * it may be used by msync(..., MS_INVALIDATE) to invalidate 184668855966SAlan Cox * device-backed pages. 1847df8bae1dSRodney W. Grimes * 1848df8bae1dSRodney W. Grimes * The object must be locked. 1849df8bae1dSRodney W. Grimes */ 185026f9a767SRodney W. Grimes void 1851ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 1852ecde4b32SAlan Cox boolean_t clean_only) 1853df8bae1dSRodney W. Grimes { 1854d031cff1SMatthew Dillon vm_page_t p, next; 185559677d3cSAlan Cox int wirings; 1856df8bae1dSRodney W. Grimes 1857ecde4b32SAlan Cox VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1858ecde4b32SAlan Cox if (object->resident_page_count == 0) 185925732691SAlan Cox goto skipmemq; 186095e5e988SJohn Dyson 18618b03c8edSMatthew Dillon /* 18628b03c8edSMatthew Dillon * Since physically-backed objects do not use managed pages, we can't 18638b03c8edSMatthew Dillon * remove pages from the object (we must instead remove the page 18648b03c8edSMatthew Dillon * references, and then destroy the object). 18658b03c8edSMatthew Dillon */ 18669f5c801bSAlan Cox KASSERT(object->type != OBJT_PHYS || object == kernel_object || 18679f5c801bSAlan Cox object == kmem_object, 1868ecde4b32SAlan Cox ("attempt to remove pages from a physical object")); 18698b03c8edSMatthew Dillon 1870d474eaaaSDoug Rabson vm_object_pip_add(object, 1); 187126f9a767SRodney W. Grimes again: 187275741c04SAlan Cox if ((p = TAILQ_FIRST(&object->memq)) != NULL) { 187375741c04SAlan Cox if (p->pindex < start) { 187475741c04SAlan Cox p = vm_page_splay(start, object->root); 187575741c04SAlan Cox if ((object->root = p)->pindex < start) 187675741c04SAlan Cox p = TAILQ_NEXT(p, listq); 187775741c04SAlan Cox } 187875741c04SAlan Cox } 1879bfd9b137SAlan Cox vm_page_lock_queues(); 188075741c04SAlan Cox /* 188175741c04SAlan Cox * Assert: the variable p is either (1) the page with the 188275741c04SAlan Cox * least pindex greater than or equal to the parameter pindex 188375741c04SAlan Cox * or (2) NULL. 
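 *
 * ("pindex" above refers to the "start" parameter.)  This holds
 * because memq is kept sorted by pindex and the vm_page_splay()
 * call above rotates to the root the page at "start" or a neighbor
 * of it, stepping to the next page in list order when that root
 * still precedes "start".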
188475741c04SAlan Cox */ 188575741c04SAlan Cox for (; 1886bff99f0dSAlan Cox p != NULL && (p->pindex < end || end == 0); 188775741c04SAlan Cox p = next) { 1888b18bfc3dSJohn Dyson next = TAILQ_NEXT(p, listq); 188975741c04SAlan Cox 189059677d3cSAlan Cox /* 189159677d3cSAlan Cox * If the page is wired for any reason besides the 189259677d3cSAlan Cox * existence of managed, wired mappings, then it cannot 189368855966SAlan Cox * be freed. For example, fictitious pages, which 189468855966SAlan Cox * represent device memory, are inherently wired and 189568855966SAlan Cox * cannot be freed. They can, however, be invalidated 189668855966SAlan Cox * if "clean_only" is FALSE. 189759677d3cSAlan Cox */ 189859677d3cSAlan Cox if ((wirings = p->wire_count) != 0 && 189959677d3cSAlan Cox (wirings = pmap_page_wired_mappings(p)) != p->wire_count) { 190068855966SAlan Cox /* Fictitious pages do not have managed mappings. */ 190168855966SAlan Cox if ((p->flags & PG_FICTITIOUS) == 0) 19024fec79beSAlan Cox pmap_remove_all(p); 190359677d3cSAlan Cox /* Account for removal of managed, wired mappings. */ 190459677d3cSAlan Cox p->wire_count -= wirings; 1905a28042d1SAlan Cox if (!clean_only) { 1906bd7e5f99SJohn Dyson p->valid = 0; 1907a28042d1SAlan Cox vm_page_undirty(p); 1908a28042d1SAlan Cox } 19090d94caffSDavid Greenman continue; 19100d94caffSDavid Greenman } 191132585dd6SAlan Cox if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) 191226f9a767SRodney W. Grimes goto again; 191368855966SAlan Cox KASSERT((p->flags & PG_FICTITIOUS) == 0, 191468855966SAlan Cox ("vm_object_page_remove: page %p is fictitious", p)); 19158f9110f6SJohn Dyson if (clean_only && p->valid) { 191678985e42SAlan Cox pmap_remove_write(p); 19177c1f6cedSDavid Greenman if (p->valid & p->dirty) 19187c1f6cedSDavid Greenman continue; 19197c1f6cedSDavid Greenman } 19204fec79beSAlan Cox pmap_remove_all(p); 192159677d3cSAlan Cox /* Account for removal of managed, wired mappings. */ 192259677d3cSAlan Cox if (wirings != 0) 192359677d3cSAlan Cox p->wire_count -= wirings; 1924df8bae1dSRodney W. Grimes vm_page_free(p); 192526f9a767SRodney W. Grimes } 192632585dd6SAlan Cox vm_page_unlock_queues(); 1927f919ebdeSDavid Greenman vm_object_pip_wakeup(object); 192825732691SAlan Cox skipmemq: 1929c9444914SAlan Cox if (__predict_false(object->cache != NULL)) 1930c9444914SAlan Cox vm_page_cache_free(object, start, end); 1931c0503609SDavid Greenman } 1932df8bae1dSRodney W. Grimes 1933df8bae1dSRodney W. Grimes /* 1934df8bae1dSRodney W. Grimes * Routine: vm_object_coalesce 1935df8bae1dSRodney W. Grimes * Function: Coalesces two objects backing up adjoining 1936df8bae1dSRodney W. Grimes * regions of memory into a single object. 1937df8bae1dSRodney W. Grimes * 1938df8bae1dSRodney W. Grimes * returns TRUE if objects were combined. 1939df8bae1dSRodney W. Grimes * 1940df8bae1dSRodney W. Grimes * NOTE: Only works at the moment if the second object is NULL - 1941df8bae1dSRodney W. Grimes * if it's not, which object do we lock first? 1942df8bae1dSRodney W. Grimes * 1943df8bae1dSRodney W. Grimes * Parameters: 1944df8bae1dSRodney W. Grimes * prev_object First object to coalesce 1945df8bae1dSRodney W. Grimes * prev_offset Offset into prev_object 1946df8bae1dSRodney W. Grimes * prev_size Size of reference to prev_object 194757a21abaSAlan Cox * next_size Size of reference to the second object 1948df8bae1dSRodney W. Grimes * 1949df8bae1dSRodney W. Grimes * Conditions: 1950df8bae1dSRodney W. Grimes * The object must *not* be locked. 1951df8bae1dSRodney W. 
Grimes */ 19520d94caffSDavid Greenman boolean_t 195357a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 195400e1854aSAlan Cox vm_size_t prev_size, vm_size_t next_size) 1955df8bae1dSRodney W. Grimes { 1956ea41812fSAlan Cox vm_pindex_t next_pindex; 1957df8bae1dSRodney W. Grimes 195800e1854aSAlan Cox if (prev_object == NULL) 1959df8bae1dSRodney W. Grimes return (TRUE); 1960bdbfbaafSAlan Cox VM_OBJECT_LOCK(prev_object); 19614112823fSMatthew Dillon if (prev_object->type != OBJT_DEFAULT && 19624112823fSMatthew Dillon prev_object->type != OBJT_SWAP) { 1963bdbfbaafSAlan Cox VM_OBJECT_UNLOCK(prev_object); 196430dcfc09SJohn Dyson return (FALSE); 196530dcfc09SJohn Dyson } 196630dcfc09SJohn Dyson 1967df8bae1dSRodney W. Grimes /* 1968df8bae1dSRodney W. Grimes * Try to collapse the object first 1969df8bae1dSRodney W. Grimes */ 1970df8bae1dSRodney W. Grimes vm_object_collapse(prev_object); 1971df8bae1dSRodney W. Grimes 1972df8bae1dSRodney W. Grimes /* 19730d94caffSDavid Greenman * Can't coalesce if: . more than one reference . paged out . shadows 19740d94caffSDavid Greenman * another object . has a copy elsewhere (any of which mean that the 19750d94caffSDavid Greenman * pages not mapped to prev_entry may be in use anyway) 1976df8bae1dSRodney W. Grimes */ 19778cc7e047SJohn Dyson if (prev_object->backing_object != NULL) { 1978bdbfbaafSAlan Cox VM_OBJECT_UNLOCK(prev_object); 1979df8bae1dSRodney W. Grimes return (FALSE); 1980df8bae1dSRodney W. Grimes } 1981a316d390SJohn Dyson 1982a316d390SJohn Dyson prev_size >>= PAGE_SHIFT; 1983a316d390SJohn Dyson next_size >>= PAGE_SHIFT; 198457a21abaSAlan Cox next_pindex = OFF_TO_IDX(prev_offset) + prev_size; 19858cc7e047SJohn Dyson 19868cc7e047SJohn Dyson if ((prev_object->ref_count > 1) && 1987ea41812fSAlan Cox (prev_object->size != next_pindex)) { 1988bdbfbaafSAlan Cox VM_OBJECT_UNLOCK(prev_object); 19898cc7e047SJohn Dyson return (FALSE); 19908cc7e047SJohn Dyson } 19918cc7e047SJohn Dyson 1992df8bae1dSRodney W. Grimes /* 19930d94caffSDavid Greenman * Remove any pages that may still be in the object from a previous 19940d94caffSDavid Greenman * deallocation. 1995df8bae1dSRodney W. Grimes */ 1996ea41812fSAlan Cox if (next_pindex < prev_object->size) { 1997df8bae1dSRodney W. Grimes vm_object_page_remove(prev_object, 1998ea41812fSAlan Cox next_pindex, 1999ea41812fSAlan Cox next_pindex + next_size, FALSE); 2000ea41812fSAlan Cox if (prev_object->type == OBJT_SWAP) 2001ea41812fSAlan Cox swap_pager_freespace(prev_object, 2002ea41812fSAlan Cox next_pindex, next_size); 2003ea41812fSAlan Cox } 2004df8bae1dSRodney W. Grimes 2005df8bae1dSRodney W. Grimes /* 2006df8bae1dSRodney W. Grimes * Extend the object if necessary. 2007df8bae1dSRodney W. Grimes */ 2008ea41812fSAlan Cox if (next_pindex + next_size > prev_object->size) 2009ea41812fSAlan Cox prev_object->size = next_pindex + next_size; 2010df8bae1dSRodney W. Grimes 2011bdbfbaafSAlan Cox VM_OBJECT_UNLOCK(prev_object); 2012df8bae1dSRodney W. Grimes return (TRUE); 2013df8bae1dSRodney W. Grimes } 2014df8bae1dSRodney W. 
Grimes 20157a5a6352SMatthew Dillon void 20167a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object) 20177a5a6352SMatthew Dillon { 20187a5a6352SMatthew Dillon struct vnode *vp; 20197a5a6352SMatthew Dillon 2020de33beddSAlan Cox VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2021af51d7bfSAlan Cox if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) 2022ee39666aSJeff Roberson return; 2023af51d7bfSAlan Cox vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); 20247a5a6352SMatthew Dillon if (object->type == OBJT_VNODE && 20257a5a6352SMatthew Dillon (vp = (struct vnode *)object->handle) != NULL) { 2026e6e370a7SJeff Roberson VI_LOCK(vp); 2027e6e370a7SJeff Roberson vp->v_iflag |= VI_OBJDIRTY; 2028e6e370a7SJeff Roberson VI_UNLOCK(vp); 20297a5a6352SMatthew Dillon } 20307a5a6352SMatthew Dillon } 20317a5a6352SMatthew Dillon 2032c7c34a24SBruce Evans #include "opt_ddb.h" 2033c3cb3e12SDavid Greenman #ifdef DDB 2034c7c34a24SBruce Evans #include <sys/kernel.h> 2035c7c34a24SBruce Evans 2036ce9edcf5SPoul-Henning Kamp #include <sys/cons.h> 2037c7c34a24SBruce Evans 2038c7c34a24SBruce Evans #include <ddb/ddb.h> 2039c7c34a24SBruce Evans 2040cac597e4SBruce Evans static int 20411b40f8c0SMatthew Dillon _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2042a1f6d91cSDavid Greenman { 2043a1f6d91cSDavid Greenman vm_map_t tmpm; 2044a1f6d91cSDavid Greenman vm_map_entry_t tmpe; 2045a1f6d91cSDavid Greenman vm_object_t obj; 2046a1f6d91cSDavid Greenman int entcount; 2047a1f6d91cSDavid Greenman 2048a1f6d91cSDavid Greenman if (map == 0) 2049a1f6d91cSDavid Greenman return 0; 2050a1f6d91cSDavid Greenman 2051a1f6d91cSDavid Greenman if (entry == 0) { 2052a1f6d91cSDavid Greenman tmpe = map->header.next; 2053a1f6d91cSDavid Greenman entcount = map->nentries; 2054a1f6d91cSDavid Greenman while (entcount-- && (tmpe != &map->header)) { 2055a1f6d91cSDavid Greenman if (_vm_object_in_map(map, object, tmpe)) { 2056a1f6d91cSDavid Greenman return 1; 2057a1f6d91cSDavid Greenman } 2058a1f6d91cSDavid Greenman tmpe = tmpe->next; 2059a1f6d91cSDavid Greenman } 20609fdfe602SMatthew Dillon } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 20619fdfe602SMatthew Dillon tmpm = entry->object.sub_map; 2062a1f6d91cSDavid Greenman tmpe = tmpm->header.next; 2063a1f6d91cSDavid Greenman entcount = tmpm->nentries; 2064a1f6d91cSDavid Greenman while (entcount-- && tmpe != &tmpm->header) { 2065a1f6d91cSDavid Greenman if (_vm_object_in_map(tmpm, object, tmpe)) { 2066a1f6d91cSDavid Greenman return 1; 2067a1f6d91cSDavid Greenman } 2068a1f6d91cSDavid Greenman tmpe = tmpe->next; 2069a1f6d91cSDavid Greenman } 20708aef1712SMatthew Dillon } else if ((obj = entry->object.vm_object) != NULL) { 207124a1cce3SDavid Greenman for (; obj; obj = obj->backing_object) 2072a1f6d91cSDavid Greenman if (obj == object) { 2073a1f6d91cSDavid Greenman return 1; 2074a1f6d91cSDavid Greenman } 2075a1f6d91cSDavid Greenman } 2076a1f6d91cSDavid Greenman return 0; 2077a1f6d91cSDavid Greenman } 2078a1f6d91cSDavid Greenman 2079cac597e4SBruce Evans static int 20801b40f8c0SMatthew Dillon vm_object_in_map(vm_object_t object) 2081a1f6d91cSDavid Greenman { 2082a1f6d91cSDavid Greenman struct proc *p; 20831005a129SJohn Baldwin 208460517fd1SJohn Baldwin /* sx_slock(&allproc_lock); */ 2085f67af5c9SXin LI FOREACH_PROC_IN_SYSTEM(p) { 2086a1f6d91cSDavid Greenman if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2087a1f6d91cSDavid Greenman continue; 2088553629ebSJake Burkholder if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 208960517fd1SJohn Baldwin /* 
sx_sunlock(&allproc_lock); */ 2090a1f6d91cSDavid Greenman return 1; 2091a1f6d91cSDavid Greenman } 2092553629ebSJake Burkholder } 209360517fd1SJohn Baldwin /* sx_sunlock(&allproc_lock); */ 2094a1f6d91cSDavid Greenman if (_vm_object_in_map(kernel_map, object, 0)) 2095a1f6d91cSDavid Greenman return 1; 2096a1f6d91cSDavid Greenman if (_vm_object_in_map(kmem_map, object, 0)) 2097a1f6d91cSDavid Greenman return 1; 2098a1f6d91cSDavid Greenman if (_vm_object_in_map(pager_map, object, 0)) 2099a1f6d91cSDavid Greenman return 1; 2100a1f6d91cSDavid Greenman if (_vm_object_in_map(buffer_map, object, 0)) 2101a1f6d91cSDavid Greenman return 1; 2102a1f6d91cSDavid Greenman return 0; 2103a1f6d91cSDavid Greenman } 2104a1f6d91cSDavid Greenman 2105c7c34a24SBruce Evans DB_SHOW_COMMAND(vmochk, vm_object_check) 2106f708ef1bSPoul-Henning Kamp { 2107a1f6d91cSDavid Greenman vm_object_t object; 2108a1f6d91cSDavid Greenman 2109a1f6d91cSDavid Greenman /* 2110a1f6d91cSDavid Greenman * make sure that internal objs are in a map somewhere 2111a1f6d91cSDavid Greenman * and none have zero ref counts. 2112a1f6d91cSDavid Greenman */ 2113cc64b484SAlfred Perlstein TAILQ_FOREACH(object, &vm_object_list, object_list) { 211424a1cce3SDavid Greenman if (object->handle == NULL && 211524a1cce3SDavid Greenman (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2116a1f6d91cSDavid Greenman if (object->ref_count == 0) { 21173efc015bSPeter Wemm db_printf("vmochk: internal obj has zero ref count: %ld\n", 21183efc015bSPeter Wemm (long)object->size); 2119a1f6d91cSDavid Greenman } 2120a1f6d91cSDavid Greenman if (!vm_object_in_map(object)) { 2121fc62ef1fSBruce Evans db_printf( 2122fc62ef1fSBruce Evans "vmochk: internal obj is not in a map: " 2123fc62ef1fSBruce Evans "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2124fc62ef1fSBruce Evans object->ref_count, (u_long)object->size, 2125fc62ef1fSBruce Evans (u_long)object->size, 2126fc62ef1fSBruce Evans (void *)object->backing_object); 2127a1f6d91cSDavid Greenman } 2128a1f6d91cSDavid Greenman } 2129a1f6d91cSDavid Greenman } 2130a1f6d91cSDavid Greenman } 2131a1f6d91cSDavid Greenman 213226f9a767SRodney W. Grimes /* 2133df8bae1dSRodney W. Grimes * vm_object_print: [ debug ] 2134df8bae1dSRodney W. Grimes */ 2135c7c34a24SBruce Evans DB_SHOW_COMMAND(object, vm_object_print_static) 2136df8bae1dSRodney W. Grimes { 2137c7c34a24SBruce Evans /* XXX convert args. */ 2138c7c34a24SBruce Evans vm_object_t object = (vm_object_t)addr; 2139c7c34a24SBruce Evans boolean_t full = have_addr; 2140c7c34a24SBruce Evans 2141d031cff1SMatthew Dillon vm_page_t p; 2142df8bae1dSRodney W. Grimes 2143c7c34a24SBruce Evans /* XXX count is an (unused) arg. Avoid shadowing it. */ 2144c7c34a24SBruce Evans #define count was_count 2145c7c34a24SBruce Evans 2146d031cff1SMatthew Dillon int count; 2147df8bae1dSRodney W. Grimes 2148df8bae1dSRodney W. Grimes if (object == NULL) 2149df8bae1dSRodney W. Grimes return; 2150df8bae1dSRodney W. Grimes 2151eb95adefSBruce Evans db_iprintf( 2152e47cd172SMaxime Henrion "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n", 2153e47cd172SMaxime Henrion object, (int)object->type, (uintmax_t)object->size, 2154eb95adefSBruce Evans object->resident_page_count, object->ref_count, object->flags); 2155e47cd172SMaxime Henrion db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 21561c7c3c6aSMatthew Dillon object->shadow_count, 2157eb95adefSBruce Evans object->backing_object ? 
object->backing_object->ref_count : 0, 2158e47cd172SMaxime Henrion object->backing_object, (uintmax_t)object->backing_object_offset); 2159df8bae1dSRodney W. Grimes 2160df8bae1dSRodney W. Grimes if (!full) 2161df8bae1dSRodney W. Grimes return; 2162df8bae1dSRodney W. Grimes 2163c7c34a24SBruce Evans db_indent += 2; 2164df8bae1dSRodney W. Grimes count = 0; 2165fc2ffbe6SPoul-Henning Kamp TAILQ_FOREACH(p, &object->memq, listq) { 2166df8bae1dSRodney W. Grimes if (count == 0) 2167c7c34a24SBruce Evans db_iprintf("memory:="); 2168df8bae1dSRodney W. Grimes else if (count == 6) { 2169c7c34a24SBruce Evans db_printf("\n"); 2170c7c34a24SBruce Evans db_iprintf(" ..."); 2171df8bae1dSRodney W. Grimes count = 0; 2172df8bae1dSRodney W. Grimes } else 2173c7c34a24SBruce Evans db_printf(","); 2174df8bae1dSRodney W. Grimes count++; 2175df8bae1dSRodney W. Grimes 2176e47cd172SMaxime Henrion db_printf("(off=0x%jx,page=0x%jx)", 2177e47cd172SMaxime Henrion (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2178df8bae1dSRodney W. Grimes } 2179df8bae1dSRodney W. Grimes if (count != 0) 2180c7c34a24SBruce Evans db_printf("\n"); 2181c7c34a24SBruce Evans db_indent -= 2; 2182df8bae1dSRodney W. Grimes } 21835070c7f8SJohn Dyson 2184c7c34a24SBruce Evans /* XXX. */ 2185c7c34a24SBruce Evans #undef count 2186c7c34a24SBruce Evans 2187c7c34a24SBruce Evans /* XXX need this non-static entry for calling from vm_map_print. */ 21885070c7f8SJohn Dyson void 21891b40f8c0SMatthew Dillon vm_object_print( 21901b40f8c0SMatthew Dillon /* db_expr_t */ long addr, 21911b40f8c0SMatthew Dillon boolean_t have_addr, 21921b40f8c0SMatthew Dillon /* db_expr_t */ long count, 21931b40f8c0SMatthew Dillon char *modif) 2194c7c34a24SBruce Evans { 2195c7c34a24SBruce Evans vm_object_print_static(addr, have_addr, count, modif); 2196c7c34a24SBruce Evans } 2197c7c34a24SBruce Evans 2198c7c34a24SBruce Evans DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 21995070c7f8SJohn Dyson { 22005070c7f8SJohn Dyson vm_object_t object; 2201bb2ac86fSKonstantin Belousov vm_pindex_t fidx; 2202bb2ac86fSKonstantin Belousov vm_paddr_t pa; 2203bb2ac86fSKonstantin Belousov vm_page_t m, prev_m; 2204bb2ac86fSKonstantin Belousov int rcount, nl, c; 2205cc64b484SAlfred Perlstein 2206bb2ac86fSKonstantin Belousov nl = 0; 2207cc64b484SAlfred Perlstein TAILQ_FOREACH(object, &vm_object_list, object_list) { 2208fc62ef1fSBruce Evans db_printf("new object: %p\n", (void *)object); 22095070c7f8SJohn Dyson if (nl > 18) { 22105070c7f8SJohn Dyson c = cngetc(); 22115070c7f8SJohn Dyson if (c != ' ') 22125070c7f8SJohn Dyson return; 22135070c7f8SJohn Dyson nl = 0; 22145070c7f8SJohn Dyson } 22155070c7f8SJohn Dyson nl++; 22165070c7f8SJohn Dyson rcount = 0; 22175070c7f8SJohn Dyson fidx = 0; 2218bb2ac86fSKonstantin Belousov pa = -1; 2219bb2ac86fSKonstantin Belousov TAILQ_FOREACH(m, &object->memq, listq) { 2220bb2ac86fSKonstantin Belousov if (m->pindex > 128) 2221bb2ac86fSKonstantin Belousov break; 2222bb2ac86fSKonstantin Belousov if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && 2223bb2ac86fSKonstantin Belousov prev_m->pindex + 1 != m->pindex) { 22245070c7f8SJohn Dyson if (rcount) { 22253efc015bSPeter Wemm db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 22263efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 22275070c7f8SJohn Dyson if (nl > 18) { 22285070c7f8SJohn Dyson c = cngetc(); 22295070c7f8SJohn Dyson if (c != ' ') 22305070c7f8SJohn Dyson return; 22315070c7f8SJohn Dyson nl = 0; 22325070c7f8SJohn Dyson } 22335070c7f8SJohn Dyson nl++; 22345070c7f8SJohn Dyson rcount = 0; 22355070c7f8SJohn Dyson } 
22365070c7f8SJohn Dyson } 22375070c7f8SJohn Dyson if (rcount && 22385070c7f8SJohn Dyson (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 22395070c7f8SJohn Dyson ++rcount; 22405070c7f8SJohn Dyson continue; 22415070c7f8SJohn Dyson } 22425070c7f8SJohn Dyson if (rcount) { 22432446e4f0SAlan Cox db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 22443efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 22455070c7f8SJohn Dyson if (nl > 18) { 22465070c7f8SJohn Dyson c = cngetc(); 22475070c7f8SJohn Dyson if (c != ' ') 22485070c7f8SJohn Dyson return; 22495070c7f8SJohn Dyson nl = 0; 22505070c7f8SJohn Dyson } 22515070c7f8SJohn Dyson nl++; 22525070c7f8SJohn Dyson } 2253bb2ac86fSKonstantin Belousov fidx = m->pindex; 22545070c7f8SJohn Dyson pa = VM_PAGE_TO_PHYS(m); 22555070c7f8SJohn Dyson rcount = 1; 22565070c7f8SJohn Dyson } 22575070c7f8SJohn Dyson if (rcount) { 22583efc015bSPeter Wemm db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 22593efc015bSPeter Wemm (long)fidx, rcount, (long)pa); 22605070c7f8SJohn Dyson if (nl > 18) { 22615070c7f8SJohn Dyson c = cngetc(); 22625070c7f8SJohn Dyson if (c != ' ') 22635070c7f8SJohn Dyson return; 22645070c7f8SJohn Dyson nl = 0; 22655070c7f8SJohn Dyson } 22665070c7f8SJohn Dyson nl++; 22675070c7f8SJohn Dyson } 22685070c7f8SJohn Dyson } 22695070c7f8SJohn Dyson } 2270c3cb3e12SDavid Greenman #endif /* DDB */ 2271