1df8bae1dSRodney W. Grimes /* 2df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 3df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 4df8bae1dSRodney W. Grimes * 5df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 6df8bae1dSRodney W. Grimes * The Mach Operating System project at Carnegie-Mellon University. 7df8bae1dSRodney W. Grimes * 8df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 9df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 10df8bae1dSRodney W. Grimes * are met: 11df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 12df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 13df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 15df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 16df8bae1dSRodney W. Grimes * 3. All advertising materials mentioning features or use of this software 175929bcfaSPhilippe Charnier * must display the following acknowledgement: 18df8bae1dSRodney W. Grimes * This product includes software developed by the University of 19df8bae1dSRodney W. Grimes * California, Berkeley and its contributors. 20df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 21df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 22df8bae1dSRodney W. Grimes * without specific prior written permission. 23df8bae1dSRodney W. Grimes * 24df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25df8bae1dSRodney W. 
Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34df8bae1dSRodney W. Grimes * SUCH DAMAGE. 35df8bae1dSRodney W. Grimes * 363c4dd356SDavid Greenman * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 37df8bae1dSRodney W. Grimes * 38df8bae1dSRodney W. Grimes * 39df8bae1dSRodney W. Grimes * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40df8bae1dSRodney W. Grimes * All rights reserved. 41df8bae1dSRodney W. Grimes * 42df8bae1dSRodney W. Grimes * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43df8bae1dSRodney W. Grimes * 44df8bae1dSRodney W. Grimes * Permission to use, copy, modify and distribute this software and 45df8bae1dSRodney W. Grimes * its documentation is hereby granted, provided that both the copyright 46df8bae1dSRodney W. Grimes * notice and this permission notice appear in all copies of the 47df8bae1dSRodney W. Grimes * software, derivative works or modified versions, and any portions 48df8bae1dSRodney W. Grimes * thereof, and that both notices appear in supporting documentation. 49df8bae1dSRodney W. Grimes * 50df8bae1dSRodney W. 
Grimes * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51df8bae1dSRodney W. Grimes * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52df8bae1dSRodney W. Grimes * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53df8bae1dSRodney W. Grimes * 54df8bae1dSRodney W. Grimes * Carnegie Mellon requests users of this software to return to 55df8bae1dSRodney W. Grimes * 56df8bae1dSRodney W. Grimes * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57df8bae1dSRodney W. Grimes * School of Computer Science 58df8bae1dSRodney W. Grimes * Carnegie Mellon University 59df8bae1dSRodney W. Grimes * Pittsburgh PA 15213-3890 60df8bae1dSRodney W. Grimes * 61df8bae1dSRodney W. Grimes * any improvements or extensions that they make and grant Carnegie the 62df8bae1dSRodney W. Grimes * rights to redistribute these changes. 633c4dd356SDavid Greenman * 64c3aac50fSPeter Wemm * $FreeBSD$ 65df8bae1dSRodney W. Grimes */ 66df8bae1dSRodney W. Grimes 67df8bae1dSRodney W. Grimes /* 68df8bae1dSRodney W. Grimes * Virtual memory object module. 69df8bae1dSRodney W. Grimes */ 70df8bae1dSRodney W. Grimes 71df8bae1dSRodney W. Grimes #include <sys/param.h> 72df8bae1dSRodney W. Grimes #include <sys/systm.h> 73fb919e4dSMark Murray #include <sys/lock.h> 74867a482dSJohn Dyson #include <sys/mman.h> 75cf2819ccSJohn Dyson #include <sys/mount.h> 761b367556SJason Evans #include <sys/mutex.h> 77fb919e4dSMark Murray #include <sys/proc.h> /* for curproc, pageproc */ 78fb919e4dSMark Murray #include <sys/socket.h> 79fb919e4dSMark Murray #include <sys/vnode.h> 80fb919e4dSMark Murray #include <sys/vmmeter.h> 811005a129SJohn Baldwin #include <sys/sx.h> 82df8bae1dSRodney W. Grimes 83df8bae1dSRodney W. Grimes #include <vm/vm.h> 84efeaf95aSDavid Greenman #include <vm/vm_param.h> 85efeaf95aSDavid Greenman #include <vm/pmap.h> 86efeaf95aSDavid Greenman #include <vm/vm_map.h> 87efeaf95aSDavid Greenman #include <vm/vm_object.h> 88df8bae1dSRodney W. 
Grimes #include <vm/vm_page.h> 8926f9a767SRodney W. Grimes #include <vm/vm_pageout.h> 900d94caffSDavid Greenman #include <vm/vm_pager.h> 9121cd6e62SSeigo Tanimura #include <vm/vm_zone.h> 9205f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h> 93a1f6d91cSDavid Greenman #include <vm/vm_kern.h> 94efeaf95aSDavid Greenman #include <vm/vm_extern.h> 9526f9a767SRodney W. Grimes 96cac597e4SBruce Evans static void vm_object_qcollapse __P((vm_object_t object)); 97f6b04d2bSDavid Greenman 98df8bae1dSRodney W. Grimes /* 99df8bae1dSRodney W. Grimes * Virtual memory objects maintain the actual data 100df8bae1dSRodney W. Grimes * associated with allocated virtual memory. A given 101df8bae1dSRodney W. Grimes * page of memory exists within exactly one object. 102df8bae1dSRodney W. Grimes * 103df8bae1dSRodney W. Grimes * An object is only deallocated when all "references" 104df8bae1dSRodney W. Grimes * are given up. Only one "reference" to a given 105df8bae1dSRodney W. Grimes * region of an object should be writeable. 106df8bae1dSRodney W. Grimes * 107df8bae1dSRodney W. Grimes * Associated with each object is a list of all resident 108df8bae1dSRodney W. Grimes * memory pages belonging to that object; this list is 109df8bae1dSRodney W. Grimes * maintained by the "vm_page" module, and locked by the object's 110df8bae1dSRodney W. Grimes * lock. 111df8bae1dSRodney W. Grimes * 112df8bae1dSRodney W. Grimes * Each object also records a "pager" routine which is 113df8bae1dSRodney W. Grimes * used to retrieve (and store) pages to the proper backing 114df8bae1dSRodney W. Grimes * storage. In addition, objects may be backed by other 115df8bae1dSRodney W. Grimes * objects from which they were virtual-copied. 116df8bae1dSRodney W. Grimes * 117df8bae1dSRodney W. Grimes * The only items within the object structure which are 118df8bae1dSRodney W. Grimes * modified after time of creation are: 119df8bae1dSRodney W. Grimes * reference count locked by object's lock 120df8bae1dSRodney W. 
 *	pager routine		locked by object's lock
 *
 */

/* Global list of all vm_objects in the system. */
struct object_q vm_object_list;
static struct mtx vm_object_list_mtx;	/* lock for object list and count */
static long vm_object_count;		/* count of all objects */
/* The two statically-allocated, permanent kernel objects. */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;		/* statistic: collapse operations */
static long object_bypasses;		/* statistic: bypass operations */
static int next_index;			/* next page color to hand out */
static vm_zone_t obj_zone;		/* allocation zone for vm_objects */
static struct vm_zone obj_zone_store;
static int object_hash_rand;		/* rolling seed for object->hash_rand */
#define VM_OBJECTS_INIT 256
/* Bootstrap backing store for obj_zone before the zone allocator is live. */
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

/*
 *	_vm_object_allocate:
 *
 *	Initialize an already-allocated vm_object of the given type and
 *	size and link it onto the global object list.  The new object
 *	starts with one reference, no handle and no backing object.
 *	Giant must be held.
 */
void
_vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
{
	int incr;

	GIANT_REQUIRED;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	/* Anonymous (default/swap) objects begin with a single mapping. */
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	/*
	 * Assign a starting page color and advance next_index so that
	 * successive objects are spread across the PQ_L2 color space.
	 * The stride is capped at PQ_L2_SIZE / 3 + PQ_PRIME1 for large
	 * objects.
	 */
	object->pg_color = next_index;
	if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.  We 'wipe' new objects across the hash in 128 page
	 * increments plus 1 more to offset it a little more by the time
	 * it wraps around.
	 */
	object->hash_rand = object_hash_rand - 129;

	object->generation++;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	/* Carry the rolling seed forward for the next allocation. */
	object_hash_rand = object->hash_rand;
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	GIANT_REQUIRED;

	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", MTX_DEF);
	vm_object_count = 0;

	/*
	 * Hand-initialize the two permanent kernel objects from their
	 * static stores; both cover the entire kernel address range.
	 */
	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	/*
	 * Bootstrap the object zone with a static array of objects so
	 * that objects can be allocated before the zone allocator is
	 * fully operational.
	 */
	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
	    vm_objects_init, VM_OBJECTS_INIT);
}

/*
 *	vm_object_init2:
 *
 *	Second-stage initialization: switch obj_zone over to normal
 *	(dynamically backed) zone allocation.
 */
void
vm_object_init2(void)
{
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
}
/*
 *	vm_object_set_flag:
 *
 *	Atomically set the given bits in the object's flags field.
 */
void
vm_object_set_flag(vm_object_t object, u_short bits)
{
	GIANT_REQUIRED;
	atomic_set_short(&object->flags, bits);
	/* object->flags |= bits; */
}

/*
 *	vm_object_clear_flag:
 *
 *	Atomically clear the given bits in the object's flags field.
 */
void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
	GIANT_REQUIRED;
	atomic_clear_short(&object->flags, bits);
	/* object->flags &= ~bits; */
}

/*
 *	vm_object_pip_add:
 *
 *	Atomically add i to the object's paging-in-progress count.
 */
void
vm_object_pip_add(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	atomic_add_short(&object->paging_in_progress, i);
	/* object->paging_in_progress += i; */
}

/*
 *	vm_object_pip_subtract:
 *
 *	Atomically subtract i from the object's paging-in-progress
 *	count, without waking up waiters.
 */
void
vm_object_pip_subtract(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	atomic_subtract_short(&object->paging_in_progress, i);
	/* object->paging_in_progress -= i; */
}

/*
 *	vm_object_pip_wakeup:
 *
 *	Drop one paging-in-progress reference and, if the count reaches
 *	zero and someone is waiting (OBJ_PIPWNT), wake them up.
 */
void
vm_object_pip_wakeup(vm_object_t object)
{
	GIANT_REQUIRED;
	atomic_subtract_short(&object->paging_in_progress, 1);
	/* object->paging_in_progress--; */
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

/*
 *	vm_object_pip_wakeupn:
 *
 *	Drop i paging-in-progress references (i may be 0) and wake up
 *	waiters if the count reaches zero.
 */
void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	if (i)
		atomic_subtract_short(&object->paging_in_progress, i);
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

/*
 *	vm_object_pip_sleep:
 *
 *	If paging is in progress on the object, mark it OBJ_PIPWNT and
 *	sleep once until a wakeup.  The count is re-checked at splvm()
 *	to close the race between the first test and the tsleep.
 */
void
vm_object_pip_sleep(vm_object_t object, char *waitid)
{
	GIANT_REQUIRED;
	if (object->paging_in_progress) {
		int s = splvm();
		if (object->paging_in_progress) {
			vm_object_set_flag(object, OBJ_PIPWNT);
			tsleep(object, PVM, waitid, 0);
		}
		splx(s);
	}
}

/*
 *	vm_object_pip_wait:
 *
 *	Sleep until all paging-in-progress activity on the object has
 *	completed.
 */
void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
	GIANT_REQUIRED;
	while (object->paging_in_progress)
		vm_object_pip_sleep(object, waitid);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(objtype_t type, vm_size_t size)
{
	vm_object_t result;

	GIANT_REQUIRED;

	/* Allocate from the object zone, then do common initialization. */
	result = (vm_object_t) zalloc(obj_zone);
	_vm_object_allocate(type, size, result);

	return (result);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  A NULL object is
 *	silently ignored.  For vnode-backed objects the underlying
 *	vnode is also referenced via vget(); the vget() is retried
 *	until it succeeds.
 */
void
vm_object_reference(vm_object_t object)
{
	GIANT_REQUIRED;

	if (object == NULL)
		return;

	KASSERT(!(object->flags & OBJ_DEAD),
	    ("vm_object_reference: attempting to reference dead obj"));

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
			printf("vm_object_reference: delay in getting object\n");
		}
	}
}

/*
 * handle deallocating a object of type OBJT_VNODE
 *
 * Drops one reference; on the final reference the vnode loses its
 * VTEXT marking and the object's OBJ_OPT flag is cleared.  The vnode
 * reference taken by vm_object_reference() is released via vrele().
 */
void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	GIANT_REQUIRED;
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		vp->v_flag &= ~VTEXT;
		vm_object_clear_flag(object, OBJ_OPT);
	}
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	GIANT_REQUIRED;

	/*
	 * Loop so that terminating an object lets us continue with its
	 * backing object (the reference we held on it) without
	 * recursion.
	 */
	while (object != NULL) {

		/* Vnode-backed objects have their own deallocation path. */
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
			("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0) {
				/*
				 * Only one mapping remains; note it so
				 * copy optimizations can be applied.
				 */
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				/*
				 * The last reference is our single
				 * anonymous shadow; try to collapse the
				 * shadow into us (or terminate it if we
				 * hold its only reference).
				 */
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
					object->ref_count,
					object->shadow_count));
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					/*
					 * Wait for any paging activity on
					 * either object to drain before
					 * deciding what to do with the
					 * shadow.
					 */
					while (
						robject->paging_in_progress ||
						object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						/* We hold the only reference
						 * to the shadow; terminate it.
						 */
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}

			return;

		}

doterm:

		/*
		 * Unlink from our backing object before terminating;
		 * remember it so the loop can drop the reference we
		 * held on it.
		 */
		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			temp->shadow_count--;
			if (temp->ref_count == 0)
				vm_object_clear_flag(temp, OBJ_OPT);
			temp->generation++;
			object->backing_object = NULL;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 * This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;
	int s;

	GIANT_REQUIRED;

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}

	KASSERT(object->ref_count == 0,
		("vm_object_terminate: object with references, ref_count=%d",
		object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
			("vm_object_terminate: freeing busy page %p "
			"p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			/* Wired: detach from the object but don't free. */
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	splx(s);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	/* Wake anyone sleeping on the object address. */
	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.
If NOSYNC is set then do not 5674f79d873SMatthew Dillon * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC), 5684f79d873SMatthew Dillon * leaving the object dirty. 56926f9a767SRodney W. Grimes * 57026f9a767SRodney W. Grimes * Odd semantics: if start == end, we clean everything. 57126f9a767SRodney W. Grimes * 57226f9a767SRodney W. Grimes * The object must be locked. 57326f9a767SRodney W. Grimes */ 574f6b04d2bSDavid Greenman 575f6b04d2bSDavid Greenman void 5761b40f8c0SMatthew Dillon vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags) 577f6b04d2bSDavid Greenman { 578d031cff1SMatthew Dillon vm_page_t p, np, tp; 579d031cff1SMatthew Dillon vm_offset_t tstart, tend; 580bd7e5f99SJohn Dyson vm_pindex_t pi; 581aef922f5SJohn Dyson int s; 58224a1cce3SDavid Greenman struct vnode *vp; 583aef922f5SJohn Dyson int runlen; 584bd7e5f99SJohn Dyson int maxf; 585bd7e5f99SJohn Dyson int chkb; 586bd7e5f99SJohn Dyson int maxb; 587bd7e5f99SJohn Dyson int i; 5884f79d873SMatthew Dillon int clearobjflags; 5898f9110f6SJohn Dyson int pagerflags; 590bd7e5f99SJohn Dyson vm_page_t maf[vm_pageout_page_count]; 591bd7e5f99SJohn Dyson vm_page_t mab[vm_pageout_page_count]; 592aef922f5SJohn Dyson vm_page_t ma[vm_pageout_page_count]; 5932d8acc0fSJohn Dyson int curgeneration; 594f6b04d2bSDavid Greenman 5950cddd8f0SMatthew Dillon GIANT_REQUIRED; 5960cddd8f0SMatthew Dillon 597aef922f5SJohn Dyson if (object->type != OBJT_VNODE || 598aef922f5SJohn Dyson (object->flags & OBJ_MIGHTBEDIRTY) == 0) 599f6b04d2bSDavid Greenman return; 600f6b04d2bSDavid Greenman 6018f9110f6SJohn Dyson pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0; 6028f9110f6SJohn Dyson pagerflags |= (flags & OBJPC_INVAL) ? 
VM_PAGER_PUT_INVAL : 0; 6038f9110f6SJohn Dyson 60424a1cce3SDavid Greenman vp = object->handle; 60524a1cce3SDavid Greenman 606069e9bc1SDoug Rabson vm_object_set_flag(object, OBJ_CLEANING); 60724a1cce3SDavid Greenman 608f6b04d2bSDavid Greenman tstart = start; 609f6b04d2bSDavid Greenman if (end == 0) { 610f6b04d2bSDavid Greenman tend = object->size; 611f6b04d2bSDavid Greenman } else { 612f6b04d2bSDavid Greenman tend = end; 613f6b04d2bSDavid Greenman } 614eaf13dd7SJohn Dyson 6154f79d873SMatthew Dillon /* 6164f79d873SMatthew Dillon * Generally set CLEANCHK interlock and make the page read-only so 6174f79d873SMatthew Dillon * we can then clear the object flags. 6184f79d873SMatthew Dillon * 6194f79d873SMatthew Dillon * However, if this is a nosync mmap then the object is likely to 6204f79d873SMatthew Dillon * stay dirty so do not mess with the page and do not clear the 6214f79d873SMatthew Dillon * object flags. 6224f79d873SMatthew Dillon */ 6234f79d873SMatthew Dillon 6244f79d873SMatthew Dillon clearobjflags = 1; 6254f79d873SMatthew Dillon 626fc2ffbe6SPoul-Henning Kamp TAILQ_FOREACH(p, &object->memq, listq) { 627e69763a3SDoug Rabson vm_page_flag_set(p, PG_CLEANCHK); 6284f79d873SMatthew Dillon if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) 6294f79d873SMatthew Dillon clearobjflags = 0; 6304f79d873SMatthew Dillon else 631eaf13dd7SJohn Dyson vm_page_protect(p, VM_PROT_READ); 632eaf13dd7SJohn Dyson } 633eaf13dd7SJohn Dyson 6344f79d873SMatthew Dillon if (clearobjflags && (tstart == 0) && (tend == object->size)) { 635069e9bc1SDoug Rabson vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); 636ec4f9fb0SDavid Greenman } 637f6b04d2bSDavid Greenman 638bd7e5f99SJohn Dyson rescan: 6392d8acc0fSJohn Dyson curgeneration = object->generation; 6402d8acc0fSJohn Dyson 641b18bfc3dSJohn Dyson for (p = TAILQ_FIRST(&object->memq); p; p = np) { 642b18bfc3dSJohn Dyson np = TAILQ_NEXT(p, listq); 643bd7e5f99SJohn Dyson 644bd7e5f99SJohn Dyson pi = p->pindex; 645bd7e5f99SJohn Dyson if 
(((p->flags & PG_CLEANCHK) == 0) || 646bd7e5f99SJohn Dyson (pi < tstart) || (pi >= tend) || 6475070c7f8SJohn Dyson (p->valid == 0) || 6485070c7f8SJohn Dyson ((p->queue - p->pc) == PQ_CACHE)) { 649e69763a3SDoug Rabson vm_page_flag_clear(p, PG_CLEANCHK); 650aef922f5SJohn Dyson continue; 651f6b04d2bSDavid Greenman } 652f6b04d2bSDavid Greenman 653bd7e5f99SJohn Dyson vm_page_test_dirty(p); 654bd7e5f99SJohn Dyson if ((p->dirty & p->valid) == 0) { 655e69763a3SDoug Rabson vm_page_flag_clear(p, PG_CLEANCHK); 656bd7e5f99SJohn Dyson continue; 657bd7e5f99SJohn Dyson } 658ec4f9fb0SDavid Greenman 6594f79d873SMatthew Dillon /* 6604f79d873SMatthew Dillon * If we have been asked to skip nosync pages and this is a 6614f79d873SMatthew Dillon * nosync page, skip it. Note that the object flags were 6624f79d873SMatthew Dillon * not cleared in this case so we do not have to set them. 6634f79d873SMatthew Dillon */ 6644f79d873SMatthew Dillon if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) { 6654f79d873SMatthew Dillon vm_page_flag_clear(p, PG_CLEANCHK); 6664f79d873SMatthew Dillon continue; 6674f79d873SMatthew Dillon } 6684f79d873SMatthew Dillon 669b18bfc3dSJohn Dyson s = splvm(); 6701c7c3c6aSMatthew Dillon while (vm_page_sleep_busy(p, TRUE, "vpcwai")) { 6712d8acc0fSJohn Dyson if (object->generation != curgeneration) { 672f6b04d2bSDavid Greenman splx(s); 673bd7e5f99SJohn Dyson goto rescan; 674f6b04d2bSDavid Greenman } 6752d8acc0fSJohn Dyson } 676f6b04d2bSDavid Greenman 677bd7e5f99SJohn Dyson maxf = 0; 678bd7e5f99SJohn Dyson for (i = 1; i < vm_pageout_page_count; i++) { 6798aef1712SMatthew Dillon if ((tp = vm_page_lookup(object, pi + i)) != NULL) { 680bd7e5f99SJohn Dyson if ((tp->flags & PG_BUSY) || 681ffc82b0aSJohn Dyson (tp->flags & PG_CLEANCHK) == 0 || 682ffc82b0aSJohn Dyson (tp->busy != 0)) 683bd7e5f99SJohn Dyson break; 6845070c7f8SJohn Dyson if((tp->queue - tp->pc) == PQ_CACHE) { 685e69763a3SDoug Rabson vm_page_flag_clear(tp, PG_CLEANCHK); 6863077a9c2SJohn Dyson break; 
6873077a9c2SJohn Dyson } 688bd7e5f99SJohn Dyson vm_page_test_dirty(tp); 689bd7e5f99SJohn Dyson if ((tp->dirty & tp->valid) == 0) { 690e69763a3SDoug Rabson vm_page_flag_clear(tp, PG_CLEANCHK); 691bd7e5f99SJohn Dyson break; 692bd7e5f99SJohn Dyson } 693bd7e5f99SJohn Dyson maf[ i - 1 ] = tp; 694bd7e5f99SJohn Dyson maxf++; 695bd7e5f99SJohn Dyson continue; 696bd7e5f99SJohn Dyson } 697bd7e5f99SJohn Dyson break; 698bd7e5f99SJohn Dyson } 699aef922f5SJohn Dyson 700bd7e5f99SJohn Dyson maxb = 0; 701bd7e5f99SJohn Dyson chkb = vm_pageout_page_count - maxf; 702bd7e5f99SJohn Dyson if (chkb) { 703bd7e5f99SJohn Dyson for (i = 1; i < chkb; i++) { 7048aef1712SMatthew Dillon if ((tp = vm_page_lookup(object, pi - i)) != NULL) { 705bd7e5f99SJohn Dyson if ((tp->flags & PG_BUSY) || 706ffc82b0aSJohn Dyson (tp->flags & PG_CLEANCHK) == 0 || 707ffc82b0aSJohn Dyson (tp->busy != 0)) 708bd7e5f99SJohn Dyson break; 7095070c7f8SJohn Dyson if((tp->queue - tp->pc) == PQ_CACHE) { 710e69763a3SDoug Rabson vm_page_flag_clear(tp, PG_CLEANCHK); 7113077a9c2SJohn Dyson break; 7123077a9c2SJohn Dyson } 713bd7e5f99SJohn Dyson vm_page_test_dirty(tp); 714bd7e5f99SJohn Dyson if ((tp->dirty & tp->valid) == 0) { 715e69763a3SDoug Rabson vm_page_flag_clear(tp, PG_CLEANCHK); 716bd7e5f99SJohn Dyson break; 717bd7e5f99SJohn Dyson } 718bd7e5f99SJohn Dyson mab[ i - 1 ] = tp; 719bd7e5f99SJohn Dyson maxb++; 720bd7e5f99SJohn Dyson continue; 721bd7e5f99SJohn Dyson } 722bd7e5f99SJohn Dyson break; 723bd7e5f99SJohn Dyson } 724bd7e5f99SJohn Dyson } 725bd7e5f99SJohn Dyson 726bd7e5f99SJohn Dyson for (i = 0; i < maxb; i++) { 727bd7e5f99SJohn Dyson int index = (maxb - i) - 1; 728bd7e5f99SJohn Dyson ma[index] = mab[i]; 729e69763a3SDoug Rabson vm_page_flag_clear(ma[index], PG_CLEANCHK); 730bd7e5f99SJohn Dyson } 731e69763a3SDoug Rabson vm_page_flag_clear(p, PG_CLEANCHK); 732bd7e5f99SJohn Dyson ma[maxb] = p; 733bd7e5f99SJohn Dyson for (i = 0 ; i < maxf; i++) { 734bd7e5f99SJohn Dyson int index = (maxb + i) + 1; 735bd7e5f99SJohn Dyson 
ma[index] = maf[i]; 736e69763a3SDoug Rabson vm_page_flag_clear(ma[index], PG_CLEANCHK); 737f6b04d2bSDavid Greenman } 738bd7e5f99SJohn Dyson runlen = maxb + maxf + 1; 739cf2819ccSJohn Dyson 740f35329acSJohn Dyson splx(s); 7418f9110f6SJohn Dyson vm_pageout_flush(ma, runlen, pagerflags); 742cf2819ccSJohn Dyson for (i = 0; i < runlen; i++) { 743cf2819ccSJohn Dyson if (ma[i]->valid & ma[i]->dirty) { 744cf2819ccSJohn Dyson vm_page_protect(ma[i], VM_PROT_READ); 745e69763a3SDoug Rabson vm_page_flag_set(ma[i], PG_CLEANCHK); 746cf2819ccSJohn Dyson } 747cf2819ccSJohn Dyson } 7482d8acc0fSJohn Dyson if (object->generation != curgeneration) 749bd7e5f99SJohn Dyson goto rescan; 750f6b04d2bSDavid Greenman } 751aef922f5SJohn Dyson 752479112dfSMatthew Dillon #if 0 753cf2819ccSJohn Dyson VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc); 754479112dfSMatthew Dillon #endif 755aef922f5SJohn Dyson 756069e9bc1SDoug Rabson vm_object_clear_flag(object, OBJ_CLEANING); 757f5cf85d4SDavid Greenman return; 75826f9a767SRodney W. Grimes } 759df8bae1dSRodney W. Grimes 760df8bae1dSRodney W. Grimes /* 7618e3ad7c9SMatthew Dillon * Same as vm_object_pmap_copy, except range checking really 7621efb74fbSJohn Dyson * works, and is meant for small sections of an object. 7638e3ad7c9SMatthew Dillon * 7648e3ad7c9SMatthew Dillon * This code protects resident pages by making them read-only 7658e3ad7c9SMatthew Dillon * and is typically called on a fork or split when a page 7668e3ad7c9SMatthew Dillon * is converted to copy-on-write. 7678e3ad7c9SMatthew Dillon * 7688e3ad7c9SMatthew Dillon * NOTE: If the page is already at VM_PROT_NONE, calling 7698e3ad7c9SMatthew Dillon * vm_page_protect will have no effect. 
7701efb74fbSJohn Dyson */ 7718e3ad7c9SMatthew Dillon 7721efb74fbSJohn Dyson void 7731b40f8c0SMatthew Dillon vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 7741efb74fbSJohn Dyson { 7751efb74fbSJohn Dyson vm_pindex_t idx; 776d031cff1SMatthew Dillon vm_page_t p; 7771efb74fbSJohn Dyson 7780cddd8f0SMatthew Dillon GIANT_REQUIRED; 7790cddd8f0SMatthew Dillon 7801efb74fbSJohn Dyson if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 7811efb74fbSJohn Dyson return; 7821efb74fbSJohn Dyson 7831efb74fbSJohn Dyson for (idx = start; idx < end; idx++) { 7841efb74fbSJohn Dyson p = vm_page_lookup(object, idx); 7851efb74fbSJohn Dyson if (p == NULL) 7861efb74fbSJohn Dyson continue; 7871efb74fbSJohn Dyson vm_page_protect(p, VM_PROT_READ); 7881efb74fbSJohn Dyson } 7891efb74fbSJohn Dyson } 7901efb74fbSJohn Dyson 7911efb74fbSJohn Dyson /* 792df8bae1dSRodney W. Grimes * vm_object_pmap_remove: 793df8bae1dSRodney W. Grimes * 794df8bae1dSRodney W. Grimes * Removes all physical pages in the specified 795df8bae1dSRodney W. Grimes * object range from all physical maps. 796df8bae1dSRodney W. Grimes * 797df8bae1dSRodney W. Grimes * The object must *not* be locked. 798df8bae1dSRodney W. Grimes */ 79926f9a767SRodney W. Grimes void 8001b40f8c0SMatthew Dillon vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 801df8bae1dSRodney W. Grimes { 802d031cff1SMatthew Dillon vm_page_t p; 803d031cff1SMatthew Dillon 8040cddd8f0SMatthew Dillon GIANT_REQUIRED; 805df8bae1dSRodney W. Grimes if (object == NULL) 806df8bae1dSRodney W. Grimes return; 807cc64b484SAlfred Perlstein TAILQ_FOREACH(p, &object->memq, listq) { 808bd7e5f99SJohn Dyson if (p->pindex >= start && p->pindex < end) 809f919ebdeSDavid Greenman vm_page_protect(p, VM_PROT_NONE); 81026f9a767SRodney W. Grimes } 8116e20a165SJohn Dyson if ((start == 0) && (object->size == end)) 812069e9bc1SDoug Rabson vm_object_clear_flag(object, OBJ_WRITEABLE); 81326f9a767SRodney W. 
Grimes } 814df8bae1dSRodney W. Grimes 815df8bae1dSRodney W. Grimes /* 816867a482dSJohn Dyson * vm_object_madvise: 817867a482dSJohn Dyson * 818867a482dSJohn Dyson * Implements the madvise function at the object/page level. 8191c7c3c6aSMatthew Dillon * 820193b9358SAlan Cox * MADV_WILLNEED (any object) 821193b9358SAlan Cox * 822193b9358SAlan Cox * Activate the specified pages if they are resident. 823193b9358SAlan Cox * 824193b9358SAlan Cox * MADV_DONTNEED (any object) 825193b9358SAlan Cox * 826193b9358SAlan Cox * Deactivate the specified pages if they are resident. 827193b9358SAlan Cox * 828193b9358SAlan Cox * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 829193b9358SAlan Cox * OBJ_ONEMAPPING only) 830193b9358SAlan Cox * 831193b9358SAlan Cox * Deactivate and clean the specified pages if they are 832193b9358SAlan Cox * resident. This permits the process to reuse the pages 833193b9358SAlan Cox * without faulting or the kernel to reclaim the pages 834193b9358SAlan Cox * without I/O. 835867a482dSJohn Dyson */ 836867a482dSJohn Dyson void 8371b40f8c0SMatthew Dillon vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise) 838867a482dSJohn Dyson { 8396e20a165SJohn Dyson vm_pindex_t end, tpindex; 8406e20a165SJohn Dyson vm_object_t tobject; 841867a482dSJohn Dyson vm_page_t m; 842867a482dSJohn Dyson 8430cddd8f0SMatthew Dillon GIANT_REQUIRED; 844867a482dSJohn Dyson if (object == NULL) 845867a482dSJohn Dyson return; 846867a482dSJohn Dyson 847867a482dSJohn Dyson end = pindex + count; 848867a482dSJohn Dyson 8491c7c3c6aSMatthew Dillon /* 8501c7c3c6aSMatthew Dillon * Locate and adjust resident pages 8511c7c3c6aSMatthew Dillon */ 8521c7c3c6aSMatthew Dillon 8531c7c3c6aSMatthew Dillon for (; pindex < end; pindex += 1) { 8546e20a165SJohn Dyson relookup: 8556e20a165SJohn Dyson tobject = object; 8566e20a165SJohn Dyson tpindex = pindex; 8576e20a165SJohn Dyson shadowlookup: 85858b4e6ccSAlan Cox /* 85958b4e6ccSAlan Cox * MADV_FREE only operates on OBJT_DEFAULT or 
OBJT_SWAP pages 86058b4e6ccSAlan Cox * and those pages must be OBJ_ONEMAPPING. 86158b4e6ccSAlan Cox */ 86258b4e6ccSAlan Cox if (advise == MADV_FREE) { 86358b4e6ccSAlan Cox if ((tobject->type != OBJT_DEFAULT && 86458b4e6ccSAlan Cox tobject->type != OBJT_SWAP) || 86558b4e6ccSAlan Cox (tobject->flags & OBJ_ONEMAPPING) == 0) { 8666e20a165SJohn Dyson continue; 8676e20a165SJohn Dyson } 86858b4e6ccSAlan Cox } 8691c7c3c6aSMatthew Dillon 8701c7c3c6aSMatthew Dillon m = vm_page_lookup(tobject, tpindex); 8711c7c3c6aSMatthew Dillon 8721c7c3c6aSMatthew Dillon if (m == NULL) { 8731ce137beSMatthew Dillon /* 8741ce137beSMatthew Dillon * There may be swap even if there is no backing page 8751ce137beSMatthew Dillon */ 8761ce137beSMatthew Dillon if (advise == MADV_FREE && tobject->type == OBJT_SWAP) 8771ce137beSMatthew Dillon swap_pager_freespace(tobject, tpindex, 1); 8781ce137beSMatthew Dillon 8791ce137beSMatthew Dillon /* 8801ce137beSMatthew Dillon * next object 8811ce137beSMatthew Dillon */ 8826e20a165SJohn Dyson tobject = tobject->backing_object; 8831c7c3c6aSMatthew Dillon if (tobject == NULL) 8841c7c3c6aSMatthew Dillon continue; 8856e20a165SJohn Dyson tpindex += OFF_TO_IDX(tobject->backing_object_offset); 8866e20a165SJohn Dyson goto shadowlookup; 8876e20a165SJohn Dyson } 888867a482dSJohn Dyson 889867a482dSJohn Dyson /* 890867a482dSJohn Dyson * If the page is busy or not in a normal active state, 8918b03c8edSMatthew Dillon * we skip it. If the page is not managed there are no 8928b03c8edSMatthew Dillon * page queues to mess with. Things can break if we mess 8938b03c8edSMatthew Dillon * with pages in any of the below states. 
894867a482dSJohn Dyson */ 8951c7c3c6aSMatthew Dillon if ( 8961c7c3c6aSMatthew Dillon m->hold_count || 8971c7c3c6aSMatthew Dillon m->wire_count || 8988b03c8edSMatthew Dillon (m->flags & PG_UNMANAGED) || 8991c7c3c6aSMatthew Dillon m->valid != VM_PAGE_BITS_ALL 9001c7c3c6aSMatthew Dillon ) { 901867a482dSJohn Dyson continue; 9026e20a165SJohn Dyson } 9036e20a165SJohn Dyson 9041c7c3c6aSMatthew Dillon if (vm_page_sleep_busy(m, TRUE, "madvpo")) 9056e20a165SJohn Dyson goto relookup; 906867a482dSJohn Dyson 907867a482dSJohn Dyson if (advise == MADV_WILLNEED) { 908867a482dSJohn Dyson vm_page_activate(m); 9096e20a165SJohn Dyson } else if (advise == MADV_DONTNEED) { 910479112dfSMatthew Dillon vm_page_dontneed(m); 9110a47b48bSJohn Dyson } else if (advise == MADV_FREE) { 9121c7c3c6aSMatthew Dillon /* 9132aaeadf8SMatthew Dillon * Mark the page clean. This will allow the page 9142aaeadf8SMatthew Dillon * to be freed up by the system. However, such pages 9152aaeadf8SMatthew Dillon * are often reused quickly by malloc()/free() 9162aaeadf8SMatthew Dillon * so we do not do anything that would cause 9172aaeadf8SMatthew Dillon * a page fault if we can help it. 9182aaeadf8SMatthew Dillon * 9192aaeadf8SMatthew Dillon * Specifically, we do not try to actually free 9202aaeadf8SMatthew Dillon * the page now nor do we try to put it in the 9212aaeadf8SMatthew Dillon * cache (which would cause a page fault on reuse). 92241c67e12SMatthew Dillon * 92341c67e12SMatthew Dillon * But we do make the page is freeable as we 92441c67e12SMatthew Dillon * can without actually taking the step of unmapping 92541c67e12SMatthew Dillon * it. 
9261c7c3c6aSMatthew Dillon */ 9270385347cSPeter Wemm pmap_clear_modify(m); 9286e20a165SJohn Dyson m->dirty = 0; 92941c67e12SMatthew Dillon m->act_count = 0; 930479112dfSMatthew Dillon vm_page_dontneed(m); 9311ce137beSMatthew Dillon if (tobject->type == OBJT_SWAP) 9321ce137beSMatthew Dillon swap_pager_freespace(tobject, tpindex, 1); 933867a482dSJohn Dyson } 934867a482dSJohn Dyson } 935867a482dSJohn Dyson } 936867a482dSJohn Dyson 937867a482dSJohn Dyson /* 938df8bae1dSRodney W. Grimes * vm_object_shadow: 939df8bae1dSRodney W. Grimes * 940df8bae1dSRodney W. Grimes * Create a new object which is backed by the 941df8bae1dSRodney W. Grimes * specified existing object range. The source 942df8bae1dSRodney W. Grimes * object reference is deallocated. 943df8bae1dSRodney W. Grimes * 944df8bae1dSRodney W. Grimes * The new object and offset into that object 945df8bae1dSRodney W. Grimes * are returned in the source parameters. 946df8bae1dSRodney W. Grimes */ 947df8bae1dSRodney W. Grimes 94826f9a767SRodney W. Grimes void 9491b40f8c0SMatthew Dillon vm_object_shadow( 9501b40f8c0SMatthew Dillon vm_object_t *object, /* IN/OUT */ 9511b40f8c0SMatthew Dillon vm_ooffset_t *offset, /* IN/OUT */ 9521b40f8c0SMatthew Dillon vm_size_t length) 953df8bae1dSRodney W. Grimes { 954d031cff1SMatthew Dillon vm_object_t source; 955d031cff1SMatthew Dillon vm_object_t result; 956df8bae1dSRodney W. Grimes 9570cddd8f0SMatthew Dillon GIANT_REQUIRED; 958df8bae1dSRodney W. Grimes source = *object; 959df8bae1dSRodney W. Grimes 960df8bae1dSRodney W. Grimes /* 9619a2f6362SAlan Cox * Don't create the new object if the old object isn't shared. 
9629a2f6362SAlan Cox */ 9639a2f6362SAlan Cox 964c7997d57SAlan Cox if (source != NULL && 965c7997d57SAlan Cox source->ref_count == 1 && 9669a2f6362SAlan Cox source->handle == NULL && 9679a2f6362SAlan Cox (source->type == OBJT_DEFAULT || 9689a2f6362SAlan Cox source->type == OBJT_SWAP)) 9699a2f6362SAlan Cox return; 9709a2f6362SAlan Cox 9719a2f6362SAlan Cox /* 972df8bae1dSRodney W. Grimes * Allocate a new object with the given length 973df8bae1dSRodney W. Grimes */ 974971dd342SAlfred Perlstein result = vm_object_allocate(OBJT_DEFAULT, length); 975971dd342SAlfred Perlstein KASSERT(result != NULL, ("vm_object_shadow: no object for shadowing")); 976df8bae1dSRodney W. Grimes 977df8bae1dSRodney W. Grimes /* 9780d94caffSDavid Greenman * The new object shadows the source object, adding a reference to it. 9790d94caffSDavid Greenman * Our caller changes his reference to point to the new object, 9800d94caffSDavid Greenman * removing a reference to the source object. Net result: no change 9810d94caffSDavid Greenman * of reference count. 9829b09fe24SMatthew Dillon * 9839b09fe24SMatthew Dillon * Try to optimize the result object's page color when shadowing 984956f3135SPhilippe Charnier * in order to maintain page coloring consistency in the combined 9859b09fe24SMatthew Dillon * shadowed object. 986df8bae1dSRodney W. Grimes */ 98724a1cce3SDavid Greenman result->backing_object = source; 988de5f6a77SJohn Dyson if (source) { 989de5f6a77SJohn Dyson TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list); 990eaf13dd7SJohn Dyson source->shadow_count++; 991eaf13dd7SJohn Dyson source->generation++; 9929b09fe24SMatthew Dillon result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK; 993de5f6a77SJohn Dyson } 994df8bae1dSRodney W. Grimes 995df8bae1dSRodney W. Grimes /* 9960d94caffSDavid Greenman * Store the offset into the source object, and fix up the offset into 9970d94caffSDavid Greenman * the new object. 998df8bae1dSRodney W. Grimes */ 999df8bae1dSRodney W. 
Grimes 100024a1cce3SDavid Greenman result->backing_object_offset = *offset; 1001df8bae1dSRodney W. Grimes 1002df8bae1dSRodney W. Grimes /* 1003df8bae1dSRodney W. Grimes * Return the new things 1004df8bae1dSRodney W. Grimes */ 1005df8bae1dSRodney W. Grimes 1006df8bae1dSRodney W. Grimes *offset = 0; 1007df8bae1dSRodney W. Grimes *object = result; 1008df8bae1dSRodney W. Grimes } 1009df8bae1dSRodney W. Grimes 10102ad1a3f7SMatthew Dillon #define OBSC_TEST_ALL_SHADOWED 0x0001 10112ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_NOWAIT 0x0002 10122ad1a3f7SMatthew Dillon #define OBSC_COLLAPSE_WAIT 0x0004 10132ad1a3f7SMatthew Dillon 10142ad1a3f7SMatthew Dillon static __inline int 10152ad1a3f7SMatthew Dillon vm_object_backing_scan(vm_object_t object, int op) 10162ad1a3f7SMatthew Dillon { 10172ad1a3f7SMatthew Dillon int s; 10182ad1a3f7SMatthew Dillon int r = 1; 10192ad1a3f7SMatthew Dillon vm_page_t p; 10202ad1a3f7SMatthew Dillon vm_object_t backing_object; 10212ad1a3f7SMatthew Dillon vm_pindex_t backing_offset_index; 10222ad1a3f7SMatthew Dillon 10232ad1a3f7SMatthew Dillon s = splvm(); 10240cddd8f0SMatthew Dillon GIANT_REQUIRED; 10252ad1a3f7SMatthew Dillon 10262ad1a3f7SMatthew Dillon backing_object = object->backing_object; 10272ad1a3f7SMatthew Dillon backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 10282ad1a3f7SMatthew Dillon 10292ad1a3f7SMatthew Dillon /* 10302ad1a3f7SMatthew Dillon * Initial conditions 10312ad1a3f7SMatthew Dillon */ 10322ad1a3f7SMatthew Dillon 10332ad1a3f7SMatthew Dillon if (op & OBSC_TEST_ALL_SHADOWED) { 10342ad1a3f7SMatthew Dillon /* 1035956f3135SPhilippe Charnier * We do not want to have to test for the existence of 10362ad1a3f7SMatthew Dillon * swap pages in the backing object. XXX but with the 10372ad1a3f7SMatthew Dillon * new swapper this would be pretty easy to do. 10382ad1a3f7SMatthew Dillon * 10392ad1a3f7SMatthew Dillon * XXX what about anonymous MAP_SHARED memory that hasn't 10402ad1a3f7SMatthew Dillon * been ZFOD faulted yet? 
If we do not test for this, the 10412ad1a3f7SMatthew Dillon * shadow test may succeed! XXX 10422ad1a3f7SMatthew Dillon */ 10432ad1a3f7SMatthew Dillon if (backing_object->type != OBJT_DEFAULT) { 10442ad1a3f7SMatthew Dillon splx(s); 10452ad1a3f7SMatthew Dillon return(0); 10462ad1a3f7SMatthew Dillon } 10472ad1a3f7SMatthew Dillon } 10482ad1a3f7SMatthew Dillon if (op & OBSC_COLLAPSE_WAIT) { 10492ad1a3f7SMatthew Dillon vm_object_set_flag(backing_object, OBJ_DEAD); 10502ad1a3f7SMatthew Dillon } 10512ad1a3f7SMatthew Dillon 10522ad1a3f7SMatthew Dillon /* 10532ad1a3f7SMatthew Dillon * Our scan 10542ad1a3f7SMatthew Dillon */ 10552ad1a3f7SMatthew Dillon 10562ad1a3f7SMatthew Dillon p = TAILQ_FIRST(&backing_object->memq); 10572ad1a3f7SMatthew Dillon while (p) { 10582ad1a3f7SMatthew Dillon vm_page_t next = TAILQ_NEXT(p, listq); 10592ad1a3f7SMatthew Dillon vm_pindex_t new_pindex = p->pindex - backing_offset_index; 10602ad1a3f7SMatthew Dillon 10612ad1a3f7SMatthew Dillon if (op & OBSC_TEST_ALL_SHADOWED) { 10622ad1a3f7SMatthew Dillon vm_page_t pp; 10632ad1a3f7SMatthew Dillon 10642ad1a3f7SMatthew Dillon /* 10652ad1a3f7SMatthew Dillon * Ignore pages outside the parent object's range 10662ad1a3f7SMatthew Dillon * and outside the parent object's mapping of the 10672ad1a3f7SMatthew Dillon * backing object. 10682ad1a3f7SMatthew Dillon * 10692ad1a3f7SMatthew Dillon * note that we do not busy the backing object's 10702ad1a3f7SMatthew Dillon * page. 10712ad1a3f7SMatthew Dillon */ 10722ad1a3f7SMatthew Dillon 10732ad1a3f7SMatthew Dillon if ( 10742ad1a3f7SMatthew Dillon p->pindex < backing_offset_index || 10752ad1a3f7SMatthew Dillon new_pindex >= object->size 10762ad1a3f7SMatthew Dillon ) { 10772ad1a3f7SMatthew Dillon p = next; 10782ad1a3f7SMatthew Dillon continue; 10792ad1a3f7SMatthew Dillon } 10802ad1a3f7SMatthew Dillon 10812ad1a3f7SMatthew Dillon /* 10822ad1a3f7SMatthew Dillon * See if the parent has the page or if the parent's 10832ad1a3f7SMatthew Dillon * object pager has the page. 
If the parent has the 10842ad1a3f7SMatthew Dillon * page but the page is not valid, the parent's 10852ad1a3f7SMatthew Dillon * object pager must have the page. 10862ad1a3f7SMatthew Dillon * 10872ad1a3f7SMatthew Dillon * If this fails, the parent does not completely shadow 10882ad1a3f7SMatthew Dillon * the object and we might as well give up now. 10892ad1a3f7SMatthew Dillon */ 10902ad1a3f7SMatthew Dillon 10912ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 10922ad1a3f7SMatthew Dillon if ( 10932ad1a3f7SMatthew Dillon (pp == NULL || pp->valid == 0) && 10942ad1a3f7SMatthew Dillon !vm_pager_has_page(object, new_pindex, NULL, NULL) 10952ad1a3f7SMatthew Dillon ) { 10962ad1a3f7SMatthew Dillon r = 0; 10972ad1a3f7SMatthew Dillon break; 10982ad1a3f7SMatthew Dillon } 10992ad1a3f7SMatthew Dillon } 11002ad1a3f7SMatthew Dillon 11012ad1a3f7SMatthew Dillon /* 11022ad1a3f7SMatthew Dillon * Check for busy page 11032ad1a3f7SMatthew Dillon */ 11042ad1a3f7SMatthew Dillon 11052ad1a3f7SMatthew Dillon if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { 11062ad1a3f7SMatthew Dillon vm_page_t pp; 11072ad1a3f7SMatthew Dillon 11082ad1a3f7SMatthew Dillon if (op & OBSC_COLLAPSE_NOWAIT) { 11092ad1a3f7SMatthew Dillon if ( 11102ad1a3f7SMatthew Dillon (p->flags & PG_BUSY) || 11112ad1a3f7SMatthew Dillon !p->valid || 11122ad1a3f7SMatthew Dillon p->hold_count || 11132ad1a3f7SMatthew Dillon p->wire_count || 11142ad1a3f7SMatthew Dillon p->busy 11152ad1a3f7SMatthew Dillon ) { 11162ad1a3f7SMatthew Dillon p = next; 11172ad1a3f7SMatthew Dillon continue; 11182ad1a3f7SMatthew Dillon } 11192ad1a3f7SMatthew Dillon } else if (op & OBSC_COLLAPSE_WAIT) { 11202ad1a3f7SMatthew Dillon if (vm_page_sleep_busy(p, TRUE, "vmocol")) { 11212ad1a3f7SMatthew Dillon /* 11222ad1a3f7SMatthew Dillon * If we slept, anything could have 11232ad1a3f7SMatthew Dillon * happened. 
Since the object is 11242ad1a3f7SMatthew Dillon * marked dead, the backing offset 11252ad1a3f7SMatthew Dillon * should not have changed so we 11262ad1a3f7SMatthew Dillon * just restart our scan. 11272ad1a3f7SMatthew Dillon */ 11282ad1a3f7SMatthew Dillon p = TAILQ_FIRST(&backing_object->memq); 11292ad1a3f7SMatthew Dillon continue; 11302ad1a3f7SMatthew Dillon } 11312ad1a3f7SMatthew Dillon } 11322ad1a3f7SMatthew Dillon 11332ad1a3f7SMatthew Dillon /* 11342ad1a3f7SMatthew Dillon * Busy the page 11352ad1a3f7SMatthew Dillon */ 11362ad1a3f7SMatthew Dillon vm_page_busy(p); 11372ad1a3f7SMatthew Dillon 11382ad1a3f7SMatthew Dillon KASSERT( 11392ad1a3f7SMatthew Dillon p->object == backing_object, 11402ad1a3f7SMatthew Dillon ("vm_object_qcollapse(): object mismatch") 11412ad1a3f7SMatthew Dillon ); 11422ad1a3f7SMatthew Dillon 11432ad1a3f7SMatthew Dillon /* 11442ad1a3f7SMatthew Dillon * Destroy any associated swap 11452ad1a3f7SMatthew Dillon */ 11462ad1a3f7SMatthew Dillon if (backing_object->type == OBJT_SWAP) { 11472ad1a3f7SMatthew Dillon swap_pager_freespace( 11482ad1a3f7SMatthew Dillon backing_object, 11492ad1a3f7SMatthew Dillon p->pindex, 11502ad1a3f7SMatthew Dillon 1 11512ad1a3f7SMatthew Dillon ); 11522ad1a3f7SMatthew Dillon } 11532ad1a3f7SMatthew Dillon 11542ad1a3f7SMatthew Dillon if ( 11552ad1a3f7SMatthew Dillon p->pindex < backing_offset_index || 11562ad1a3f7SMatthew Dillon new_pindex >= object->size 11572ad1a3f7SMatthew Dillon ) { 11582ad1a3f7SMatthew Dillon /* 11592ad1a3f7SMatthew Dillon * Page is out of the parent object's range, we 11602ad1a3f7SMatthew Dillon * can simply destroy it. 
11612ad1a3f7SMatthew Dillon */ 11622ad1a3f7SMatthew Dillon vm_page_protect(p, VM_PROT_NONE); 11632ad1a3f7SMatthew Dillon vm_page_free(p); 11642ad1a3f7SMatthew Dillon p = next; 11652ad1a3f7SMatthew Dillon continue; 11662ad1a3f7SMatthew Dillon } 11672ad1a3f7SMatthew Dillon 11682ad1a3f7SMatthew Dillon pp = vm_page_lookup(object, new_pindex); 11692ad1a3f7SMatthew Dillon if ( 11702ad1a3f7SMatthew Dillon pp != NULL || 11712ad1a3f7SMatthew Dillon vm_pager_has_page(object, new_pindex, NULL, NULL) 11722ad1a3f7SMatthew Dillon ) { 11732ad1a3f7SMatthew Dillon /* 11742ad1a3f7SMatthew Dillon * page already exists in parent OR swap exists 11752ad1a3f7SMatthew Dillon * for this location in the parent. Destroy 11762ad1a3f7SMatthew Dillon * the original page from the backing object. 11772ad1a3f7SMatthew Dillon * 11782ad1a3f7SMatthew Dillon * Leave the parent's page alone 11792ad1a3f7SMatthew Dillon */ 11802ad1a3f7SMatthew Dillon vm_page_protect(p, VM_PROT_NONE); 11812ad1a3f7SMatthew Dillon vm_page_free(p); 11822ad1a3f7SMatthew Dillon p = next; 11832ad1a3f7SMatthew Dillon continue; 11842ad1a3f7SMatthew Dillon } 11852ad1a3f7SMatthew Dillon 11862ad1a3f7SMatthew Dillon /* 11872ad1a3f7SMatthew Dillon * Page does not exist in parent, rename the 11882ad1a3f7SMatthew Dillon * page from the backing object to the main object. 1189d1bf5d56SMatthew Dillon * 1190d1bf5d56SMatthew Dillon * If the page was mapped to a process, it can remain 1191d1bf5d56SMatthew Dillon * mapped through the rename. 
11922ad1a3f7SMatthew Dillon */ 11932ad1a3f7SMatthew Dillon if ((p->queue - p->pc) == PQ_CACHE) 11942ad1a3f7SMatthew Dillon vm_page_deactivate(p); 11952ad1a3f7SMatthew Dillon 11962ad1a3f7SMatthew Dillon vm_page_rename(p, object, new_pindex); 11972ad1a3f7SMatthew Dillon /* page automatically made dirty by rename */ 11982ad1a3f7SMatthew Dillon } 11992ad1a3f7SMatthew Dillon p = next; 12002ad1a3f7SMatthew Dillon } 12012ad1a3f7SMatthew Dillon splx(s); 12022ad1a3f7SMatthew Dillon return(r); 12032ad1a3f7SMatthew Dillon } 12042ad1a3f7SMatthew Dillon 1205df8bae1dSRodney W. Grimes 1206df8bae1dSRodney W. Grimes /* 12072fe6e4d7SDavid Greenman * this version of collapse allows the operation to occur earlier and 12082fe6e4d7SDavid Greenman * when paging_in_progress is true for an object... This is not a complete 12092fe6e4d7SDavid Greenman * operation, but should plug 99.9% of the rest of the leaks. 12102fe6e4d7SDavid Greenman */ 12112fe6e4d7SDavid Greenman static void 12121b40f8c0SMatthew Dillon vm_object_qcollapse(vm_object_t object) 12132fe6e4d7SDavid Greenman { 12142ad1a3f7SMatthew Dillon vm_object_t backing_object = object->backing_object; 12152fe6e4d7SDavid Greenman 12161b40f8c0SMatthew Dillon GIANT_REQUIRED; 12171b40f8c0SMatthew Dillon 12182fe6e4d7SDavid Greenman if (backing_object->ref_count != 1) 12192fe6e4d7SDavid Greenman return; 12202fe6e4d7SDavid Greenman 1221010cf3b9SDavid Greenman backing_object->ref_count += 2; 1222010cf3b9SDavid Greenman 12232ad1a3f7SMatthew Dillon vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT); 12241c7c3c6aSMatthew Dillon 1225010cf3b9SDavid Greenman backing_object->ref_count -= 2; 12262fe6e4d7SDavid Greenman } 12272fe6e4d7SDavid Greenman 1228df8bae1dSRodney W. Grimes /* 1229df8bae1dSRodney W. Grimes * vm_object_collapse: 1230df8bae1dSRodney W. Grimes * 1231df8bae1dSRodney W. Grimes * Collapse an object with the object backing it. 1232df8bae1dSRodney W. Grimes * Pages in the backing object are moved into the 1233df8bae1dSRodney W. 
Grimes * parent, and the backing object is deallocated. 1234df8bae1dSRodney W. Grimes */ 123526f9a767SRodney W. Grimes void 12361b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object) 1237df8bae1dSRodney W. Grimes { 12380cddd8f0SMatthew Dillon GIANT_REQUIRED; 123923955314SAlfred Perlstein 1240df8bae1dSRodney W. Grimes while (TRUE) { 12412ad1a3f7SMatthew Dillon vm_object_t backing_object; 12422ad1a3f7SMatthew Dillon 1243df8bae1dSRodney W. Grimes /* 1244df8bae1dSRodney W. Grimes * Verify that the conditions are right for collapse: 1245df8bae1dSRodney W. Grimes * 12462ad1a3f7SMatthew Dillon * The object exists and the backing object exists. 1247df8bae1dSRodney W. Grimes */ 12482fe6e4d7SDavid Greenman if (object == NULL) 12492ad1a3f7SMatthew Dillon break; 1250df8bae1dSRodney W. Grimes 125124a1cce3SDavid Greenman if ((backing_object = object->backing_object) == NULL) 12522ad1a3f7SMatthew Dillon break; 1253df8bae1dSRodney W. Grimes 1254f919ebdeSDavid Greenman /* 1255f919ebdeSDavid Greenman * we check the backing object first, because it is most likely 125624a1cce3SDavid Greenman * not collapsable. 
1257f919ebdeSDavid Greenman */ 125824a1cce3SDavid Greenman if (backing_object->handle != NULL || 125924a1cce3SDavid Greenman (backing_object->type != OBJT_DEFAULT && 126024a1cce3SDavid Greenman backing_object->type != OBJT_SWAP) || 1261f919ebdeSDavid Greenman (backing_object->flags & OBJ_DEAD) || 126224a1cce3SDavid Greenman object->handle != NULL || 126324a1cce3SDavid Greenman (object->type != OBJT_DEFAULT && 126424a1cce3SDavid Greenman object->type != OBJT_SWAP) || 126524a1cce3SDavid Greenman (object->flags & OBJ_DEAD)) { 12662ad1a3f7SMatthew Dillon break; 126724a1cce3SDavid Greenman } 12689b4814bbSDavid Greenman 12692ad1a3f7SMatthew Dillon if ( 12702ad1a3f7SMatthew Dillon object->paging_in_progress != 0 || 12712ad1a3f7SMatthew Dillon backing_object->paging_in_progress != 0 12722ad1a3f7SMatthew Dillon ) { 1273b9921222SDavid Greenman vm_object_qcollapse(object); 12742ad1a3f7SMatthew Dillon break; 1275df8bae1dSRodney W. Grimes } 1276f919ebdeSDavid Greenman 127726f9a767SRodney W. Grimes /* 12780d94caffSDavid Greenman * We know that we can either collapse the backing object (if 12792ad1a3f7SMatthew Dillon * the parent is the only reference to it) or (perhaps) have 12802ad1a3f7SMatthew Dillon * the parent bypass the object if the parent happens to shadow 12812ad1a3f7SMatthew Dillon * all the resident pages in the entire backing object. 12822ad1a3f7SMatthew Dillon * 12832ad1a3f7SMatthew Dillon * This is ignoring pager-backed pages such as swap pages. 12842ad1a3f7SMatthew Dillon * vm_object_backing_scan fails the shadowing test in this 12852ad1a3f7SMatthew Dillon * case. 1286df8bae1dSRodney W. Grimes */ 1287df8bae1dSRodney W. Grimes 1288df8bae1dSRodney W. Grimes if (backing_object->ref_count == 1) { 1289df8bae1dSRodney W. Grimes /* 12902ad1a3f7SMatthew Dillon * If there is exactly one reference to the backing 12912ad1a3f7SMatthew Dillon * object, we can collapse it into the parent. 1292df8bae1dSRodney W. Grimes */ 1293df8bae1dSRodney W. 
Grimes 12942ad1a3f7SMatthew Dillon vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT); 1295df8bae1dSRodney W. Grimes 1296df8bae1dSRodney W. Grimes /* 1297df8bae1dSRodney W. Grimes * Move the pager from backing_object to object. 1298df8bae1dSRodney W. Grimes */ 1299df8bae1dSRodney W. Grimes 130024a1cce3SDavid Greenman if (backing_object->type == OBJT_SWAP) { 1301d474eaaaSDoug Rabson vm_object_pip_add(backing_object, 1); 130224a1cce3SDavid Greenman 130324a1cce3SDavid Greenman /* 13041c7c3c6aSMatthew Dillon * scrap the paging_offset junk and do a 13051c7c3c6aSMatthew Dillon * discrete copy. This also removes major 13061c7c3c6aSMatthew Dillon * assumptions about how the swap-pager 13071c7c3c6aSMatthew Dillon * works from where it doesn't belong. The 13081c7c3c6aSMatthew Dillon * new swapper is able to optimize the 13091c7c3c6aSMatthew Dillon * destroy-source case. 131024a1cce3SDavid Greenman */ 13111c7c3c6aSMatthew Dillon 13121c7c3c6aSMatthew Dillon vm_object_pip_add(object, 1); 13131c7c3c6aSMatthew Dillon swap_pager_copy( 13141c7c3c6aSMatthew Dillon backing_object, 13151c7c3c6aSMatthew Dillon object, 13161c7c3c6aSMatthew Dillon OFF_TO_IDX(object->backing_object_offset), TRUE); 1317f919ebdeSDavid Greenman vm_object_pip_wakeup(object); 1318c0503609SDavid Greenman 1319f919ebdeSDavid Greenman vm_object_pip_wakeup(backing_object); 1320c0503609SDavid Greenman } 1321df8bae1dSRodney W. Grimes /* 1322df8bae1dSRodney W. Grimes * Object now shadows whatever backing_object did. 13232ad1a3f7SMatthew Dillon * Note that the reference to 13242ad1a3f7SMatthew Dillon * backing_object->backing_object moves from within 13252ad1a3f7SMatthew Dillon * backing_object to within object. 1326df8bae1dSRodney W. Grimes */ 1327df8bae1dSRodney W. 
Grimes 13282ad1a3f7SMatthew Dillon TAILQ_REMOVE( 13292ad1a3f7SMatthew Dillon &object->backing_object->shadow_head, 13302ad1a3f7SMatthew Dillon object, 13312ad1a3f7SMatthew Dillon shadow_list 13322ad1a3f7SMatthew Dillon ); 1333eaf13dd7SJohn Dyson object->backing_object->shadow_count--; 1334eaf13dd7SJohn Dyson object->backing_object->generation++; 1335de5f6a77SJohn Dyson if (backing_object->backing_object) { 13362ad1a3f7SMatthew Dillon TAILQ_REMOVE( 13372ad1a3f7SMatthew Dillon &backing_object->backing_object->shadow_head, 13382ad1a3f7SMatthew Dillon backing_object, 13392ad1a3f7SMatthew Dillon shadow_list 13402ad1a3f7SMatthew Dillon ); 1341eaf13dd7SJohn Dyson backing_object->backing_object->shadow_count--; 1342eaf13dd7SJohn Dyson backing_object->backing_object->generation++; 1343de5f6a77SJohn Dyson } 134424a1cce3SDavid Greenman object->backing_object = backing_object->backing_object; 1345de5f6a77SJohn Dyson if (object->backing_object) { 13462ad1a3f7SMatthew Dillon TAILQ_INSERT_TAIL( 13472ad1a3f7SMatthew Dillon &object->backing_object->shadow_head, 13482ad1a3f7SMatthew Dillon object, 13492ad1a3f7SMatthew Dillon shadow_list 13502ad1a3f7SMatthew Dillon ); 1351eaf13dd7SJohn Dyson object->backing_object->shadow_count++; 1352eaf13dd7SJohn Dyson object->backing_object->generation++; 1353de5f6a77SJohn Dyson } 13542fe6e4d7SDavid Greenman 13552ad1a3f7SMatthew Dillon object->backing_object_offset += 13562ad1a3f7SMatthew Dillon backing_object->backing_object_offset; 13572ad1a3f7SMatthew Dillon 1358df8bae1dSRodney W. Grimes /* 1359df8bae1dSRodney W. Grimes * Discard backing_object. 1360df8bae1dSRodney W. Grimes * 13610d94caffSDavid Greenman * Since the backing object has no pages, no pager left, 13620d94caffSDavid Greenman * and no object references within it, all that is 13630d94caffSDavid Greenman * necessary is to dispose of it. 1364df8bae1dSRodney W. Grimes */ 1365df8bae1dSRodney W. 
Grimes 13662ad1a3f7SMatthew Dillon TAILQ_REMOVE( 13672ad1a3f7SMatthew Dillon &vm_object_list, 13682ad1a3f7SMatthew Dillon backing_object, 13692ad1a3f7SMatthew Dillon object_list 13702ad1a3f7SMatthew Dillon ); 1371df8bae1dSRodney W. Grimes vm_object_count--; 1372df8bae1dSRodney W. Grimes 137399448ed1SJohn Dyson zfree(obj_zone, backing_object); 1374df8bae1dSRodney W. Grimes 1375df8bae1dSRodney W. Grimes object_collapses++; 13760d94caffSDavid Greenman } else { 137795e5e988SJohn Dyson vm_object_t new_backing_object; 1378df8bae1dSRodney W. Grimes 1379df8bae1dSRodney W. Grimes /* 13802ad1a3f7SMatthew Dillon * If we do not entirely shadow the backing object, 13812ad1a3f7SMatthew Dillon * there is nothing we can do so we give up. 1382df8bae1dSRodney W. Grimes */ 1383df8bae1dSRodney W. Grimes 13842ad1a3f7SMatthew Dillon if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) { 13852ad1a3f7SMatthew Dillon break; 138624a1cce3SDavid Greenman } 1387df8bae1dSRodney W. Grimes 1388df8bae1dSRodney W. Grimes /* 13890d94caffSDavid Greenman * Make the parent shadow the next object in the 13900d94caffSDavid Greenman * chain. Deallocating backing_object will not remove 13910d94caffSDavid Greenman * it, since its reference count is at least 2. 1392df8bae1dSRodney W. Grimes */ 1393df8bae1dSRodney W. 
Grimes 13942ad1a3f7SMatthew Dillon TAILQ_REMOVE( 13952ad1a3f7SMatthew Dillon &backing_object->shadow_head, 13962ad1a3f7SMatthew Dillon object, 13972ad1a3f7SMatthew Dillon shadow_list 13982ad1a3f7SMatthew Dillon ); 1399eaf13dd7SJohn Dyson backing_object->shadow_count--; 1400eaf13dd7SJohn Dyson backing_object->generation++; 140195e5e988SJohn Dyson 140295e5e988SJohn Dyson new_backing_object = backing_object->backing_object; 14038aef1712SMatthew Dillon if ((object->backing_object = new_backing_object) != NULL) { 140495e5e988SJohn Dyson vm_object_reference(new_backing_object); 14052ad1a3f7SMatthew Dillon TAILQ_INSERT_TAIL( 14062ad1a3f7SMatthew Dillon &new_backing_object->shadow_head, 14072ad1a3f7SMatthew Dillon object, 14082ad1a3f7SMatthew Dillon shadow_list 14092ad1a3f7SMatthew Dillon ); 1410eaf13dd7SJohn Dyson new_backing_object->shadow_count++; 1411eaf13dd7SJohn Dyson new_backing_object->generation++; 141295e5e988SJohn Dyson object->backing_object_offset += 141395e5e988SJohn Dyson backing_object->backing_object_offset; 1414de5f6a77SJohn Dyson } 1415df8bae1dSRodney W. Grimes 1416df8bae1dSRodney W. Grimes /* 14170d94caffSDavid Greenman * Drop the reference count on backing_object. Since 14180d94caffSDavid Greenman * its ref_count was at least 2, it will not vanish; 1419eaf13dd7SJohn Dyson * so we don't need to call vm_object_deallocate, but 1420eaf13dd7SJohn Dyson * we do anyway. 1421df8bae1dSRodney W. Grimes */ 142295e5e988SJohn Dyson vm_object_deallocate(backing_object); 1423df8bae1dSRodney W. Grimes object_bypasses++; 1424df8bae1dSRodney W. Grimes } 1425df8bae1dSRodney W. Grimes 1426df8bae1dSRodney W. Grimes /* 1427df8bae1dSRodney W. Grimes * Try again with this object's new backing object. 1428df8bae1dSRodney W. Grimes */ 1429df8bae1dSRodney W. Grimes } 1430df8bae1dSRodney W. Grimes } 1431df8bae1dSRodney W. Grimes 1432df8bae1dSRodney W. Grimes /* 1433df8bae1dSRodney W. Grimes * vm_object_page_remove: [internal] 1434df8bae1dSRodney W. Grimes * 1435df8bae1dSRodney W. 
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	Calling with start == 0 and end == 0 removes every resident page
 *	in the object.  Wired pages are never freed here: they are only
 *	unmapped and, unless clean_only is set, invalidated.  When
 *	clean_only is TRUE, pages found to be dirty are left in place.
 *
 *	The object must be locked.  This routine may sleep waiting for a
 *	busy page and restarts its scan from the top when it does.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, boolean_t clean_only)
{
	vm_page_t p, next;
	unsigned int size;
	int all;

	GIANT_REQUIRED;

	/* Nothing to do for a missing or page-less object. */
	if (object == NULL ||
	    object->resident_page_count == 0)
		return;

	/* (start, end) == (0, 0) is the "remove everything" request. */
	all = ((end == 0) && (start == 0));

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS, ("attempt to remove pages from a physical object"));

	/* Hold a paging-in-progress reference while tearing pages out. */
	vm_object_pip_add(object, 1);
again:
	size = end - start;
	/*
	 * Strategy heuristic: when removing everything, or more than a
	 * quarter of the resident pages, walk the object's page list once
	 * instead of doing a vm_page_lookup() for every index.
	 */
	if (all || size > object->resident_page_count / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if (all || ((start <= p->pindex) && (p->pindex < end))) {
				/*
				 * Wired pages cannot be freed: just remove
				 * all mappings and optionally invalidate.
				 */
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */

				/* Sleeping invalidates the scan; restart. */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				/* In clean_only mode, keep dirty pages. */
				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
		}
	} else {
		/*
		 * Small range: look up each index individually.  The
		 * per-page handling mirrors the list-walk case above.
		 */
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != 0) {

				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					start += 1;
					size -= 1;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}

/*
 * Routine:	vm_object_coalesce
 * Function:	Coalesces two objects backing up adjoining
 *		regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
Grimes * NOTE: Only works at the moment if the second object is NULL - 1543df8bae1dSRodney W. Grimes * if it's not, which object do we lock first? 1544df8bae1dSRodney W. Grimes * 1545df8bae1dSRodney W. Grimes * Parameters: 1546df8bae1dSRodney W. Grimes * prev_object First object to coalesce 1547df8bae1dSRodney W. Grimes * prev_offset Offset into prev_object 1548df8bae1dSRodney W. Grimes * next_object Second object into coalesce 1549df8bae1dSRodney W. Grimes * next_offset Offset into next_object 1550df8bae1dSRodney W. Grimes * 1551df8bae1dSRodney W. Grimes * prev_size Size of reference to prev_object 1552df8bae1dSRodney W. Grimes * next_size Size of reference to next_object 1553df8bae1dSRodney W. Grimes * 1554df8bae1dSRodney W. Grimes * Conditions: 1555df8bae1dSRodney W. Grimes * The object must *not* be locked. 1556df8bae1dSRodney W. Grimes */ 15570d94caffSDavid Greenman boolean_t 15581b40f8c0SMatthew Dillon vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex, vm_size_t prev_size, vm_size_t next_size) 1559df8bae1dSRodney W. Grimes { 1560ea41812fSAlan Cox vm_pindex_t next_pindex; 1561df8bae1dSRodney W. Grimes 15620cddd8f0SMatthew Dillon GIANT_REQUIRED; 156323955314SAlfred Perlstein 1564df8bae1dSRodney W. Grimes if (prev_object == NULL) { 1565df8bae1dSRodney W. Grimes return (TRUE); 1566df8bae1dSRodney W. Grimes } 1567df8bae1dSRodney W. Grimes 15684112823fSMatthew Dillon if (prev_object->type != OBJT_DEFAULT && 15694112823fSMatthew Dillon prev_object->type != OBJT_SWAP) { 157030dcfc09SJohn Dyson return (FALSE); 157130dcfc09SJohn Dyson } 157230dcfc09SJohn Dyson 1573df8bae1dSRodney W. Grimes /* 1574df8bae1dSRodney W. Grimes * Try to collapse the object first 1575df8bae1dSRodney W. Grimes */ 1576df8bae1dSRodney W. Grimes vm_object_collapse(prev_object); 1577df8bae1dSRodney W. Grimes 1578df8bae1dSRodney W. Grimes /* 15790d94caffSDavid Greenman * Can't coalesce if: . more than one reference . paged out . 
shadows 15800d94caffSDavid Greenman * another object . has a copy elsewhere (any of which mean that the 15810d94caffSDavid Greenman * pages not mapped to prev_entry may be in use anyway) 1582df8bae1dSRodney W. Grimes */ 1583df8bae1dSRodney W. Grimes 15848cc7e047SJohn Dyson if (prev_object->backing_object != NULL) { 1585df8bae1dSRodney W. Grimes return (FALSE); 1586df8bae1dSRodney W. Grimes } 1587a316d390SJohn Dyson 1588a316d390SJohn Dyson prev_size >>= PAGE_SHIFT; 1589a316d390SJohn Dyson next_size >>= PAGE_SHIFT; 1590ea41812fSAlan Cox next_pindex = prev_pindex + prev_size; 15918cc7e047SJohn Dyson 15928cc7e047SJohn Dyson if ((prev_object->ref_count > 1) && 1593ea41812fSAlan Cox (prev_object->size != next_pindex)) { 15948cc7e047SJohn Dyson return (FALSE); 15958cc7e047SJohn Dyson } 15968cc7e047SJohn Dyson 1597df8bae1dSRodney W. Grimes /* 15980d94caffSDavid Greenman * Remove any pages that may still be in the object from a previous 15990d94caffSDavid Greenman * deallocation. 1600df8bae1dSRodney W. Grimes */ 1601ea41812fSAlan Cox if (next_pindex < prev_object->size) { 1602df8bae1dSRodney W. Grimes vm_object_page_remove(prev_object, 1603ea41812fSAlan Cox next_pindex, 1604ea41812fSAlan Cox next_pindex + next_size, FALSE); 1605ea41812fSAlan Cox if (prev_object->type == OBJT_SWAP) 1606ea41812fSAlan Cox swap_pager_freespace(prev_object, 1607ea41812fSAlan Cox next_pindex, next_size); 1608ea41812fSAlan Cox } 1609df8bae1dSRodney W. Grimes 1610df8bae1dSRodney W. Grimes /* 1611df8bae1dSRodney W. Grimes * Extend the object if necessary. 1612df8bae1dSRodney W. Grimes */ 1613ea41812fSAlan Cox if (next_pindex + next_size > prev_object->size) 1614ea41812fSAlan Cox prev_object->size = next_pindex + next_size; 1615df8bae1dSRodney W. Grimes 1616df8bae1dSRodney W. Grimes return (TRUE); 1617df8bae1dSRodney W. Grimes } 1618df8bae1dSRodney W. 
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

/*
 * Helper for vm_object_in_map(): report whether "object" is reachable
 * from "map".  With entry == 0 every entry of the map is scanned;
 * otherwise only the given entry is examined.  Submap entries are
 * descended recursively, and for ordinary entries the whole
 * backing-object chain is compared against "object".
 * Returns 1 when found, 0 otherwise.
 */
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		/* Scan every entry, bounded by the map's entry count. */
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if( _vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		/* Recurse into the submap's entries. */
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if( _vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		/* Check the entry's object and every object shadowed by it. */
		for (; obj; obj = obj->backing_object)
			if( obj == object) {
				return 1;
			}
	}
	return 0;
}

/*
 * Report whether "object" is referenced by any process vmspace or by one
 * of the well-known kernel maps.  Used only by the DDB check below.
 * Returns 1 when the object is found in some map, 0 otherwise.
 */
static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	LIST_FOREACH(p, &allproc, p_list) {
		if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if( _vm_object_in_map( kernel_map, object, 0))
		return 1;
	if( _vm_object_in_map( kmem_map, object, 0))
		return 1;
	if( _vm_object_in_map( pager_map, object, 0))
		return 1;
	if( _vm_object_in_map( buffer_map, object, 0))
		return 1;
	return 0;
}

/*
 * DDB "show vmochk" command: consistency-check the global object list.
 */
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				/*
				 * NOTE(review): object->size is passed for
				 * both the %lu and the 0x%lx conversions --
				 * looks like one of them was meant to print
				 * a different field; confirm before relying
				 * on this output.
				 */
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 * vm_object_print:	[ debug ]
 *
 * DDB "show object <addr>" command: dump one vm_object's summary line;
 * when "full" (taken from have_addr) is set, list its resident pages too.
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	/* Dump resident pages, six (pindex, physaddr) pairs per line. */
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX.
 */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	/* Thin forwarding wrapper around the static DDB command body. */
	vm_object_print_static(addr, have_addr, count, modif);
}

/*
 * DDB "show vmopag" command: for every object on vm_object_list, print
 * the resident pages of the first 128 indices, coalesced into runs of
 * physically contiguous pages.  Output pauses once more than 18 lines
 * have been printed; any console key other than space aborts the dump.
 */
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;		/* lines printed since the last pause */
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;	/* fidx: first index of current run */
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;		/* run length; 0 = no run pending */
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if ( nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		/* Only the first 128 pages of each object are examined. */
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				/* Hole in the object: flush a pending run. */
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					if ( nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}


			/* Physically contiguous with the run: extend it. */
			if (rcount &&
				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				/*
				 * Not contiguous.  If the page-distance to
				 * the run is zero modulo PQ_L2_MASK the page
				 * is folded into the run anyway (rebasing
				 * pa) -- presumably to merge color-equivalent
				 * pages; TODO confirm.  Otherwise flush the
				 * run, reporting the page delta as "pd".
				 */
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if ( nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			/* Start a new run at this page. */
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		/* Flush the final run, if any. */
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
			if ( nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */