xref: /freebsd/sys/vm/vm_object.c (revision 9b4d473a6e36d23d27f72e2b4ead6f6e6b29021e)
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR       8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags, CTLFLAG_RW, &msync_flush_flags, 0,
    "Enable sequential iteration optimization");

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

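/*
 * Illustrative sketch (not part of the original file): because both
 * knobs above are CTLFLAG_RW, they surface as vm.msync_flush_flags and
 * vm.old_msync and can be toggled from userland with sysctlbyname(3):
 *
 *	int zero = 0;
 *
 *	if (sysctlbyname("vm.msync_flush_flags", NULL, NULL,
 *	    &zero, sizeof(zero)) == -1)
 *		warn("sysctlbyname");	-- disables both flush optimizations
 */
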
static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

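/*
 * Illustrative sketch (not part of the original file): an anonymous
 * (default) object backing one megabyte of virtual memory is created
 * and released with the allocator and reference functions defined
 * below; OFF_TO_IDX converts a byte count to a count of page indices:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(1024 * 1024));
 *	... insert into a vm_map and fault pages in ...
 *	vm_object_deallocate(obj);	-- drops the initial reference
 */
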
struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->cache == NULL,
	    ("object %p has cached pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object, "standard object");

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	return (0);
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->flags = 0;
	object->uip = NULL;
	object->charge = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	object->pg_color = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	object->cache = NULL;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}

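/*
 * Illustrative sketch (not part of the original file): the canonical
 * paging-in-progress interlock as used by callers in this file.  The
 * count is raised under the object lock before I/O starts, and a
 * waiter such as vm_object_terminate() blocks in vm_object_pip_wait()
 * until it drains:
 *
 *	VM_OBJECT_LOCK(object);
 *	vm_object_pip_add(object, 1);
 *	VM_OBJECT_UNLOCK(object);
 *	... pager I/O proceeds with the object lock dropped ...
 *	VM_OBJECT_LOCK(object);
 *	vm_object_pip_wakeup(object);	-- decrements, wakes any waiter
 *	VM_OBJECT_UNLOCK(object);
 */
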
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

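/*
 * Illustrative sketch (not part of the original file): references to a
 * vnode-backed object also hold the vnode, so a reference/release pair
 * is mirrored by a vref()/vrele() pair on the vnode:
 *
 *	vm_object_reference(object);	-- vref(vp) if OBJT_VNODE
 *	... use the object's pages ...
 *	vm_object_deallocate(object);	-- vrele(vp) via
 *					   vm_object_vndeallocate()
 */
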
/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VFS_ASSERT_GIANT(vp->v_mount);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		int vfslocked;

		vfslocked = 0;
	restart:
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			struct vnode *vp = (struct vnode *) object->handle;

			/*
			 * Conditionally acquire Giant for a vnode-backed
			 * object.  We have to be careful since the type of
			 * a vnode object can change while the object is
			 * unlocked.
			 */
			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
				vfslocked = 1;
				if (!mtx_trylock(&Giant)) {
					VM_OBJECT_UNLOCK(object);
					mtx_lock(&Giant);
					goto restart;
				}
			}
			vm_object_vndeallocate(object);
			VFS_UNLOCK_GIANT(vfslocked);
			return;
		} else
			/*
			 * This is to handle the case that the object
			 * changed type while we dropped its lock to
			 * obtain Giant.
			 */
			VFS_UNLOCK_GIANT(vfslocked);

		KASSERT(object->ref_count != 0,
			("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
					 object->ref_count,
					 object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_UNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy removes the object from the global object list
 *	and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	/*
	 * Release the allocation charge.
	 */
	if (object->uip != NULL) {
		KASSERT(object->type == OBJT_DEFAULT ||
		    object->type == OBJT_SWAP,
		    ("vm_object_destroy: non-swap obj %p has uip",
		     object));
		swap_release_by_uid(object->charge, object->uip);
		object->charge = 0;
		uifree(object->uip);
		object->uip = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
		("vm_object_terminate: object with references, ref_count=%d",
		object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
			("vm_object_terminate: freeing busy page %p "
			"p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags));
		if (p->wire_count == 0) {
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, 0, 0);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	vm_object_destroy(object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 * 	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if (p->dirty == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		p->oflags |= VPO_CLEANCHK;
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
			clearobjflags = 0;
		else
			pmap_remove_write(p);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if ((p->oflags & VPO_CLEANCHK) == 0 ||
			(pi < tstart) || (pi >= tend) ||
		    p->valid == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
			curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

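/*
 * Illustrative sketch (not part of the original file): callers such as
 * vm_object_terminate() and vm_object_sync() invoke the cleaner with
 * the object lock held; start == end == 0 selects the whole object:
 *
 *	VM_OBJECT_LOCK(object);
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *	VM_OBJECT_UNLOCK(object);
 */
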
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			return(0);
		}
	}
	maxf = 0;
	for(i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->oflags & VPO_BUSY) ||
				((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				 (tp->oflags & VPO_CLEANCHK) == 0) ||
				(tp->busy != 0))
				break;
			vm_page_test_dirty(tp);
			if (tp->dirty == 0) {
				tp->oflags &= ~VPO_CLEANCHK;
				break;
			}
			maf[ i - 1 ] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count -  maxf;
	if (chkb) {
		for(i = 1; i < chkb;i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->oflags & VPO_BUSY) ||
					((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
					 (tp->oflags & VPO_CLEANCHK) == 0) ||
					(tp->busy != 0))
					break;
				vm_page_test_dirty(tp);
				if (tp->dirty == 0) {
					tp->oflags &= ~VPO_CLEANCHK;
					break;
				}
				mab[ i - 1 ] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for(i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	p->oflags &= ~VPO_CLEANCHK;
	ma[maxb] = p;
	for(i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->dirty) {
			pmap_remove_write(ma[i]);
			ma[i]->oflags |= VPO_CLEANCHK;

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return(maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		int vfslocked;
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		vn_finished_write(mp);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		boolean_t purge;
		purge = old_msync || (object->type == OBJT_DEVICE);
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    purge ? FALSE : TRUE);
	}
	VM_OBJECT_UNLOCK(object);
}

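/*
 * Illustrative sketch (not part of the original file): vm_object_sync()
 * is the backend of msync(2); flushing and invalidating a mapped file
 * range from userland looks like:
 *
 *	if (msync(addr, len, MS_SYNC | MS_INVALIDATE) == -1)
 *		err(1, "msync");
 */
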
/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
1114867a482dSJohn Dyson void
11151b40f8c0SMatthew Dillon vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
1116867a482dSJohn Dyson {
11176e20a165SJohn Dyson 	vm_pindex_t end, tpindex;
111834567de7SAlan Cox 	vm_object_t backing_object, tobject;
1119867a482dSJohn Dyson 	vm_page_t m;
1120867a482dSJohn Dyson 
1121867a482dSJohn Dyson 	if (object == NULL)
1122867a482dSJohn Dyson 		return;
11239b98b796SAlan Cox 	VM_OBJECT_LOCK(object);
1124867a482dSJohn Dyson 	end = pindex + count;
11251c7c3c6aSMatthew Dillon 	/*
11261c7c3c6aSMatthew Dillon 	 * Locate and adjust resident pages
11271c7c3c6aSMatthew Dillon 	 */
11281c7c3c6aSMatthew Dillon 	for (; pindex < end; pindex += 1) {
11296e20a165SJohn Dyson relookup:
11306e20a165SJohn Dyson 		tobject = object;
11316e20a165SJohn Dyson 		tpindex = pindex;
11326e20a165SJohn Dyson shadowlookup:
113358b4e6ccSAlan Cox 		/*
113458b4e6ccSAlan Cox 		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
113558b4e6ccSAlan Cox 		 * and those pages must be OBJ_ONEMAPPING.
113658b4e6ccSAlan Cox 		 */
113758b4e6ccSAlan Cox 		if (advise == MADV_FREE) {
113858b4e6ccSAlan Cox 			if ((tobject->type != OBJT_DEFAULT &&
113958b4e6ccSAlan Cox 			     tobject->type != OBJT_SWAP) ||
114058b4e6ccSAlan Cox 			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
114134567de7SAlan Cox 				goto unlock_tobject;
11426e20a165SJohn Dyson 			}
114358b4e6ccSAlan Cox 		}
11441c7c3c6aSMatthew Dillon 		m = vm_page_lookup(tobject, tpindex);
11457bfda801SAlan Cox 		if (m == NULL && advise == MADV_WILLNEED) {
11467bfda801SAlan Cox 			/*
11477bfda801SAlan Cox 			 * If the page is cached, reactivate it.
11487bfda801SAlan Cox 			 */
1149f3a2ed4bSAlan Cox 			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
1150f3a2ed4bSAlan Cox 			    VM_ALLOC_NOBUSY);
11517bfda801SAlan Cox 		}
11521c7c3c6aSMatthew Dillon 		if (m == NULL) {
11531ce137beSMatthew Dillon 			/*
11541ce137beSMatthew Dillon 			 * There may be swap even if there is no backing page
11551ce137beSMatthew Dillon 			 */
11561ce137beSMatthew Dillon 			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
11571ce137beSMatthew Dillon 				swap_pager_freespace(tobject, tpindex, 1);
11581ce137beSMatthew Dillon 			/*
11591ce137beSMatthew Dillon 			 * next object
11601ce137beSMatthew Dillon 			 */
116134567de7SAlan Cox 			backing_object = tobject->backing_object;
116234567de7SAlan Cox 			if (backing_object == NULL)
116334567de7SAlan Cox 				goto unlock_tobject;
116434567de7SAlan Cox 			VM_OBJECT_LOCK(backing_object);
116556e0670fSAlan Cox 			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
11669b98b796SAlan Cox 			if (tobject != object)
116734567de7SAlan Cox 				VM_OBJECT_UNLOCK(tobject);
116834567de7SAlan Cox 			tobject = backing_object;
11696e20a165SJohn Dyson 			goto shadowlookup;
11706e20a165SJohn Dyson 		}
1171867a482dSJohn Dyson 		/*
1172867a482dSJohn Dyson 		 * If the page is busy or not in a normal active state,
11738b03c8edSMatthew Dillon 		 * we skip it.  If the page is not managed, there are no
11748b03c8edSMatthew Dillon 		 * page queues to mess with.  Things can break if we mess
11758b03c8edSMatthew Dillon 		 * with pages in any of the below states.
1176867a482dSJohn Dyson 		 */
117732585dd6SAlan Cox 		vm_page_lock_queues();
117832585dd6SAlan Cox 		if (m->hold_count ||
11791c7c3c6aSMatthew Dillon 		    m->wire_count ||
11808b03c8edSMatthew Dillon 		    (m->flags & PG_UNMANAGED) ||
118132585dd6SAlan Cox 		    m->valid != VM_PAGE_BITS_ALL) {
118232585dd6SAlan Cox 			vm_page_unlock_queues();
118334567de7SAlan Cox 			goto unlock_tobject;
11846e20a165SJohn Dyson 		}
11859af80719SAlan Cox 		if ((m->oflags & VPO_BUSY) || m->busy) {
11865786be7cSAlan Cox 			vm_page_flag_set(m, PG_REFERENCED);
118791449ce9SAlan Cox 			vm_page_unlock_queues();
11889b98b796SAlan Cox 			if (object != tobject)
11899b98b796SAlan Cox 				VM_OBJECT_UNLOCK(object);
11905786be7cSAlan Cox 			m->oflags |= VPO_WANTED;
119191449ce9SAlan Cox 			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0);
11929b98b796SAlan Cox 			VM_OBJECT_LOCK(object);
11936e20a165SJohn Dyson   			goto relookup;
119434567de7SAlan Cox 		}
1195867a482dSJohn Dyson 		if (advise == MADV_WILLNEED) {
1196867a482dSJohn Dyson 			vm_page_activate(m);
11976e20a165SJohn Dyson 		} else if (advise == MADV_DONTNEED) {
1198479112dfSMatthew Dillon 			vm_page_dontneed(m);
11990a47b48bSJohn Dyson 		} else if (advise == MADV_FREE) {
12001c7c3c6aSMatthew Dillon 			/*
12012aaeadf8SMatthew Dillon 			 * Mark the page clean.  This will allow the page
12022aaeadf8SMatthew Dillon 			 * to be freed up by the system.  However, such pages
12032aaeadf8SMatthew Dillon 			 * are often reused quickly by malloc()/free()
12042aaeadf8SMatthew Dillon 			 * so we do not do anything that would cause
12052aaeadf8SMatthew Dillon 			 * a page fault if we can help it.
12062aaeadf8SMatthew Dillon 			 *
12072aaeadf8SMatthew Dillon 			 * Specifically, we do not try to actually free
12082aaeadf8SMatthew Dillon 			 * the page now nor do we try to put it in the
12092aaeadf8SMatthew Dillon 			 * cache (which would cause a page fault on reuse).
121041c67e12SMatthew Dillon 			 *
121241c67e12SMatthew Dillon 			 * But we do make the page as freeable as we
121241c67e12SMatthew Dillon 			 * can without actually taking the step of unmapping
121341c67e12SMatthew Dillon 			 * it.
12141c7c3c6aSMatthew Dillon 			 */
12150385347cSPeter Wemm 			pmap_clear_modify(m);
12166e20a165SJohn Dyson 			m->dirty = 0;
121741c67e12SMatthew Dillon 			m->act_count = 0;
1218479112dfSMatthew Dillon 			vm_page_dontneed(m);
1219867a482dSJohn Dyson 		}
12202999e9faSAlan Cox 		vm_page_unlock_queues();
12212999e9faSAlan Cox 		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
12222999e9faSAlan Cox 			swap_pager_freespace(tobject, tpindex, 1);
122334567de7SAlan Cox unlock_tobject:
12249b98b796SAlan Cox 		if (tobject != object)
122534567de7SAlan Cox 			VM_OBJECT_UNLOCK(tobject);
1226867a482dSJohn Dyson 	}
12279b98b796SAlan Cox 	VM_OBJECT_UNLOCK(object);
1228867a482dSJohn Dyson }
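
/*
 * Illustrative sketch (editor's addition): asking for the resident pages
 * backing "len" bytes of a hypothetical object "obj" to be activated.  The
 * object must be unlocked on entry; vm_object_madvise() takes and releases
 * the object lock itself.
 */
#if 0
static void
madvise_example(vm_object_t obj, vm_ooffset_t off, vm_size_t len)
{

	vm_object_madvise(obj, OFF_TO_IDX(off), atop(len), MADV_WILLNEED);
}
#endif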
1229867a482dSJohn Dyson 
1230867a482dSJohn Dyson /*
1231df8bae1dSRodney W. Grimes  *	vm_object_shadow:
1232df8bae1dSRodney W. Grimes  *
1233df8bae1dSRodney W. Grimes  *	Create a new object which is backed by the
1234df8bae1dSRodney W. Grimes  *	specified existing object range.  The source
1235df8bae1dSRodney W. Grimes  *	object reference is deallocated.
1236df8bae1dSRodney W. Grimes  *
1237df8bae1dSRodney W. Grimes  *	The new object and offset into that object
1238df8bae1dSRodney W. Grimes  *	are returned in the source parameters.
1239df8bae1dSRodney W. Grimes  */
124026f9a767SRodney W. Grimes void
12411b40f8c0SMatthew Dillon vm_object_shadow(
12421b40f8c0SMatthew Dillon 	vm_object_t *object,	/* IN/OUT */
12431b40f8c0SMatthew Dillon 	vm_ooffset_t *offset,	/* IN/OUT */
12441b40f8c0SMatthew Dillon 	vm_size_t length)
1245df8bae1dSRodney W. Grimes {
1246d031cff1SMatthew Dillon 	vm_object_t source;
1247d031cff1SMatthew Dillon 	vm_object_t result;
1248df8bae1dSRodney W. Grimes 
1249df8bae1dSRodney W. Grimes 	source = *object;
1250df8bae1dSRodney W. Grimes 
1251df8bae1dSRodney W. Grimes 	/*
12529a2f6362SAlan Cox 	 * Don't create the new object if the old object isn't shared.
12539a2f6362SAlan Cox 	 */
1254570a2f4aSAlan Cox 	if (source != NULL) {
1255570a2f4aSAlan Cox 		VM_OBJECT_LOCK(source);
1256570a2f4aSAlan Cox 		if (source->ref_count == 1 &&
12579a2f6362SAlan Cox 		    source->handle == NULL &&
12589a2f6362SAlan Cox 		    (source->type == OBJT_DEFAULT ||
12599917e010SAlan Cox 		     source->type == OBJT_SWAP)) {
1260570a2f4aSAlan Cox 			VM_OBJECT_UNLOCK(source);
12619a2f6362SAlan Cox 			return;
12629917e010SAlan Cox 		}
1263570a2f4aSAlan Cox 		VM_OBJECT_UNLOCK(source);
1264570a2f4aSAlan Cox 	}
12659a2f6362SAlan Cox 
12669a2f6362SAlan Cox 	/*
1267570a2f4aSAlan Cox 	 * Allocate a new object with the given length.
1268df8bae1dSRodney W. Grimes 	 */
1269971dd342SAlfred Perlstein 	result = vm_object_allocate(OBJT_DEFAULT, length);
1270df8bae1dSRodney W. Grimes 
1271df8bae1dSRodney W. Grimes 	/*
12720d94caffSDavid Greenman 	 * The new object shadows the source object, adding a reference to it.
12730d94caffSDavid Greenman 	 * Our caller changes its reference to point to the new object,
12740d94caffSDavid Greenman 	 * removing a reference to the source object.  Net result: no change
12750d94caffSDavid Greenman 	 * of reference count.
12769b09fe24SMatthew Dillon 	 *
12779b09fe24SMatthew Dillon 	 * Try to optimize the result object's page color when shadowing
1278956f3135SPhilippe Charnier 	 * in order to maintain page coloring consistency in the combined
12799b09fe24SMatthew Dillon 	 * shadowed object.
1280df8bae1dSRodney W. Grimes 	 */
128124a1cce3SDavid Greenman 	result->backing_object = source;
12829174ca7bSTor Egge 	/*
12839174ca7bSTor Egge 	 * Store the offset into the source object, and fix up the offset into
12849174ca7bSTor Egge 	 * the new object.
12859174ca7bSTor Egge 	 */
12869174ca7bSTor Egge 	result->backing_object_offset = *offset;
1287570a2f4aSAlan Cox 	if (source != NULL) {
1288570a2f4aSAlan Cox 		VM_OBJECT_LOCK(source);
12891c500307SAlan Cox 		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1290eaf13dd7SJohn Dyson 		source->shadow_count++;
1291eaf13dd7SJohn Dyson 		source->generation++;
1292f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
12937b54b1a9SAlan Cox 		result->flags |= source->flags & OBJ_COLORED;
1294f8a47341SAlan Cox 		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
1295f8a47341SAlan Cox 		    ((1 << (VM_NFREEORDER - 1)) - 1);
1296f8a47341SAlan Cox #endif
1297570a2f4aSAlan Cox 		VM_OBJECT_UNLOCK(source);
1298de5f6a77SJohn Dyson 	}
1299df8bae1dSRodney W. Grimes 
1301df8bae1dSRodney W. Grimes 	/*
1302df8bae1dSRodney W. Grimes 	 * Return the new things
1303df8bae1dSRodney W. Grimes 	 * Return the new object and offset to the caller.
1304df8bae1dSRodney W. Grimes 	*offset = 0;
1305df8bae1dSRodney W. Grimes 	*object = result;
1306df8bae1dSRodney W. Grimes }
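
/*
 * Illustrative sketch (editor's addition): the IN/OUT contract above as a
 * copy-on-write path might use it.  On return, the hypothetical map entry
 * names the new shadow object and its offset has been reset to zero.
 */
#if 0
static void
shadow_example(vm_map_entry_t entry)
{

	vm_object_shadow(&entry->object.vm_object, &entry->offset,
	    atop(entry->end - entry->start));
}
#endif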
1307df8bae1dSRodney W. Grimes 
1308c5aaa06dSAlan Cox /*
1309c5aaa06dSAlan Cox  *	vm_object_split:
1310c5aaa06dSAlan Cox  *
1311c5aaa06dSAlan Cox  * Split the pages in a map entry into a new object.  This affords
1312c5aaa06dSAlan Cox  * easier removal of unused pages, and keeps object inheritance from
1313c5aaa06dSAlan Cox  * negatively impacting memory usage.
1314c5aaa06dSAlan Cox  */
1315c5aaa06dSAlan Cox void
1316c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry)
1317c5aaa06dSAlan Cox {
131873000556SAlan Cox 	vm_page_t m, m_next;
1319c5aaa06dSAlan Cox 	vm_object_t orig_object, new_object, source;
132073000556SAlan Cox 	vm_pindex_t idx, offidxstart;
132173000556SAlan Cox 	vm_size_t size;
1322c5aaa06dSAlan Cox 
1323c5aaa06dSAlan Cox 	orig_object = entry->object.vm_object;
1324c5aaa06dSAlan Cox 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1325c5aaa06dSAlan Cox 		return;
1326c5aaa06dSAlan Cox 	if (orig_object->ref_count <= 1)
1327c5aaa06dSAlan Cox 		return;
13284da9f125SAlan Cox 	VM_OBJECT_UNLOCK(orig_object);
1329c5aaa06dSAlan Cox 
13304da9f125SAlan Cox 	offidxstart = OFF_TO_IDX(entry->offset);
133195442adfSAlan Cox 	size = atop(entry->end - entry->start);
1332c5aaa06dSAlan Cox 
13334da9f125SAlan Cox 	/*
13344da9f125SAlan Cox 	 * If swap_pager_copy() is later called, it will convert new_object
13354da9f125SAlan Cox 	 * into a swap object.
13364da9f125SAlan Cox 	 */
13374da9f125SAlan Cox 	new_object = vm_object_allocate(OBJT_DEFAULT, size);
1338c5aaa06dSAlan Cox 
1339c5474b8fSAlan Cox 	/*
1340c5474b8fSAlan Cox 	 * At this point, the new object is still private, so the order in
1341c5474b8fSAlan Cox 	 * which the original and new objects are locked does not matter.
1342c5474b8fSAlan Cox 	 */
134363f6cefcSAlan Cox 	VM_OBJECT_LOCK(new_object);
134463f6cefcSAlan Cox 	VM_OBJECT_LOCK(orig_object);
1345c5aaa06dSAlan Cox 	source = orig_object->backing_object;
1346c5aaa06dSAlan Cox 	if (source != NULL) {
13478e3a76fbSAlan Cox 		VM_OBJECT_LOCK(source);
134819c244d0SAlan Cox 		if ((source->flags & OBJ_DEAD) != 0) {
134919c244d0SAlan Cox 			VM_OBJECT_UNLOCK(source);
135019c244d0SAlan Cox 			VM_OBJECT_UNLOCK(orig_object);
135119c244d0SAlan Cox 			VM_OBJECT_UNLOCK(new_object);
135219c244d0SAlan Cox 			vm_object_deallocate(new_object);
135319c244d0SAlan Cox 			VM_OBJECT_LOCK(orig_object);
135419c244d0SAlan Cox 			return;
135519c244d0SAlan Cox 		}
13561c500307SAlan Cox 		LIST_INSERT_HEAD(&source->shadow_head,
1357c5aaa06dSAlan Cox 				  new_object, shadow_list);
13588e3a76fbSAlan Cox 		source->shadow_count++;
13598e3a76fbSAlan Cox 		source->generation++;
1360b921a12bSAlan Cox 		vm_object_reference_locked(source);	/* for new_object */
1361c5aaa06dSAlan Cox 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
1362e2479b4fSAlan Cox 		VM_OBJECT_UNLOCK(source);
1363c5aaa06dSAlan Cox 		new_object->backing_object_offset =
13644da9f125SAlan Cox 			orig_object->backing_object_offset + entry->offset;
1365c5aaa06dSAlan Cox 		new_object->backing_object = source;
1366c5aaa06dSAlan Cox 	}
13673364c323SKonstantin Belousov 	if (orig_object->uip != NULL) {
13683364c323SKonstantin Belousov 		new_object->uip = orig_object->uip;
13693364c323SKonstantin Belousov 		uihold(orig_object->uip);
13703364c323SKonstantin Belousov 		new_object->charge = ptoa(size);
13713364c323SKonstantin Belousov 		KASSERT(orig_object->charge >= ptoa(size),
13723364c323SKonstantin Belousov 		    ("orig_object->charge < ptoa(size)"));
13733364c323SKonstantin Belousov 		orig_object->charge -= ptoa(size);
13743364c323SKonstantin Belousov 	}
1375c5aaa06dSAlan Cox retry:
137673000556SAlan Cox 	if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
137773000556SAlan Cox 		if (m->pindex < offidxstart) {
137873000556SAlan Cox 			m = vm_page_splay(offidxstart, orig_object->root);
137973000556SAlan Cox 			if ((orig_object->root = m)->pindex < offidxstart)
138073000556SAlan Cox 				m = TAILQ_NEXT(m, listq);
138173000556SAlan Cox 		}
138273000556SAlan Cox 	}
138373000556SAlan Cox 	vm_page_lock_queues();
138473000556SAlan Cox 	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
138573000556SAlan Cox 	    m = m_next) {
138673000556SAlan Cox 		m_next = TAILQ_NEXT(m, listq);
1387c5aaa06dSAlan Cox 
1388c5aaa06dSAlan Cox 		/*
1389c5aaa06dSAlan Cox 		 * We must wait for pending I/O to complete before we can
1390c5aaa06dSAlan Cox 		 * rename the page.
1391c5aaa06dSAlan Cox 		 *
1392c5aaa06dSAlan Cox 		 * We do not have to VM_PROT_NONE the page as mappings should
1393c5aaa06dSAlan Cox 		 * not be changed by this operation.
1394c5aaa06dSAlan Cox 		 */
13959af80719SAlan Cox 		if ((m->oflags & VPO_BUSY) || m->busy) {
13965786be7cSAlan Cox 			vm_page_flag_set(m, PG_REFERENCED);
139791449ce9SAlan Cox 			vm_page_unlock_queues();
1398de33beddSAlan Cox 			VM_OBJECT_UNLOCK(new_object);
13995786be7cSAlan Cox 			m->oflags |= VPO_WANTED;
1400c5474b8fSAlan Cox 			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
1401de33beddSAlan Cox 			VM_OBJECT_LOCK(new_object);
1402c5aaa06dSAlan Cox 			goto retry;
1403de33beddSAlan Cox 		}
1404c5aaa06dSAlan Cox 		vm_page_rename(m, new_object, idx);
1405c5aaa06dSAlan Cox 		/* page automatically made dirty by rename and cache handled */
1406c5aaa06dSAlan Cox 		vm_page_busy(m);
1407c5aaa06dSAlan Cox 	}
14085ba514bcSAlan Cox 	vm_page_unlock_queues();
1409d7a013c3SAlan Cox 	if (orig_object->type == OBJT_SWAP) {
1410c5aaa06dSAlan Cox 		/*
1411c7c8dd7eSAlan Cox 		 * swap_pager_copy() can sleep, in which case the orig_object's
1412c7c8dd7eSAlan Cox 		 * and new_object's locks are released and reacquired.
1413c5aaa06dSAlan Cox 		 */
1414c5aaa06dSAlan Cox 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
14157bfda801SAlan Cox 
14167bfda801SAlan Cox 		/*
14177bfda801SAlan Cox 		 * Transfer any cached pages from orig_object to new_object.
14187bfda801SAlan Cox 		 */
14197bfda801SAlan Cox 		if (__predict_false(orig_object->cache != NULL))
14207bfda801SAlan Cox 			vm_page_cache_transfer(orig_object, offidxstart,
14217bfda801SAlan Cox 			    new_object);
1422c5aaa06dSAlan Cox 	}
1423d7a013c3SAlan Cox 	VM_OBJECT_UNLOCK(orig_object);
1424c7118ed6SAlan Cox 	TAILQ_FOREACH(m, &new_object->memq, listq)
1425c5aaa06dSAlan Cox 		vm_page_wakeup(m);
1426c7c8dd7eSAlan Cox 	VM_OBJECT_UNLOCK(new_object);
1427c5aaa06dSAlan Cox 	entry->object.vm_object = new_object;
1428c5aaa06dSAlan Cox 	entry->offset = 0LL;
1429c5aaa06dSAlan Cox 	vm_object_deallocate(orig_object);
14304da9f125SAlan Cox 	VM_OBJECT_LOCK(new_object);
1431c5aaa06dSAlan Cox }
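
/*
 * Illustrative sketch (editor's addition): vm_object_split() is entered
 * with the entry's object locked and returns with the (possibly new) entry
 * object locked, so a hypothetical caller brackets it like this.
 */
#if 0
static void
split_example(vm_map_entry_t entry)
{

	VM_OBJECT_LOCK(entry->object.vm_object);
	vm_object_split(entry);
	/* entry->object.vm_object may now name a different object. */
	VM_OBJECT_UNLOCK(entry->object.vm_object);
}
#endif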
1432c5aaa06dSAlan Cox 
14332ad1a3f7SMatthew Dillon #define	OBSC_TEST_ALL_SHADOWED	0x0001
14342ad1a3f7SMatthew Dillon #define	OBSC_COLLAPSE_NOWAIT	0x0002
14352ad1a3f7SMatthew Dillon #define	OBSC_COLLAPSE_WAIT	0x0004
14362ad1a3f7SMatthew Dillon 
1437b4ae4780SPoul-Henning Kamp static int
14382ad1a3f7SMatthew Dillon vm_object_backing_scan(vm_object_t object, int op)
14392ad1a3f7SMatthew Dillon {
14402ad1a3f7SMatthew Dillon 	int r = 1;
14412ad1a3f7SMatthew Dillon 	vm_page_t p;
14422ad1a3f7SMatthew Dillon 	vm_object_t backing_object;
14432ad1a3f7SMatthew Dillon 	vm_pindex_t backing_offset_index;
14442ad1a3f7SMatthew Dillon 
14457ca33ad1SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
14467ca33ad1SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
14472ad1a3f7SMatthew Dillon 
14482ad1a3f7SMatthew Dillon 	backing_object = object->backing_object;
14492ad1a3f7SMatthew Dillon 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
14502ad1a3f7SMatthew Dillon 
14512ad1a3f7SMatthew Dillon 	/*
14522ad1a3f7SMatthew Dillon 	 * Initial conditions
14532ad1a3f7SMatthew Dillon 	 */
14542ad1a3f7SMatthew Dillon 	if (op & OBSC_TEST_ALL_SHADOWED) {
14552ad1a3f7SMatthew Dillon 		/*
14567bfda801SAlan Cox 		 * We do not want to have to test for the existence of cache
14577bfda801SAlan Cox 		 * or swap pages in the backing object.  XXX but with the
14582ad1a3f7SMatthew Dillon 		 * new swapper this would be pretty easy to do.
14592ad1a3f7SMatthew Dillon 		 *
14602ad1a3f7SMatthew Dillon 		 * XXX what about anonymous MAP_SHARED memory that hasn't
14612ad1a3f7SMatthew Dillon 		 * been ZFOD faulted yet?  If we do not test for this, the
14622ad1a3f7SMatthew Dillon 		 * shadow test may succeed! XXX
14632ad1a3f7SMatthew Dillon 		 */
14642ad1a3f7SMatthew Dillon 		if (backing_object->type != OBJT_DEFAULT) {
14652ad1a3f7SMatthew Dillon 			return (0);
14662ad1a3f7SMatthew Dillon 		}
14672ad1a3f7SMatthew Dillon 	}
14682ad1a3f7SMatthew Dillon 	if (op & OBSC_COLLAPSE_WAIT) {
14692ad1a3f7SMatthew Dillon 		vm_object_set_flag(backing_object, OBJ_DEAD);
14702ad1a3f7SMatthew Dillon 	}
14712ad1a3f7SMatthew Dillon 
14722ad1a3f7SMatthew Dillon 	/*
14732ad1a3f7SMatthew Dillon 	 * Our scan
14742ad1a3f7SMatthew Dillon 	 */
14752ad1a3f7SMatthew Dillon 	p = TAILQ_FIRST(&backing_object->memq);
14762ad1a3f7SMatthew Dillon 	while (p) {
14772ad1a3f7SMatthew Dillon 		vm_page_t next = TAILQ_NEXT(p, listq);
14782ad1a3f7SMatthew Dillon 		vm_pindex_t new_pindex = p->pindex - backing_offset_index;
14792ad1a3f7SMatthew Dillon 
14802ad1a3f7SMatthew Dillon 		if (op & OBSC_TEST_ALL_SHADOWED) {
14812ad1a3f7SMatthew Dillon 			vm_page_t pp;
14822ad1a3f7SMatthew Dillon 
14832ad1a3f7SMatthew Dillon 			/*
14842ad1a3f7SMatthew Dillon 			 * Ignore pages outside the parent object's range
14852ad1a3f7SMatthew Dillon 			 * and outside the parent object's mapping of the
14862ad1a3f7SMatthew Dillon 			 * backing object.
14872ad1a3f7SMatthew Dillon 			 *
14882ad1a3f7SMatthew Dillon 			 * note that we do not busy the backing object's
14892ad1a3f7SMatthew Dillon 			 * page.
14902ad1a3f7SMatthew Dillon 			 */
14912ad1a3f7SMatthew Dillon 			if (
14922ad1a3f7SMatthew Dillon 			    p->pindex < backing_offset_index ||
14932ad1a3f7SMatthew Dillon 			    new_pindex >= object->size
14942ad1a3f7SMatthew Dillon 			) {
14952ad1a3f7SMatthew Dillon 				p = next;
14962ad1a3f7SMatthew Dillon 				continue;
14972ad1a3f7SMatthew Dillon 			}
14982ad1a3f7SMatthew Dillon 
14992ad1a3f7SMatthew Dillon 			/*
15002ad1a3f7SMatthew Dillon 			 * See if the parent has the page or if the parent's
15012ad1a3f7SMatthew Dillon 			 * object pager has the page.  If the parent has the
15022ad1a3f7SMatthew Dillon 			 * page but the page is not valid, the parent's
15032ad1a3f7SMatthew Dillon 			 * object pager must have the page.
15042ad1a3f7SMatthew Dillon 			 *
15052ad1a3f7SMatthew Dillon 			 * If this fails, the parent does not completely shadow
15062ad1a3f7SMatthew Dillon 			 * the object and we might as well give up now.
15072ad1a3f7SMatthew Dillon 			 */
15082ad1a3f7SMatthew Dillon 
15092ad1a3f7SMatthew Dillon 			pp = vm_page_lookup(object, new_pindex);
15102ad1a3f7SMatthew Dillon 			if (
15112ad1a3f7SMatthew Dillon 			    (pp == NULL || pp->valid == 0) &&
15122ad1a3f7SMatthew Dillon 			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
15132ad1a3f7SMatthew Dillon 			) {
15142ad1a3f7SMatthew Dillon 				r = 0;
15152ad1a3f7SMatthew Dillon 				break;
15162ad1a3f7SMatthew Dillon 			}
15172ad1a3f7SMatthew Dillon 		}
15182ad1a3f7SMatthew Dillon 
15192ad1a3f7SMatthew Dillon 		/*
15202ad1a3f7SMatthew Dillon 		 * Check for busy page
15212ad1a3f7SMatthew Dillon 		 */
15222ad1a3f7SMatthew Dillon 		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
15232ad1a3f7SMatthew Dillon 			vm_page_t pp;
15242ad1a3f7SMatthew Dillon 
15252ad1a3f7SMatthew Dillon 			if (op & OBSC_COLLAPSE_NOWAIT) {
15269af80719SAlan Cox 				if ((p->oflags & VPO_BUSY) ||
15272ad1a3f7SMatthew Dillon 				    !p->valid ||
152800f9e8b4SAlan Cox 				    p->busy) {
15292ad1a3f7SMatthew Dillon 					p = next;
15302ad1a3f7SMatthew Dillon 					continue;
15312ad1a3f7SMatthew Dillon 				}
15322ad1a3f7SMatthew Dillon 			} else if (op & OBSC_COLLAPSE_WAIT) {
15339af80719SAlan Cox 				if ((p->oflags & VPO_BUSY) || p->busy) {
1534c6ec6a7cSAlan Cox 					vm_page_lock_queues();
15355786be7cSAlan Cox 					vm_page_flag_set(p, PG_REFERENCED);
153691449ce9SAlan Cox 					vm_page_unlock_queues();
15377ca33ad1SAlan Cox 					VM_OBJECT_UNLOCK(object);
15385786be7cSAlan Cox 					p->oflags |= VPO_WANTED;
153991449ce9SAlan Cox 					msleep(p, VM_OBJECT_MTX(backing_object),
15407ca33ad1SAlan Cox 					    PDROP | PVM, "vmocol", 0);
15417ca33ad1SAlan Cox 					VM_OBJECT_LOCK(object);
15427ca33ad1SAlan Cox 					VM_OBJECT_LOCK(backing_object);
15432ad1a3f7SMatthew Dillon 					/*
15442ad1a3f7SMatthew Dillon 					 * If we slept, anything could have
15452ad1a3f7SMatthew Dillon 					 * happened.  Since the object is
15462ad1a3f7SMatthew Dillon 					 * marked dead, the backing offset
15472ad1a3f7SMatthew Dillon 					 * should not have changed so we
15482ad1a3f7SMatthew Dillon 					 * just restart our scan.
15492ad1a3f7SMatthew Dillon 					 */
15502ad1a3f7SMatthew Dillon 					p = TAILQ_FIRST(&backing_object->memq);
15512ad1a3f7SMatthew Dillon 					continue;
15522ad1a3f7SMatthew Dillon 				}
15532ad1a3f7SMatthew Dillon 			}
15542ad1a3f7SMatthew Dillon 
15552ad1a3f7SMatthew Dillon 			KASSERT(
15562ad1a3f7SMatthew Dillon 			    p->object == backing_object,
15578e99783bSAlan Cox 			    ("vm_object_backing_scan: object mismatch")
15582ad1a3f7SMatthew Dillon 			);
15592ad1a3f7SMatthew Dillon 
15602ad1a3f7SMatthew Dillon 			/*
15612ad1a3f7SMatthew Dillon 			 * Destroy any associated swap
15622ad1a3f7SMatthew Dillon 			 */
15632ad1a3f7SMatthew Dillon 			if (backing_object->type == OBJT_SWAP) {
15642ad1a3f7SMatthew Dillon 				swap_pager_freespace(
15652ad1a3f7SMatthew Dillon 				    backing_object,
15662ad1a3f7SMatthew Dillon 				    p->pindex,
15672ad1a3f7SMatthew Dillon 				    1
15682ad1a3f7SMatthew Dillon 				);
15692ad1a3f7SMatthew Dillon 			}
15702ad1a3f7SMatthew Dillon 
15712ad1a3f7SMatthew Dillon 			if (
15722ad1a3f7SMatthew Dillon 			    p->pindex < backing_offset_index ||
15732ad1a3f7SMatthew Dillon 			    new_pindex >= object->size
15742ad1a3f7SMatthew Dillon 			) {
15752ad1a3f7SMatthew Dillon 				/*
15762ad1a3f7SMatthew Dillon 				 * Page is out of the parent object's range, we
15772ad1a3f7SMatthew Dillon 				 * can simply destroy it.
15782ad1a3f7SMatthew Dillon 				 */
15796a684ecfSAlan Cox 				vm_page_lock_queues();
1580f6d89838SAlan Cox 				KASSERT(!pmap_page_is_mapped(p),
1581f6d89838SAlan Cox 				    ("freeing mapped page %p", p));
1582f6d89838SAlan Cox 				if (p->wire_count == 0)
15832ad1a3f7SMatthew Dillon 					vm_page_free(p);
1584f6d89838SAlan Cox 				else
1585f6d89838SAlan Cox 					vm_page_remove(p);
15866a684ecfSAlan Cox 				vm_page_unlock_queues();
15872ad1a3f7SMatthew Dillon 				p = next;
15882ad1a3f7SMatthew Dillon 				continue;
15892ad1a3f7SMatthew Dillon 			}
15902ad1a3f7SMatthew Dillon 
15912ad1a3f7SMatthew Dillon 			pp = vm_page_lookup(object, new_pindex);
15922ad1a3f7SMatthew Dillon 			if (
15932ad1a3f7SMatthew Dillon 			    pp != NULL ||
15942ad1a3f7SMatthew Dillon 			    vm_pager_has_page(object, new_pindex, NULL, NULL)
15952ad1a3f7SMatthew Dillon 			) {
15962ad1a3f7SMatthew Dillon 				/*
15972ad1a3f7SMatthew Dillon 				 * page already exists in parent OR swap exists
15982ad1a3f7SMatthew Dillon 				 * for this location in the parent.  Destroy
15992ad1a3f7SMatthew Dillon 				 * the original page from the backing object.
16002ad1a3f7SMatthew Dillon 				 *
16012ad1a3f7SMatthew Dillon 				 * Leave the parent's page alone
16022ad1a3f7SMatthew Dillon 				 */
16036a684ecfSAlan Cox 				vm_page_lock_queues();
1604f6d89838SAlan Cox 				KASSERT(!pmap_page_is_mapped(p),
1605f6d89838SAlan Cox 				    ("freeing mapped page %p", p));
1606f6d89838SAlan Cox 				if (p->wire_count == 0)
16072ad1a3f7SMatthew Dillon 					vm_page_free(p);
1608f6d89838SAlan Cox 				else
1609f6d89838SAlan Cox 					vm_page_remove(p);
16106a684ecfSAlan Cox 				vm_page_unlock_queues();
16112ad1a3f7SMatthew Dillon 				p = next;
16122ad1a3f7SMatthew Dillon 				continue;
16132ad1a3f7SMatthew Dillon 			}
16142ad1a3f7SMatthew Dillon 
1615f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
1616f8a47341SAlan Cox 			/*
1617f8a47341SAlan Cox 			 * Rename the reservation.
1618f8a47341SAlan Cox 			 */
1619f8a47341SAlan Cox 			vm_reserv_rename(p, object, backing_object,
1620f8a47341SAlan Cox 			    backing_offset_index);
1621f8a47341SAlan Cox #endif
1622f8a47341SAlan Cox 
16232ad1a3f7SMatthew Dillon 			/*
16242ad1a3f7SMatthew Dillon 			 * Page does not exist in parent, rename the
16252ad1a3f7SMatthew Dillon 			 * page from the backing object to the main object.
1626d1bf5d56SMatthew Dillon 			 *
1627d1bf5d56SMatthew Dillon 			 * If the page was mapped to a process, it can remain
1628d1bf5d56SMatthew Dillon 			 * mapped through the rename.
16292ad1a3f7SMatthew Dillon 			 */
1630a28cc55eSAlan Cox 			vm_page_lock_queues();
16312ad1a3f7SMatthew Dillon 			vm_page_rename(p, object, new_pindex);
1632a28cc55eSAlan Cox 			vm_page_unlock_queues();
16332ad1a3f7SMatthew Dillon 			/* page automatically made dirty by rename */
16342ad1a3f7SMatthew Dillon 		}
16352ad1a3f7SMatthew Dillon 		p = next;
16362ad1a3f7SMatthew Dillon 	}
16372ad1a3f7SMatthew Dillon 	return (r);
16382ad1a3f7SMatthew Dillon }
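
/*
 * Illustrative sketch (editor's addition): the three OBSC_* requests as a
 * fragment, not a complete function.  Both the object's and the backing
 * object's locks must be held, per the assertions at the top of
 * vm_object_backing_scan().
 */
#if 0
	int all_shadowed;

	/* Does the parent shadow every resident page of its backer? */
	all_shadowed = vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED);
	/* Opportunistic collapse: skip busy pages instead of sleeping. */
	(void)vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
	/* Full collapse: sleep on busy pages until the scan completes. */
	(void)vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
#endif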
16392ad1a3f7SMatthew Dillon 
1640df8bae1dSRodney W. Grimes 
1641df8bae1dSRodney W. Grimes /*
16422fe6e4d7SDavid Greenman  * This version of collapse allows the operation to occur earlier, even
16432fe6e4d7SDavid Greenman  * when paging_in_progress is true for an object.  This is not a complete
16442fe6e4d7SDavid Greenman  * operation, but it should plug 99.9% of the remaining leaks.
16452fe6e4d7SDavid Greenman  */
16462fe6e4d7SDavid Greenman static void
16471b40f8c0SMatthew Dillon vm_object_qcollapse(vm_object_t object)
16482fe6e4d7SDavid Greenman {
16492ad1a3f7SMatthew Dillon 	vm_object_t backing_object = object->backing_object;
16502fe6e4d7SDavid Greenman 
165106ecade7SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
165206ecade7SAlan Cox 	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
16531b40f8c0SMatthew Dillon 
16542fe6e4d7SDavid Greenman 	if (backing_object->ref_count != 1)
16552fe6e4d7SDavid Greenman 		return;
16562fe6e4d7SDavid Greenman 
16572ad1a3f7SMatthew Dillon 	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
16582fe6e4d7SDavid Greenman }
16592fe6e4d7SDavid Greenman 
1660df8bae1dSRodney W. Grimes /*
1661df8bae1dSRodney W. Grimes  *	vm_object_collapse:
1662df8bae1dSRodney W. Grimes  *
1663df8bae1dSRodney W. Grimes  *	Collapse an object with the object backing it.
1664df8bae1dSRodney W. Grimes  *	Pages in the backing object are moved into the
1665df8bae1dSRodney W. Grimes  *	parent, and the backing object is deallocated.
1666df8bae1dSRodney W. Grimes  */
166726f9a767SRodney W. Grimes void
16681b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object)
1669df8bae1dSRodney W. Grimes {
1670d7fc2210SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
167123955314SAlfred Perlstein 
1672df8bae1dSRodney W. Grimes 	while (TRUE) {
16732ad1a3f7SMatthew Dillon 		vm_object_t backing_object;
16742ad1a3f7SMatthew Dillon 
1675df8bae1dSRodney W. Grimes 		/*
1676df8bae1dSRodney W. Grimes 		 * Verify that the conditions are right for collapse:
1677df8bae1dSRodney W. Grimes 		 *
16782ad1a3f7SMatthew Dillon 		 * The object exists and the backing object exists.
1679df8bae1dSRodney W. Grimes 		 */
168024a1cce3SDavid Greenman 		if ((backing_object = object->backing_object) == NULL)
16812ad1a3f7SMatthew Dillon 			break;
1682df8bae1dSRodney W. Grimes 
1683f919ebdeSDavid Greenman 		/*
1684f919ebdeSDavid Greenman 		 * we check the backing object first, because it is most likely
168524a1cce3SDavid Greenman 		 * not collapsible.
1686f919ebdeSDavid Greenman 		 */
168740b808a8SAlan Cox 		VM_OBJECT_LOCK(backing_object);
168824a1cce3SDavid Greenman 		if (backing_object->handle != NULL ||
168924a1cce3SDavid Greenman 		    (backing_object->type != OBJT_DEFAULT &&
169024a1cce3SDavid Greenman 		     backing_object->type != OBJT_SWAP) ||
1691f919ebdeSDavid Greenman 		    (backing_object->flags & OBJ_DEAD) ||
169224a1cce3SDavid Greenman 		    object->handle != NULL ||
169324a1cce3SDavid Greenman 		    (object->type != OBJT_DEFAULT &&
169424a1cce3SDavid Greenman 		     object->type != OBJT_SWAP) ||
169524a1cce3SDavid Greenman 		    (object->flags & OBJ_DEAD)) {
169640b808a8SAlan Cox 			VM_OBJECT_UNLOCK(backing_object);
16972ad1a3f7SMatthew Dillon 			break;
169824a1cce3SDavid Greenman 		}
16999b4814bbSDavid Greenman 
17002ad1a3f7SMatthew Dillon 		if (
17012ad1a3f7SMatthew Dillon 		    object->paging_in_progress != 0 ||
17022ad1a3f7SMatthew Dillon 		    backing_object->paging_in_progress != 0
17032ad1a3f7SMatthew Dillon 		) {
1704b9921222SDavid Greenman 			vm_object_qcollapse(object);
170540b808a8SAlan Cox 			VM_OBJECT_UNLOCK(backing_object);
17062ad1a3f7SMatthew Dillon 			break;
1707df8bae1dSRodney W. Grimes 		}
170826f9a767SRodney W. Grimes 		/*
17090d94caffSDavid Greenman 		 * We know that we can either collapse the backing object (if
17102ad1a3f7SMatthew Dillon 		 * the parent is the only reference to it) or (perhaps) have
17112ad1a3f7SMatthew Dillon 		 * the parent bypass the object if the parent happens to shadow
17122ad1a3f7SMatthew Dillon 		 * all the resident pages in the entire backing object.
17132ad1a3f7SMatthew Dillon 		 *
17142ad1a3f7SMatthew Dillon 		 * This is ignoring pager-backed pages such as swap pages.
17152ad1a3f7SMatthew Dillon 		 * vm_object_backing_scan fails the shadowing test in this
17162ad1a3f7SMatthew Dillon 		 * case.
1717df8bae1dSRodney W. Grimes 		 */
1718df8bae1dSRodney W. Grimes 		if (backing_object->ref_count == 1) {
1719df8bae1dSRodney W. Grimes 			/*
17202ad1a3f7SMatthew Dillon 			 * If there is exactly one reference to the backing
17212ad1a3f7SMatthew Dillon 			 * object, we can collapse it into the parent.
1722df8bae1dSRodney W. Grimes 			 */
17232ad1a3f7SMatthew Dillon 			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1724df8bae1dSRodney W. Grimes 
1725f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
1726f8a47341SAlan Cox 			/*
1727f8a47341SAlan Cox 			 * Break any reservations from backing_object.
1728f8a47341SAlan Cox 			 */
1729f8a47341SAlan Cox 			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1730f8a47341SAlan Cox 				vm_reserv_break_all(backing_object);
1731f8a47341SAlan Cox #endif
1732f8a47341SAlan Cox 
1733df8bae1dSRodney W. Grimes 			/*
1734df8bae1dSRodney W. Grimes 			 * Move the pager from backing_object to object.
1735df8bae1dSRodney W. Grimes 			 */
17366be36525SAlan Cox 			if (backing_object->type == OBJT_SWAP) {
173724a1cce3SDavid Greenman 				/*
1738c7c8dd7eSAlan Cox 				 * swap_pager_copy() can sleep, in which case
1739c7c8dd7eSAlan Cox 				 * the backing_object's and object's locks are
1740c7c8dd7eSAlan Cox 				 * released and reacquired.
174124a1cce3SDavid Greenman 				 */
17421c7c3c6aSMatthew Dillon 				swap_pager_copy(
17431c7c3c6aSMatthew Dillon 				    backing_object,
17441c7c3c6aSMatthew Dillon 				    object,
17451c7c3c6aSMatthew Dillon 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
17467bfda801SAlan Cox 
17477bfda801SAlan Cox 				/*
17487bfda801SAlan Cox 				 * Free any cached pages from backing_object.
17497bfda801SAlan Cox 				 */
17507bfda801SAlan Cox 				if (__predict_false(backing_object->cache != NULL))
1751c9444914SAlan Cox 					vm_page_cache_free(backing_object, 0, 0);
1752c0503609SDavid Greenman 			}
1753df8bae1dSRodney W. Grimes 			/*
1754df8bae1dSRodney W. Grimes 			 * Object now shadows whatever backing_object did.
17552ad1a3f7SMatthew Dillon 			 * Note that the reference to
17562ad1a3f7SMatthew Dillon 			 * backing_object->backing_object moves from within
17572ad1a3f7SMatthew Dillon 			 * backing_object to within object.
1758df8bae1dSRodney W. Grimes 			 */
17591c500307SAlan Cox 			LIST_REMOVE(object, shadow_list);
17604f7c7f6eSAlan Cox 			backing_object->shadow_count--;
17614f7c7f6eSAlan Cox 			backing_object->generation++;
1762de5f6a77SJohn Dyson 			if (backing_object->backing_object) {
17636be36525SAlan Cox 				VM_OBJECT_LOCK(backing_object->backing_object);
17641c500307SAlan Cox 				LIST_REMOVE(backing_object, shadow_list);
176543186e53SAlan Cox 				LIST_INSERT_HEAD(
176643186e53SAlan Cox 				    &backing_object->backing_object->shadow_head,
176743186e53SAlan Cox 				    object, shadow_list);
176843186e53SAlan Cox 				/*
176943186e53SAlan Cox 				 * The shadow_count has not changed.
177043186e53SAlan Cox 				 */
1771eaf13dd7SJohn Dyson 				backing_object->backing_object->generation++;
17726be36525SAlan Cox 				VM_OBJECT_UNLOCK(backing_object->backing_object);
1773de5f6a77SJohn Dyson 			}
177424a1cce3SDavid Greenman 			object->backing_object = backing_object->backing_object;
17752ad1a3f7SMatthew Dillon 			object->backing_object_offset +=
17762ad1a3f7SMatthew Dillon 			    backing_object->backing_object_offset;
17772ad1a3f7SMatthew Dillon 
1778df8bae1dSRodney W. Grimes 			/*
1779df8bae1dSRodney W. Grimes 			 * Discard backing_object.
1780df8bae1dSRodney W. Grimes 			 *
17810d94caffSDavid Greenman 			 * Since the backing object has no pages, no pager left,
17820d94caffSDavid Greenman 			 * and no object references within it, all that is
17830d94caffSDavid Greenman 			 * necessary is to dispose of it.
1784df8bae1dSRodney W. Grimes 			 */
17859b4d473aSKonstantin Belousov 			KASSERT(backing_object->ref_count == 1, (
17869b4d473aSKonstantin Belousov "backing_object %p was somehow re-referenced during collapse!",
17879b4d473aSKonstantin Belousov 			    backing_object));
17886be36525SAlan Cox 			VM_OBJECT_UNLOCK(backing_object);
17899b4d473aSKonstantin Belousov 			vm_object_destroy(backing_object);
1790df8bae1dSRodney W. Grimes 
1791df8bae1dSRodney W. Grimes 			object_collapses++;
17920d94caffSDavid Greenman 		} else {
179395e5e988SJohn Dyson 			vm_object_t new_backing_object;
1794df8bae1dSRodney W. Grimes 
1795df8bae1dSRodney W. Grimes 			/*
17962ad1a3f7SMatthew Dillon 			 * If we do not entirely shadow the backing object,
17972ad1a3f7SMatthew Dillon 			 * there is nothing we can do so we give up.
1798df8bae1dSRodney W. Grimes 			 */
1799df59a0feSJeff Roberson 			if (object->resident_page_count != object->size &&
1800df59a0feSJeff Roberson 			    vm_object_backing_scan(object,
1801df59a0feSJeff Roberson 			    OBSC_TEST_ALL_SHADOWED) == 0) {
180240b808a8SAlan Cox 				VM_OBJECT_UNLOCK(backing_object);
18032ad1a3f7SMatthew Dillon 				break;
180424a1cce3SDavid Greenman 			}
1805df8bae1dSRodney W. Grimes 
1806df8bae1dSRodney W. Grimes 			/*
18070d94caffSDavid Greenman 			 * Make the parent shadow the next object in the
18080d94caffSDavid Greenman 			 * chain.  Deallocating backing_object will not remove
18090d94caffSDavid Greenman 			 * it, since its reference count is at least 2.
1810df8bae1dSRodney W. Grimes 			 */
18111c500307SAlan Cox 			LIST_REMOVE(object, shadow_list);
1812eaf13dd7SJohn Dyson 			backing_object->shadow_count--;
1813eaf13dd7SJohn Dyson 			backing_object->generation++;
181495e5e988SJohn Dyson 
181595e5e988SJohn Dyson 			new_backing_object = backing_object->backing_object;
18168aef1712SMatthew Dillon 			if ((object->backing_object = new_backing_object) != NULL) {
18176be36525SAlan Cox 				VM_OBJECT_LOCK(new_backing_object);
18181c500307SAlan Cox 				LIST_INSERT_HEAD(
18192ad1a3f7SMatthew Dillon 				    &new_backing_object->shadow_head,
18202ad1a3f7SMatthew Dillon 				    object,
18212ad1a3f7SMatthew Dillon 				    shadow_list
18222ad1a3f7SMatthew Dillon 				);
1823eaf13dd7SJohn Dyson 				new_backing_object->shadow_count++;
1824eaf13dd7SJohn Dyson 				new_backing_object->generation++;
1825b921a12bSAlan Cox 				vm_object_reference_locked(new_backing_object);
18266be36525SAlan Cox 				VM_OBJECT_UNLOCK(new_backing_object);
182795e5e988SJohn Dyson 				object->backing_object_offset +=
182895e5e988SJohn Dyson 					backing_object->backing_object_offset;
1829de5f6a77SJohn Dyson 			}
1830df8bae1dSRodney W. Grimes 
1831df8bae1dSRodney W. Grimes 			/*
18320d94caffSDavid Greenman 			 * Drop the reference count on backing_object. Since
183322ec553fSAlan Cox 			 * its ref_count was at least 2, it will not vanish.
1834df8bae1dSRodney W. Grimes 			 */
183522ec553fSAlan Cox 			backing_object->ref_count--;
183622ec553fSAlan Cox 			VM_OBJECT_UNLOCK(backing_object);
1837df8bae1dSRodney W. Grimes 			object_bypasses++;
1838df8bae1dSRodney W. Grimes 		}
1839df8bae1dSRodney W. Grimes 
1840df8bae1dSRodney W. Grimes 		/*
1841df8bae1dSRodney W. Grimes 		 * Try again with this object's new backing object.
1842df8bae1dSRodney W. Grimes 		 */
1843df8bae1dSRodney W. Grimes 	}
1844df8bae1dSRodney W. Grimes }
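
/*
 * Illustrative sketch (editor's addition): a deallocation path might
 * attempt a collapse whenever it holds the last mapping reference.  The
 * object lock must be held, per the assertion above.
 */
#if 0
	VM_OBJECT_LOCK(object);
	vm_object_collapse(object);
	VM_OBJECT_UNLOCK(object);
#endif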
1845df8bae1dSRodney W. Grimes 
1846df8bae1dSRodney W. Grimes /*
1847bff99f0dSAlan Cox  *	vm_object_page_remove:
1848df8bae1dSRodney W. Grimes  *
184968855966SAlan Cox  *	For the given object, either frees or invalidates each of the
185068855966SAlan Cox  *	specified pages.  In general, a page is freed.  However, if a
185168855966SAlan Cox  *	page is wired for any reason other than the existence of a
185268855966SAlan Cox  *	managed, wired mapping, then it may be invalidated but not
185368855966SAlan Cox  *	removed from the object.  Pages are specified by the given
185468855966SAlan Cox  *	range ["start", "end") and Boolean "clean_only".  As a
185568855966SAlan Cox  *	special case, if "end" is zero, then the range extends from
185668855966SAlan Cox  *	"start" to the end of the object.  If "clean_only" is TRUE,
185768855966SAlan Cox  *	then only the non-dirty pages within the specified range are
185868855966SAlan Cox  *	affected.
185968855966SAlan Cox  *
186068855966SAlan Cox  *	In general, this operation should only be performed on objects
186168855966SAlan Cox  *	that contain managed pages.  There are two exceptions.  First,
186268855966SAlan Cox  *	it may be performed on the kernel and kmem objects.  Second,
186368855966SAlan Cox  *	it may be used by msync(..., MS_INVALIDATE) to invalidate
186468855966SAlan Cox  *	device-backed pages.
1865df8bae1dSRodney W. Grimes  *
1866df8bae1dSRodney W. Grimes  *	The object must be locked.
1867df8bae1dSRodney W. Grimes  */
186826f9a767SRodney W. Grimes void
1869ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1870ecde4b32SAlan Cox     boolean_t clean_only)
1871df8bae1dSRodney W. Grimes {
1872d031cff1SMatthew Dillon 	vm_page_t p, next;
187359677d3cSAlan Cox 	int wirings;
1874df8bae1dSRodney W. Grimes 
1875ecde4b32SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1876ecde4b32SAlan Cox 	if (object->resident_page_count == 0)
187725732691SAlan Cox 		goto skipmemq;
187895e5e988SJohn Dyson 
18798b03c8edSMatthew Dillon 	/*
18808b03c8edSMatthew Dillon 	 * Since physically-backed objects do not use managed pages, we can't
18818b03c8edSMatthew Dillon 	 * remove pages from the object (we must instead remove the page
18828b03c8edSMatthew Dillon 	 * references, and then destroy the object).
18838b03c8edSMatthew Dillon 	 */
18849f5c801bSAlan Cox 	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
18859f5c801bSAlan Cox 	    object == kmem_object,
1886ecde4b32SAlan Cox 	    ("attempt to remove pages from a physical object"));
18878b03c8edSMatthew Dillon 
1888d474eaaaSDoug Rabson 	vm_object_pip_add(object, 1);
188926f9a767SRodney W. Grimes again:
189075741c04SAlan Cox 	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
189175741c04SAlan Cox 		if (p->pindex < start) {
189275741c04SAlan Cox 			p = vm_page_splay(start, object->root);
189375741c04SAlan Cox 			if ((object->root = p)->pindex < start)
189475741c04SAlan Cox 				p = TAILQ_NEXT(p, listq);
189575741c04SAlan Cox 		}
189675741c04SAlan Cox 	}
1897bfd9b137SAlan Cox 	vm_page_lock_queues();
189875741c04SAlan Cox 	/*
189975741c04SAlan Cox 	 * Assert: the variable p is either (1) the page with the
190075741c04SAlan Cox 	 * least pindex greater than or equal to the parameter "start"
190175741c04SAlan Cox 	 * or (2) NULL.
190275741c04SAlan Cox 	 */
190375741c04SAlan Cox 	for (;
1904bff99f0dSAlan Cox 	     p != NULL && (p->pindex < end || end == 0);
190575741c04SAlan Cox 	     p = next) {
1906b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(p, listq);
190775741c04SAlan Cox 
190859677d3cSAlan Cox 		/*
190959677d3cSAlan Cox 		 * If the page is wired for any reason besides the
191059677d3cSAlan Cox 		 * existence of managed, wired mappings, then it cannot
191168855966SAlan Cox 		 * be freed.  For example, fictitious pages, which
191268855966SAlan Cox 		 * represent device memory, are inherently wired and
191368855966SAlan Cox 		 * cannot be freed.  They can, however, be invalidated
191468855966SAlan Cox 		 * if "clean_only" is FALSE.
191559677d3cSAlan Cox 		 */
191659677d3cSAlan Cox 		if ((wirings = p->wire_count) != 0 &&
191759677d3cSAlan Cox 		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
191868855966SAlan Cox 			/* Fictitious pages do not have managed mappings. */
191968855966SAlan Cox 			if ((p->flags & PG_FICTITIOUS) == 0)
19204fec79beSAlan Cox 				pmap_remove_all(p);
192159677d3cSAlan Cox 			/* Account for removal of managed, wired mappings. */
192259677d3cSAlan Cox 			p->wire_count -= wirings;
1923a28042d1SAlan Cox 			if (!clean_only) {
1924bd7e5f99SJohn Dyson 				p->valid = 0;
1925a28042d1SAlan Cox 				vm_page_undirty(p);
1926a28042d1SAlan Cox 			}
19270d94caffSDavid Greenman 			continue;
19280d94caffSDavid Greenman 		}
192932585dd6SAlan Cox 		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
193026f9a767SRodney W. Grimes 			goto again;
193168855966SAlan Cox 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
193268855966SAlan Cox 		    ("vm_object_page_remove: page %p is fictitious", p));
19338f9110f6SJohn Dyson 		if (clean_only && p->valid) {
193478985e42SAlan Cox 			pmap_remove_write(p);
193526f4eea5SAlan Cox 			if (p->dirty)
19367c1f6cedSDavid Greenman 				continue;
19377c1f6cedSDavid Greenman 		}
19384fec79beSAlan Cox 		pmap_remove_all(p);
193959677d3cSAlan Cox 		/* Account for removal of managed, wired mappings. */
194059677d3cSAlan Cox 		if (wirings != 0)
194159677d3cSAlan Cox 			p->wire_count -= wirings;
1942df8bae1dSRodney W. Grimes 		vm_page_free(p);
194326f9a767SRodney W. Grimes 	}
194432585dd6SAlan Cox 	vm_page_unlock_queues();
1945f919ebdeSDavid Greenman 	vm_object_pip_wakeup(object);
194625732691SAlan Cox skipmemq:
1947c9444914SAlan Cox 	if (__predict_false(object->cache != NULL))
1948c9444914SAlan Cox 		vm_page_cache_free(object, start, end);
1949c0503609SDavid Greenman }
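
/*
 * Illustrative sketch (editor's addition): freeing every page of a
 * hypothetical object "obj"; end == 0 selects the range from "start" to
 * the end of the object, per the description above, and clean_only ==
 * FALSE frees dirty pages as well.
 */
#if 0
	VM_OBJECT_LOCK(obj);
	vm_object_page_remove(obj, 0, 0, FALSE);
	VM_OBJECT_UNLOCK(obj);
#endif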
1950df8bae1dSRodney W. Grimes 
1951df8bae1dSRodney W. Grimes /*
1952387aabc5SAlan Cox  *	Populate the specified range of the object with valid pages.  Returns
1953387aabc5SAlan Cox  *	TRUE if the range is successfully populated and FALSE otherwise.
1954387aabc5SAlan Cox  *
1955387aabc5SAlan Cox  *	Note: This function should be optimized to pass a larger array of
1956387aabc5SAlan Cox  *	pages to vm_pager_get_pages() before it is applied to a non-
1957387aabc5SAlan Cox  *	OBJT_DEVICE object.
1958387aabc5SAlan Cox  *
1959387aabc5SAlan Cox  *	The object must be locked.
1960387aabc5SAlan Cox  */
1961387aabc5SAlan Cox boolean_t
1962387aabc5SAlan Cox vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1963387aabc5SAlan Cox {
1964387aabc5SAlan Cox 	vm_page_t m, ma[1];
1965387aabc5SAlan Cox 	vm_pindex_t pindex;
1966387aabc5SAlan Cox 	int rv;
1967387aabc5SAlan Cox 
1968387aabc5SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1969387aabc5SAlan Cox 	for (pindex = start; pindex < end; pindex++) {
1970387aabc5SAlan Cox 		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
1971387aabc5SAlan Cox 		    VM_ALLOC_RETRY);
1972387aabc5SAlan Cox 		if (m->valid != VM_PAGE_BITS_ALL) {
1973387aabc5SAlan Cox 			ma[0] = m;
1974387aabc5SAlan Cox 			rv = vm_pager_get_pages(object, ma, 1, 0);
1975387aabc5SAlan Cox 			m = vm_page_lookup(object, pindex);
1976387aabc5SAlan Cox 			if (m == NULL)
1977387aabc5SAlan Cox 				break;
1978387aabc5SAlan Cox 			if (rv != VM_PAGER_OK) {
1979387aabc5SAlan Cox 				vm_page_lock_queues();
1980387aabc5SAlan Cox 				vm_page_free(m);
1981387aabc5SAlan Cox 				vm_page_unlock_queues();
1982387aabc5SAlan Cox 				break;
1983387aabc5SAlan Cox 			}
1984387aabc5SAlan Cox 		}
1985387aabc5SAlan Cox 		/*
1986387aabc5SAlan Cox 		 * Keep "m" busy because a subsequent iteration may unlock
1987387aabc5SAlan Cox 		 * the object.
1988387aabc5SAlan Cox 		 */
1989387aabc5SAlan Cox 	}
1990387aabc5SAlan Cox 	if (pindex > start) {
1991387aabc5SAlan Cox 		m = vm_page_lookup(object, start);
1992387aabc5SAlan Cox 		while (m != NULL && m->pindex < pindex) {
1993387aabc5SAlan Cox 			vm_page_wakeup(m);
1994387aabc5SAlan Cox 			m = TAILQ_NEXT(m, listq);
1995387aabc5SAlan Cox 		}
1996387aabc5SAlan Cox 	}
1997387aabc5SAlan Cox 	return (pindex == end);
1998387aabc5SAlan Cox }
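
/*
 * Illustrative sketch (editor's addition): paging in the first "npages"
 * pages of a locked, hypothetical object "obj" and reporting a pager
 * failure.
 */
#if 0
	VM_OBJECT_LOCK(obj);
	if (!vm_object_populate(obj, 0, npages))
		printf("vm_object_populate: pager error\n");
	VM_OBJECT_UNLOCK(obj);
#endif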
1999387aabc5SAlan Cox 
2000387aabc5SAlan Cox /*
2001df8bae1dSRodney W. Grimes  *	Routine:	vm_object_coalesce
2002df8bae1dSRodney W. Grimes  *	Function:	Coalesces two objects backing up adjoining
2003df8bae1dSRodney W. Grimes  *			regions of memory into a single object.
2004df8bae1dSRodney W. Grimes  *
2005df8bae1dSRodney W. Grimes  *	returns TRUE if objects were combined.
2006df8bae1dSRodney W. Grimes  *
2007df8bae1dSRodney W. Grimes  *	NOTE:	Only works at the moment if the second object is NULL -
2008df8bae1dSRodney W. Grimes  *		if it's not, which object do we lock first?
2009df8bae1dSRodney W. Grimes  *
2010df8bae1dSRodney W. Grimes  *	Parameters:
2011df8bae1dSRodney W. Grimes  *		prev_object	First object to coalesce
2012df8bae1dSRodney W. Grimes  *		prev_offset	Offset into prev_object
2013df8bae1dSRodney W. Grimes  *		prev_size	Size of reference to prev_object
201457a21abaSAlan Cox  *		next_size	Size of reference to the second object
20153364c323SKonstantin Belousov  *		reserved	Indicator that extension region has
20163364c323SKonstantin Belousov  *				swap accounted for
2017df8bae1dSRodney W. Grimes  *
2018df8bae1dSRodney W. Grimes  *	Conditions:
2019df8bae1dSRodney W. Grimes  *	The object must *not* be locked.
2020df8bae1dSRodney W. Grimes  */
20210d94caffSDavid Greenman boolean_t
202257a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
20233364c323SKonstantin Belousov     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2024df8bae1dSRodney W. Grimes {
2025ea41812fSAlan Cox 	vm_pindex_t next_pindex;
2026df8bae1dSRodney W. Grimes 
202700e1854aSAlan Cox 	if (prev_object == NULL)
2028df8bae1dSRodney W. Grimes 		return (TRUE);
2029bdbfbaafSAlan Cox 	VM_OBJECT_LOCK(prev_object);
20304112823fSMatthew Dillon 	if (prev_object->type != OBJT_DEFAULT &&
20314112823fSMatthew Dillon 	    prev_object->type != OBJT_SWAP) {
2032bdbfbaafSAlan Cox 		VM_OBJECT_UNLOCK(prev_object);
203330dcfc09SJohn Dyson 		return (FALSE);
203430dcfc09SJohn Dyson 	}
203530dcfc09SJohn Dyson 
2036df8bae1dSRodney W. Grimes 	/*
2037df8bae1dSRodney W. Grimes 	 * Try to collapse the object first
2038df8bae1dSRodney W. Grimes 	 */
2039df8bae1dSRodney W. Grimes 	vm_object_collapse(prev_object);
2040df8bae1dSRodney W. Grimes 
2041df8bae1dSRodney W. Grimes 	/*
20420d94caffSDavid Greenman 	 * Can't coalesce if: (1) more than one reference, (2) paged out,
20430d94caffSDavid Greenman 	 * (3) shadows another object, or (4) has a copy elsewhere (any of
20440d94caffSDavid Greenman 	 * which means the pages not mapped to prev_entry may be in use anyway).
2045df8bae1dSRodney W. Grimes 	 */
20468cc7e047SJohn Dyson 	if (prev_object->backing_object != NULL) {
2047bdbfbaafSAlan Cox 		VM_OBJECT_UNLOCK(prev_object);
2048df8bae1dSRodney W. Grimes 		return (FALSE);
2049df8bae1dSRodney W. Grimes 	}
2050a316d390SJohn Dyson 
2051a316d390SJohn Dyson 	prev_size >>= PAGE_SHIFT;
2052a316d390SJohn Dyson 	next_size >>= PAGE_SHIFT;
205357a21abaSAlan Cox 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
20548cc7e047SJohn Dyson 
20558cc7e047SJohn Dyson 	if ((prev_object->ref_count > 1) &&
2056ea41812fSAlan Cox 	    (prev_object->size != next_pindex)) {
2057bdbfbaafSAlan Cox 		VM_OBJECT_UNLOCK(prev_object);
20588cc7e047SJohn Dyson 		return (FALSE);
20598cc7e047SJohn Dyson 	}
20608cc7e047SJohn Dyson 
2061df8bae1dSRodney W. Grimes 	/*
20623364c323SKonstantin Belousov 	 * Account for the charge.
20633364c323SKonstantin Belousov 	 */
20643364c323SKonstantin Belousov 	if (prev_object->uip != NULL) {
20653364c323SKonstantin Belousov 
20663364c323SKonstantin Belousov 		/*
20673364c323SKonstantin Belousov 		 * If prev_object was charged, then this mapping,
20683364c323SKonstantin Belousov 		 * although not charged now, may become writable
20693364c323SKonstantin Belousov 		 * later.  A non-NULL uip in the object would prevent
20703364c323SKonstantin Belousov 		 * swap reservation during enabling of the write
20713364c323SKonstantin Belousov 		 * access, so reserve swap now.  A failed reservation
20723364c323SKonstantin Belousov 		 * causes allocation of a separate object for the map
20733364c323SKonstantin Belousov 		 * entry, and swap reservation for this entry is
20743364c323SKonstantin Belousov 		 * managed at the appropriate time.
20753364c323SKonstantin Belousov 		 */
20763364c323SKonstantin Belousov 		if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
20773364c323SKonstantin Belousov 		    prev_object->uip)) {
			VM_OBJECT_UNLOCK(prev_object);
20783364c323SKonstantin Belousov 			return (FALSE);
20793364c323SKonstantin Belousov 		}
20803364c323SKonstantin Belousov 		prev_object->charge += ptoa(next_size);
20813364c323SKonstantin Belousov 	}
20823364c323SKonstantin Belousov 
20833364c323SKonstantin Belousov 	/*
20840d94caffSDavid Greenman 	 * Remove any pages that may still be in the object from a previous
20850d94caffSDavid Greenman 	 * deallocation.
2086df8bae1dSRodney W. Grimes 	 */
2087ea41812fSAlan Cox 	if (next_pindex < prev_object->size) {
2088df8bae1dSRodney W. Grimes 		vm_object_page_remove(prev_object,
2089ea41812fSAlan Cox 				      next_pindex,
2090ea41812fSAlan Cox 				      next_pindex + next_size, FALSE);
2091ea41812fSAlan Cox 		if (prev_object->type == OBJT_SWAP)
2092ea41812fSAlan Cox 			swap_pager_freespace(prev_object,
2093ea41812fSAlan Cox 					     next_pindex, next_size);
20943364c323SKonstantin Belousov #if 0
20953364c323SKonstantin Belousov 		if (prev_object->uip != NULL) {
20963364c323SKonstantin Belousov 			KASSERT(prev_object->charge >=
20973364c323SKonstantin Belousov 			    ptoa(prev_object->size - next_pindex),
20983364c323SKonstantin Belousov 			    ("object %p overcharged 1 %jx %jx", prev_object,
20993364c323SKonstantin Belousov 				(uintmax_t)next_pindex, (uintmax_t)next_size));
21003364c323SKonstantin Belousov 			prev_object->charge -= ptoa(prev_object->size -
21013364c323SKonstantin Belousov 			    next_pindex);
21023364c323SKonstantin Belousov 		}
21033364c323SKonstantin Belousov #endif
2104ea41812fSAlan Cox 	}
2105df8bae1dSRodney W. Grimes 
2106df8bae1dSRodney W. Grimes 	/*
2107df8bae1dSRodney W. Grimes 	 * Extend the object if necessary.
2108df8bae1dSRodney W. Grimes 	 */
2109ea41812fSAlan Cox 	if (next_pindex + next_size > prev_object->size)
2110ea41812fSAlan Cox 		prev_object->size = next_pindex + next_size;
2111df8bae1dSRodney W. Grimes 
2112bdbfbaafSAlan Cox 	VM_OBJECT_UNLOCK(prev_object);
2113df8bae1dSRodney W. Grimes 	return (TRUE);
2114df8bae1dSRodney W. Grimes }
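
/*
 * Illustrative sketch (editor's addition): testing whether a mapping that
 * grows by "grow_size" bytes can reuse its existing object, as map
 * extension code might.  Sizes are in bytes, and the object is passed
 * unlocked, per the conditions stated above.
 */
#if 0
	if (vm_object_coalesce(prev_object, prev_offset, prev_size,
	    grow_size, FALSE)) {
		/* Reuse prev_object for the extended mapping. */
	}
#endif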
2115df8bae1dSRodney W. Grimes 
21167a5a6352SMatthew Dillon void
21177a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object)
21187a5a6352SMatthew Dillon {
21197a5a6352SMatthew Dillon 	struct vnode *vp;
21207a5a6352SMatthew Dillon 
2121de33beddSAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2122af51d7bfSAlan Cox 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2123ee39666aSJeff Roberson 		return;
2124af51d7bfSAlan Cox 	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
21257a5a6352SMatthew Dillon 	if (object->type == OBJT_VNODE &&
21267a5a6352SMatthew Dillon 	    (vp = (struct vnode *)object->handle) != NULL) {
2127e6e370a7SJeff Roberson 		VI_LOCK(vp);
2128e6e370a7SJeff Roberson 		vp->v_iflag |= VI_OBJDIRTY;
2129e6e370a7SJeff Roberson 		VI_UNLOCK(vp);
21307a5a6352SMatthew Dillon 	}
21317a5a6352SMatthew Dillon }
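
/*
 * Illustrative sketch (editor's addition): a write-fault path might mark
 * a locked, vnode-backed object, and thus its vnode, as potentially dirty
 * before granting write access.
 */
#if 0
	VM_OBJECT_LOCK(object);
	vm_object_set_writeable_dirty(object);
	VM_OBJECT_UNLOCK(object);
#endif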
21327a5a6352SMatthew Dillon 
2133c7c34a24SBruce Evans #include "opt_ddb.h"
2134c3cb3e12SDavid Greenman #ifdef DDB
2135c7c34a24SBruce Evans #include <sys/kernel.h>
2136c7c34a24SBruce Evans 
2137ce9edcf5SPoul-Henning Kamp #include <sys/cons.h>
2138c7c34a24SBruce Evans 
2139c7c34a24SBruce Evans #include <ddb/ddb.h>
2140c7c34a24SBruce Evans 
2141cac597e4SBruce Evans static int
21421b40f8c0SMatthew Dillon _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2143a1f6d91cSDavid Greenman {
2144a1f6d91cSDavid Greenman 	vm_map_t tmpm;
2145a1f6d91cSDavid Greenman 	vm_map_entry_t tmpe;
2146a1f6d91cSDavid Greenman 	vm_object_t obj;
2147a1f6d91cSDavid Greenman 	int entcount;
2148a1f6d91cSDavid Greenman 
2149a1f6d91cSDavid Greenman 	if (map == 0)
2150a1f6d91cSDavid Greenman 		return 0;
2151a1f6d91cSDavid Greenman 
2152a1f6d91cSDavid Greenman 	if (entry == 0) {
2153a1f6d91cSDavid Greenman 		tmpe = map->header.next;
2154a1f6d91cSDavid Greenman 		entcount = map->nentries;
2155a1f6d91cSDavid Greenman 		while (entcount-- && (tmpe != &map->header)) {
2156a1f6d91cSDavid Greenman 			if (_vm_object_in_map(map, object, tmpe)) {
2157a1f6d91cSDavid Greenman 				return 1;
2158a1f6d91cSDavid Greenman 			}
2159a1f6d91cSDavid Greenman 			tmpe = tmpe->next;
2160a1f6d91cSDavid Greenman 		}
21619fdfe602SMatthew Dillon 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
21629fdfe602SMatthew Dillon 		tmpm = entry->object.sub_map;
2163a1f6d91cSDavid Greenman 		tmpe = tmpm->header.next;
2164a1f6d91cSDavid Greenman 		entcount = tmpm->nentries;
2165a1f6d91cSDavid Greenman 		while (entcount-- && tmpe != &tmpm->header) {
2166a1f6d91cSDavid Greenman 			if (_vm_object_in_map(tmpm, object, tmpe)) {
2167a1f6d91cSDavid Greenman 				return 1;
2168a1f6d91cSDavid Greenman 			}
2169a1f6d91cSDavid Greenman 			tmpe = tmpe->next;
2170a1f6d91cSDavid Greenman 		}
21718aef1712SMatthew Dillon 	} else if ((obj = entry->object.vm_object) != NULL) {
217224a1cce3SDavid Greenman 		for (; obj; obj = obj->backing_object)
2173a1f6d91cSDavid Greenman 			if (obj == object) {
2174a1f6d91cSDavid Greenman 				return 1;
2175a1f6d91cSDavid Greenman 			}
2176a1f6d91cSDavid Greenman 	}
2177a1f6d91cSDavid Greenman 	return 0;
2178a1f6d91cSDavid Greenman }
2179a1f6d91cSDavid Greenman 
2180cac597e4SBruce Evans static int
21811b40f8c0SMatthew Dillon vm_object_in_map(vm_object_t object)
2182a1f6d91cSDavid Greenman {
2183a1f6d91cSDavid Greenman 	struct proc *p;
21841005a129SJohn Baldwin 
218560517fd1SJohn Baldwin 	/* sx_slock(&allproc_lock); */
2186f67af5c9SXin LI 	FOREACH_PROC_IN_SYSTEM(p) {
2187a1f6d91cSDavid Greenman 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2188a1f6d91cSDavid Greenman 			continue;
2189553629ebSJake Burkholder 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL)) {
219060517fd1SJohn Baldwin 			/* sx_sunlock(&allproc_lock); */
2191a1f6d91cSDavid Greenman 			return 1;
2192a1f6d91cSDavid Greenman 		}
2193553629ebSJake Burkholder 	}
219460517fd1SJohn Baldwin 	/* sx_sunlock(&allproc_lock); */
2195a1f6d91cSDavid Greenman 	if (_vm_object_in_map(kernel_map, object, NULL))
2196a1f6d91cSDavid Greenman 		return 1;
2197a1f6d91cSDavid Greenman 	if (_vm_object_in_map(kmem_map, object, NULL))
2198a1f6d91cSDavid Greenman 		return 1;
2199a1f6d91cSDavid Greenman 	if (_vm_object_in_map(pager_map, object, NULL))
2200a1f6d91cSDavid Greenman 		return 1;
2201a1f6d91cSDavid Greenman 	if (_vm_object_in_map(buffer_map, object, NULL))
2202a1f6d91cSDavid Greenman 		return 1;
2203a1f6d91cSDavid Greenman 	return 0;
2204a1f6d91cSDavid Greenman }
2205a1f6d91cSDavid Greenman 
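/*
 *	"show vmochk" DDB command: sanity-check all internal (unnamed
 *	default or swap-backed) objects, reporting any that have a zero
 *	reference count or that cannot be found in any map.
 */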
2206c7c34a24SBruce Evans DB_SHOW_COMMAND(vmochk, vm_object_check)
2207f708ef1bSPoul-Henning Kamp {
2208a1f6d91cSDavid Greenman 	vm_object_t object;
2209a1f6d91cSDavid Greenman 
2210a1f6d91cSDavid Greenman 	/*
2211a1f6d91cSDavid Greenman 	 * Make sure that internal objects are still mapped somewhere
2212a1f6d91cSDavid Greenman 	 * and that none of them have a zero reference count.
2213a1f6d91cSDavid Greenman 	 */
2214cc64b484SAlfred Perlstein 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
221524a1cce3SDavid Greenman 		if (object->handle == NULL &&
221624a1cce3SDavid Greenman 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2217a1f6d91cSDavid Greenman 			if (object->ref_count == 0) {
22183efc015bSPeter Wemm 				db_printf("vmochk: internal obj has zero ref count: size %ld\n",
22193efc015bSPeter Wemm 					(long)object->size);
2220a1f6d91cSDavid Greenman 			}
2221a1f6d91cSDavid Greenman 			if (!vm_object_in_map(object)) {
2222fc62ef1fSBruce Evans 				db_printf(
2223fc62ef1fSBruce Evans 			"vmochk: internal obj is not in a map: "
2224fc62ef1fSBruce Evans 			"ref: %d, size: %lu (0x%lx), backing_object: %p\n",
2225fc62ef1fSBruce Evans 				    object->ref_count, (u_long)object->size,
2226fc62ef1fSBruce Evans 				    (u_long)object->size,
2227fc62ef1fSBruce Evans 				    (void *)object->backing_object);
2228a1f6d91cSDavid Greenman 			}
2229a1f6d91cSDavid Greenman 		}
2230a1f6d91cSDavid Greenman 	}
2231a1f6d91cSDavid Greenman }
2232a1f6d91cSDavid Greenman 
223326f9a767SRodney W. Grimes /*
2234df8bae1dSRodney W. Grimes  *	vm_object_print:	[ debug ]
2235df8bae1dSRodney W. Grimes  */
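/*
 *	Implements the DDB "show object <addr>" command; when an address
 *	is supplied, the object's resident pages are listed as well.
 */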
2236c7c34a24SBruce Evans DB_SHOW_COMMAND(object, vm_object_print_static)
2237df8bae1dSRodney W. Grimes {
2238c7c34a24SBruce Evans 	/* XXX convert args. */
2239c7c34a24SBruce Evans 	vm_object_t object = (vm_object_t)addr;
2240c7c34a24SBruce Evans 	boolean_t full = have_addr;
2241c7c34a24SBruce Evans 
2242d031cff1SMatthew Dillon 	vm_page_t p;
2243df8bae1dSRodney W. Grimes 
2244c7c34a24SBruce Evans 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2245c7c34a24SBruce Evans #define	count	was_count
2246c7c34a24SBruce Evans 
2247d031cff1SMatthew Dillon 	int count;
2248df8bae1dSRodney W. Grimes 
2249df8bae1dSRodney W. Grimes 	if (object == NULL)
2250df8bae1dSRodney W. Grimes 		return;
2251df8bae1dSRodney W. Grimes 
2252eb95adefSBruce Evans 	db_iprintf(
22533364c323SKonstantin Belousov 	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x, uip=%d, charge=0x%jx\n",
2254e47cd172SMaxime Henrion 	    object, (int)object->type, (uintmax_t)object->size,
22553364c323SKonstantin Belousov 	    object->resident_page_count, object->ref_count, object->flags,
22563364c323SKonstantin Belousov 	    object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
2257e47cd172SMaxime Henrion 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
22581c7c3c6aSMatthew Dillon 	    object->shadow_count,
2259eb95adefSBruce Evans 	    object->backing_object ? object->backing_object->ref_count : 0,
2260e47cd172SMaxime Henrion 	    object->backing_object, (uintmax_t)object->backing_object_offset);
2261df8bae1dSRodney W. Grimes 
2262df8bae1dSRodney W. Grimes 	if (!full)
2263df8bae1dSRodney W. Grimes 		return;
2264df8bae1dSRodney W. Grimes 
2265c7c34a24SBruce Evans 	db_indent += 2;
2266df8bae1dSRodney W. Grimes 	count = 0;
2267fc2ffbe6SPoul-Henning Kamp 	TAILQ_FOREACH(p, &object->memq, listq) {
2268df8bae1dSRodney W. Grimes 		if (count == 0)
2269c7c34a24SBruce Evans 			db_iprintf("memory:=");
2270df8bae1dSRodney W. Grimes 		else if (count == 6) {
2271c7c34a24SBruce Evans 			db_printf("\n");
2272c7c34a24SBruce Evans 			db_iprintf(" ...");
2273df8bae1dSRodney W. Grimes 			count = 0;
2274df8bae1dSRodney W. Grimes 		} else
2275c7c34a24SBruce Evans 			db_printf(",");
2276df8bae1dSRodney W. Grimes 		count++;
2277df8bae1dSRodney W. Grimes 
2278e47cd172SMaxime Henrion 		db_printf("(off=0x%jx,page=0x%jx)",
2279e47cd172SMaxime Henrion 		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2280df8bae1dSRodney W. Grimes 	}
2281df8bae1dSRodney W. Grimes 	if (count != 0)
2282c7c34a24SBruce Evans 		db_printf("\n");
2283c7c34a24SBruce Evans 	db_indent -= 2;
2284df8bae1dSRodney W. Grimes }
22855070c7f8SJohn Dyson 
2286c7c34a24SBruce Evans /* XXX. */
2287c7c34a24SBruce Evans #undef count
2288c7c34a24SBruce Evans 
2289c7c34a24SBruce Evans /* XXX need this non-static entry for calling from vm_map_print. */
22905070c7f8SJohn Dyson void
22911b40f8c0SMatthew Dillon vm_object_print(
22921b40f8c0SMatthew Dillon         /* db_expr_t */ long addr,
22931b40f8c0SMatthew Dillon 	boolean_t have_addr,
22941b40f8c0SMatthew Dillon 	/* db_expr_t */ long count,
22951b40f8c0SMatthew Dillon 	char *modif)
2296c7c34a24SBruce Evans {
2297c7c34a24SBruce Evans 	vm_object_print_static(addr, have_addr, count, modif);
2298c7c34a24SBruce Evans }
2299c7c34a24SBruce Evans 
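/*
 *	"show vmopag" DDB command: for each object, print its resident
 *	pages as runs of physically contiguous memory, in the form
 *	index(start)run(length)pa(physical address).  Output pauses every
 *	screenful; any key other than space aborts the listing.
 */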
2300c7c34a24SBruce Evans DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
23015070c7f8SJohn Dyson {
23025070c7f8SJohn Dyson 	vm_object_t object;
2303bb2ac86fSKonstantin Belousov 	vm_pindex_t fidx;
2304bb2ac86fSKonstantin Belousov 	vm_paddr_t pa;
2305bb2ac86fSKonstantin Belousov 	vm_page_t m, prev_m;
2306bb2ac86fSKonstantin Belousov 	int rcount, nl, c;
2307cc64b484SAlfred Perlstein 
2308bb2ac86fSKonstantin Belousov 	nl = 0;
2309cc64b484SAlfred Perlstein 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2310fc62ef1fSBruce Evans 		db_printf("new object: %p\n", (void *)object);
23115070c7f8SJohn Dyson 		if (nl > 18) {
23125070c7f8SJohn Dyson 			c = cngetc();
23135070c7f8SJohn Dyson 			if (c != ' ')
23145070c7f8SJohn Dyson 				return;
23155070c7f8SJohn Dyson 			nl = 0;
23165070c7f8SJohn Dyson 		}
23175070c7f8SJohn Dyson 		nl++;
23185070c7f8SJohn Dyson 		rcount = 0;
23195070c7f8SJohn Dyson 		fidx = 0;
2320bb2ac86fSKonstantin Belousov 		pa = -1;
2321bb2ac86fSKonstantin Belousov 		TAILQ_FOREACH(m, &object->memq, listq) {
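			/* Scan stops past pindex 128, presumably to bound the output. */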
2322bb2ac86fSKonstantin Belousov 			if (m->pindex > 128)
2323bb2ac86fSKonstantin Belousov 				break;
2324bb2ac86fSKonstantin Belousov 			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2325bb2ac86fSKonstantin Belousov 			    prev_m->pindex + 1 != m->pindex) {
23265070c7f8SJohn Dyson 				if (rcount) {
23273efc015bSPeter Wemm 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
23283efc015bSPeter Wemm 						(long)fidx, rcount, (long)pa);
23295070c7f8SJohn Dyson 					if (nl > 18) {
23305070c7f8SJohn Dyson 						c = cngetc();
23315070c7f8SJohn Dyson 						if (c != ' ')
23325070c7f8SJohn Dyson 							return;
23335070c7f8SJohn Dyson 						nl = 0;
23345070c7f8SJohn Dyson 					}
23355070c7f8SJohn Dyson 					nl++;
23365070c7f8SJohn Dyson 					rcount = 0;
23375070c7f8SJohn Dyson 				}
23385070c7f8SJohn Dyson 			}
23395070c7f8SJohn Dyson 			if (rcount &&
23405070c7f8SJohn Dyson 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
23415070c7f8SJohn Dyson 				++rcount;
23425070c7f8SJohn Dyson 				continue;
23435070c7f8SJohn Dyson 			}
23445070c7f8SJohn Dyson 			if (rcount) {
23452446e4f0SAlan Cox 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
23463efc015bSPeter Wemm 					(long)fidx, rcount, (long)pa);
23475070c7f8SJohn Dyson 				if (nl > 18) {
23485070c7f8SJohn Dyson 					c = cngetc();
23495070c7f8SJohn Dyson 					if (c != ' ')
23505070c7f8SJohn Dyson 						return;
23515070c7f8SJohn Dyson 					nl = 0;
23525070c7f8SJohn Dyson 				}
23535070c7f8SJohn Dyson 				nl++;
23545070c7f8SJohn Dyson 			}
2355bb2ac86fSKonstantin Belousov 			fidx = m->pindex;
23565070c7f8SJohn Dyson 			pa = VM_PAGE_TO_PHYS(m);
23575070c7f8SJohn Dyson 			rcount = 1;
23585070c7f8SJohn Dyson 		}
23595070c7f8SJohn Dyson 		if (rcount) {
23603efc015bSPeter Wemm 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
23613efc015bSPeter Wemm 				(long)fidx, rcount, (long)pa);
23625070c7f8SJohn Dyson 			if (nl > 18) {
23635070c7f8SJohn Dyson 				c = cngetc();
23645070c7f8SJohn Dyson 				if (c != ' ')
23655070c7f8SJohn Dyson 					return;
23665070c7f8SJohn Dyson 				nl = 0;
23675070c7f8SJohn Dyson 			}
23685070c7f8SJohn Dyson 			nl++;
23695070c7f8SJohn Dyson 		}
23705070c7f8SJohn Dyson 	}
23715070c7f8SJohn Dyson }
2372c3cb3e12SDavid Greenman #endif /* DDB */
2373