/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/blockcount.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/pctrie.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *allclean,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *allclean);
static void	vm_object_backing_remove(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

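/*
 * A minimal usage sketch (illustrative only, not a quote of any caller in
 * the tree): a consumer allocates an object sized in pages, takes and drops
 * references, and the final vm_object_deallocate() reclaims the storage.
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, atop(len));
 *	vm_object_reference(obj);	take an extra reference
 *	...
 *	vm_object_deallocate(obj);	drop the extra reference
 *	vm_object_deallocate(obj);	last reference; object is terminated
 */
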
struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM object stats");

static counter_u64_t object_collapses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses,
    "VM object collapses");

static counter_u64_t object_bypasses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses,
    "VM object bypasses");

static counter_u64_t object_collapse_waits = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD,
    &object_collapse_waits,
    "Number of sleeps for collapse");

static void
counter_startup(void)
{

	object_collapses = counter_u64_alloc(M_WAITOK);
	object_bypasses = counter_u64_alloc(M_WAITOK);
	object_collapse_waits = counter_u64_alloc(M_WAITOK);
}
SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

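/*
 * UMA zone destructor, compiled only under INVARIANTS: verify that an
 * object returning to the zone has been completely drained of pages,
 * references, reservations, and paging activity.
 */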
static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(blockcount_read(&object->paging_in_progress) == 0,
	    ("object %p paging_in_progress = %d",
	    object, blockcount_read(&object->paging_in_progress)));
	KASSERT(!vm_object_busied(object),
	    ("object %p busy = %d", object, blockcount_read(&object->busy)));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
}
#endif

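/*
 * UMA zone initializer: set up the object lock and the fields that must
 * hold for any free object, then place the object on the global object
 * list, where it remains for the lifetime of the zone item.
 */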
static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	vm_radix_init(&object->rtree);
	refcount_init(&object->ref_count, 0);
	blockcount_init(&object->paging_in_progress);
	blockcount_init(&object->busy);
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->flags = OBJ_DEAD;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}

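/*
 * Initialize a zone-allocated object with the given type, size, flags,
 * and pager handle.  This is the common back end for the allocation
 * routines below.
 */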
static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
    vm_object_t object, void *handle)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	if (type == OBJT_SWAP)
		pctrie_init(&object->un_pager.swp.swp_blks);

	/*
	 * Ensure that swap_pager_swapoff() iteration over object_list
	 * sees up to date type and pctrie head if it observed
	 * non-dead object.
	 */
	atomic_thread_fence_rel();

	object->pg_color = 0;
	object->flags = flags;
	object->size = size;
	object->domain.dr_policy = NULL;
	object->generation = 1;
	object->cleangeneration = 1;
	refcount_init(&object->ref_count, 1);
	object->memattr = VM_MEMATTR_DEFAULT;
	object->cred = NULL;
	object->charge = 0;
	object->handle = handle;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	vm_radix_zinit();
}

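/*
 * Clear the given flag bits on the object.  The object must be
 * write-locked.
 */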
void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}

/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_MGTDEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	default:
		panic("vm_object_set_memattr: object %p is of undefined type",
		    object);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}

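/*
 * Paging-in-progress (PIP) accounting, implemented with a blockcount.
 * Holding PIP keeps the object identity stable while pager I/O is in
 * flight.  A typical pattern (an illustrative sketch, not a quote of any
 * particular caller):
 *
 *	vm_object_pip_add(object, n);		start n paging operations
 *	... issue I/O, possibly dropping the lock ...
 *	vm_object_pip_wakeupn(object, n);	retire them on completion
 */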
void
vm_object_pip_add(vm_object_t object, short i)
{

	if (i > 0)
		blockcount_acquire(&object->paging_in_progress, i);
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	vm_object_pip_wakeupn(object, 1);
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	if (i > 0)
		blockcount_release(&object->paging_in_progress, i);
}

/*
 * Atomically drop the object lock and wait for pip to drain.  This protects
 * from sleep/wakeup races due to identity changes.  The lock is not
 * re-acquired on return.
 */
static void
vm_object_pip_sleep(vm_object_t object, const char *waitid)
{

	(void)blockcount_sleep(&object->paging_in_progress, &object->lock,
	    waitid, PVM | PDROP);
}

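/*
 * Wait for all paging-in-progress on the object to drain.  The object
 * lock may be dropped while sleeping but is held on return.
 */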
void
vm_object_pip_wait(vm_object_t object, const char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	blockcount_wait(&object->paging_in_progress, &object->lock, waitid,
	    PVM);
}

void
vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid)
{

	VM_OBJECT_ASSERT_UNLOCKED(object);

	blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;
	u_short flags;

	switch (type) {
	case OBJT_DEAD:
		panic("vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_DEFAULT:
	case OBJT_SWAP:
		flags = OBJ_COLORED;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		flags = 0;
		break;
	default:
		panic("vm_object_allocate: type %d is undefined", type);
	}
	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, flags, object, NULL);

	return (object);
}

/*
 *	vm_object_allocate_anon:
 *
 *	Returns a new default object of the given size and marked as
 *	anonymous memory for special split/collapse handling.  Color
 *	to be initialized by the caller.
 */
vm_object_t
vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
    struct ucred *cred, vm_size_t charge)
{
	vm_object_t handle, object;

	if (backing_object == NULL)
		handle = NULL;
	else if ((backing_object->flags & OBJ_ANON) != 0)
		handle = backing_object->handle;
	else
		handle = backing_object;
	object = uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING,
	    object, handle);
	object->cred = cred;
	object->charge = cred != NULL ? charge : 0;
	return (object);
}

static void
vm_object_reference_vnode(vm_object_t object)
{
	struct vnode *vp;
	u_int old;

	/*
	 * vnode objects need the lock for the first reference
	 * to serialize with vm_object_deallocate_vnode().
	 */
	if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
		VM_OBJECT_RLOCK(object);
		old = refcount_acquire(&object->ref_count);
		if (object->type == OBJT_VNODE && old == 0) {
			vp = object->handle;
			vref(vp);
		}
		VM_OBJECT_RUNLOCK(object);
	}
}
50523955314SAlfred Perlstein /*
50698087a06SJeff Roberson  *	vm_object_reference:
50798087a06SJeff Roberson  *
50898087a06SJeff Roberson  *	Acquires a reference to the given object.
50998087a06SJeff Roberson  */
51098087a06SJeff Roberson void
51198087a06SJeff Roberson vm_object_reference(vm_object_t object)
51298087a06SJeff Roberson {
51398087a06SJeff Roberson 
51498087a06SJeff Roberson 	if (object == NULL)
51598087a06SJeff Roberson 		return;
51698087a06SJeff Roberson 
51798087a06SJeff Roberson 	if (object->type == OBJT_VNODE)
51898087a06SJeff Roberson 		vm_object_reference_vnode(object);
51998087a06SJeff Roberson 	else
52098087a06SJeff Roberson 		refcount_acquire(&object->ref_count);
52198087a06SJeff Roberson 	KASSERT((object->flags & OBJ_DEAD) == 0,
52298087a06SJeff Roberson 	    ("vm_object_reference: Referenced dead object."));
52398087a06SJeff Roberson }
52498087a06SJeff Roberson 
/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;
	u_int old;

	VM_OBJECT_ASSERT_LOCKED(object);
	old = refcount_acquire(&object->ref_count);
	if (object->type == OBJT_VNODE && old == 0) {
		vp = object->handle;
		vref(vp);
	}
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference_locked: Referenced dead object."));
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_deallocate_vnode(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;
	bool last;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_deallocate_vnode: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));

	/* Object lock to protect handle lookup. */
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);

	if (!last)
		return;

	if (!umtx_shm_vnobj_persistent)
		umtx_shm_object_terminated(object);

	/* vrele may need the vnode lock. */
	vrele(vp);
}

/*
 * We dropped a reference on an object and discovered that it had a
 * single remaining shadow.  This is a sibling of the reference we
 * dropped.  Attempt to collapse the sibling and backing object.
 */
static vm_object_t
vm_object_deallocate_anon(vm_object_t backing_object)
{
	vm_object_t object;

	/* Fetch the final shadow.  */
	object = LIST_FIRST(&backing_object->shadow_head);
	KASSERT(object != NULL && backing_object->shadow_count == 1,
	    ("vm_object_deallocate_anon: ref_count: %d, shadow_count: %d",
	    backing_object->ref_count, backing_object->shadow_count));
	KASSERT((object->flags & (OBJ_TMPFS_NODE | OBJ_ANON)) == OBJ_ANON,
	    ("invalid shadow object %p", object));

	if (!VM_OBJECT_TRYWLOCK(object)) {
		/*
		 * Prevent object from disappearing since we do not have a
		 * reference.
		 */
		vm_object_pip_add(object, 1);
		VM_OBJECT_WUNLOCK(backing_object);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeup(object);
	} else
		VM_OBJECT_WUNLOCK(backing_object);

	/*
	 * Check for a collapse/terminate race with the last reference holder.
	 */
	if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
	    !refcount_acquire_if_not_zero(&object->ref_count)) {
		VM_OBJECT_WUNLOCK(object);
		return (NULL);
	}
	backing_object = object->backing_object;
	if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)
		vm_object_collapse(object);
	VM_OBJECT_WUNLOCK(object);

	return (object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	bool released;

	while (object != NULL) {
		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.  A ref count
		 * of 1 may be a special case depending on the shadow count
		 * being 0 or 1.  These cases require a write lock on the
		 * object.
		 */
		if ((object->flags & OBJ_ANON) == 0)
			released = refcount_release_if_gt(&object->ref_count, 1);
		else
			released = refcount_release_if_gt(&object->ref_count, 2);
		if (released)
			return;

		if (object->type == OBJT_VNODE) {
			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_VNODE) {
				vm_object_deallocate_vnode(object);
				return;
			}
			VM_OBJECT_RUNLOCK(object);
		}

		VM_OBJECT_WLOCK(object);
		KASSERT(object->ref_count > 0,
		    ("vm_object_deallocate: object deallocated too many times: %d",
		    object->ref_count));

		/*
		 * If this is not the final reference to an anonymous
		 * object we may need to collapse the shadow chain.
		 */
		if (!refcount_release(&object->ref_count)) {
			if (object->ref_count > 1 ||
			    object->shadow_count == 0) {
				if ((object->flags & OBJ_ANON) != 0 &&
				    object->ref_count == 1)
					vm_object_set_flag(object,
					    OBJ_ONEMAPPING);
				VM_OBJECT_WUNLOCK(object);
				return;
			}

			/* Handle collapsing last ref on anonymous objects. */
			object = vm_object_deallocate_anon(object);
			continue;
		}

		/*
		 * Handle the final reference to an object.  We restart
		 * the loop with the backing object to avoid recursion.
		 */
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			vm_object_backing_remove(object);
		}

		KASSERT((object->flags & OBJ_DEAD) == 0,
		    ("vm_object_deallocate: Terminating dead object."));
		vm_object_set_flag(object, OBJ_DEAD);
		vm_object_terminate(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy releases the object's allocation charge and
 *	frees the space for the object.  The object remains on the
 *	global object list as an OBJT_DEAD object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

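/*
 * Helpers maintaining the backing object relationship and the backing
 * object's shadow list.  The _locked variants require the backing
 * object's lock to be held; the others acquire it themselves when
 * shadow list linkage must change.
 */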
static void
vm_object_backing_remove_locked(vm_object_t object)
{
	vm_object_t backing_object;

	backing_object = object->backing_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
	    ("vm_object_backing_remove: Removing collapsing object."));

	if ((object->flags & OBJ_SHADOWLIST) != 0) {
		LIST_REMOVE(object, shadow_list);
		backing_object->shadow_count--;
		object->flags &= ~OBJ_SHADOWLIST;
	}
	object->backing_object = NULL;
}

static void
vm_object_backing_remove(vm_object_t object)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	if ((object->flags & OBJ_SHADOWLIST) != 0) {
		backing_object = object->backing_object;
		VM_OBJECT_WLOCK(backing_object);
		vm_object_backing_remove_locked(object);
		VM_OBJECT_WUNLOCK(backing_object);
	} else
		object->backing_object = NULL;
}

static void
vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	if ((backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_ASSERT_WLOCKED(backing_object);
		LIST_INSERT_HEAD(&backing_object->shadow_head, object,
		    shadow_list);
		backing_object->shadow_count++;
		object->flags |= OBJ_SHADOWLIST;
	}
	object->backing_object = backing_object;
}

static void
vm_object_backing_insert(vm_object_t object, vm_object_t backing_object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	if ((backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_WLOCK(backing_object);
		vm_object_backing_insert_locked(object, backing_object);
		VM_OBJECT_WUNLOCK(backing_object);
	} else
		object->backing_object = backing_object;
}

/*
 * Insert an object into a backing_object's shadow list with an additional
 * reference to the backing_object added.
 */
static void
vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	if ((backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_WLOCK(backing_object);
		KASSERT((backing_object->flags & OBJ_DEAD) == 0,
		    ("shadowing dead anonymous object"));
		vm_object_reference_locked(backing_object);
		vm_object_backing_insert_locked(object, backing_object);
		vm_object_clear_flag(backing_object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(backing_object);
	} else {
		vm_object_reference(backing_object);
		object->backing_object = backing_object;
	}
}

/*
 * Transfer a backing reference from backing_object to object.
 */
static void
vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object)
{
	vm_object_t new_backing_object;

	/*
	 * Note that the reference to backing_object->backing_object
	 * moves from within backing_object to within object.
	 */
	vm_object_backing_remove_locked(object);
	new_backing_object = backing_object->backing_object;
	if (new_backing_object == NULL)
		return;
	if ((new_backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_WLOCK(new_backing_object);
		vm_object_backing_remove_locked(backing_object);
		vm_object_backing_insert_locked(object, new_backing_object);
		VM_OBJECT_WUNLOCK(new_backing_object);
	} else {
		object->backing_object = new_backing_object;
		backing_object->backing_object = NULL;
	}
}

/*
 * Wait for a concurrent collapse to settle.
 */
static void
vm_object_collapse_wait(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	while ((object->flags & OBJ_COLLAPSING) != 0) {
		vm_object_pip_wait(object, "vmcolwait");
		counter_u64_add(object_collapse_waits, 1);
	}
}

/*
 * Waits for a backing object to clear a pending collapse and returns
 * it locked if it is an ANON object.
 */
static vm_object_t
vm_object_backing_collapse_wait(vm_object_t object)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	for (;;) {
		backing_object = object->backing_object;
		if (backing_object == NULL ||
		    (backing_object->flags & OBJ_ANON) == 0)
			return (NULL);
		VM_OBJECT_WLOCK(backing_object);
		if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0)
			break;
		VM_OBJECT_WUNLOCK(object);
		vm_object_pip_sleep(backing_object, "vmbckwait");
		counter_u64_add(object_collapse_waits, 1);
		VM_OBJECT_WLOCK(object);
	}
	return (backing_object);
}

/*
 *	vm_object_terminate_pages removes any remaining pageable pages
 *	from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	vm_page_t p, p_next;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to an empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		KASSERT(p->object == object &&
		    (p->ref_count & VPRC_OBJREF) != 0,
		    ("vm_object_terminate_pages: page %p is inconsistent", p));

		p->object = NULL;
		if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) {
			VM_CNT_INC(v_pfree);
			vm_page_free(p);
		}
	}

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0,
	    ("terminating non-dead obj %p", object));
	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
	    ("terminating collapsing obj %p", object));
	KASSERT(object->backing_object == NULL,
	    ("terminating shadow obj %p", object));

	/*
	 * Wait for the pageout daemon to be done with the object.
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!blockcount_read(&object->paging_in_progress),
	    ("vm_object_terminate: pageout in progress"));

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
{

	vm_page_assert_busied(p);

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
		*allclean = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

1011df8bae1dSRodney W. Grimes /*
1012df8bae1dSRodney W. Grimes  *	vm_object_page_clean
1013df8bae1dSRodney W. Grimes  *
10144f79d873SMatthew Dillon  *	Clean all dirty pages in the specified range of object.  Leaves page
10154f79d873SMatthew Dillon  * 	on whatever queue it is currently on.   If NOSYNC is set then do not
1016fff5403fSJeff Roberson  *	write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
10174f79d873SMatthew Dillon  *	leaving the object dirty.
101826f9a767SRodney W. Grimes  *
10198d34a3bfSKonstantin Belousov  *	For swap objects backing tmpfs regular files, do not flush anything,
10208d34a3bfSKonstantin Belousov  *	but remove write protection on the mapped pages to update mtime through
10218d34a3bfSKonstantin Belousov  *	mmaped writes.
10228d34a3bfSKonstantin Belousov  *
102343b7990eSMatthew Dillon  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
102443b7990eSMatthew Dillon  *	synchronous clustering mode implementation.
102543b7990eSMatthew Dillon  *
102626f9a767SRodney W. Grimes  *	Odd semantics: if start == end, we clean everything.
102726f9a767SRodney W. Grimes  *
102826f9a767SRodney W. Grimes  *	The object must be locked.
1029126d6082SKonstantin Belousov  *
1030126d6082SKonstantin Belousov  *	Returns FALSE if some page from the range was not written, as
1031126d6082SKonstantin Belousov  *	reported by the pager, and TRUE otherwise.
103226f9a767SRodney W. Grimes  */
1033126d6082SKonstantin Belousov boolean_t
103417f3095dSAlan Cox vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
1035e239bb97SKonstantin Belousov     int flags)
1036f6b04d2bSDavid Greenman {
1037e239bb97SKonstantin Belousov 	vm_page_t np, p;
103817f3095dSAlan Cox 	vm_pindex_t pi, tend, tstart;
1039126d6082SKonstantin Belousov 	int curgeneration, n, pagerflags;
104067d0e293SJeff Roberson 	boolean_t eio, res, allclean;
1041f6b04d2bSDavid Greenman 
104289f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
1043e5f299ffSKonstantin Belousov 
10448d34a3bfSKonstantin Belousov 	if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
1045126d6082SKonstantin Belousov 		return (TRUE);
1046f6b04d2bSDavid Greenman 
1047e239bb97SKonstantin Belousov 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
1048e239bb97SKonstantin Belousov 	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
1049e239bb97SKonstantin Belousov 	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
1050e239bb97SKonstantin Belousov 
105117f3095dSAlan Cox 	tstart = OFF_TO_IDX(start);
105217f3095dSAlan Cox 	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
105367d0e293SJeff Roberson 	allclean = tstart == 0 && tend >= object->size;
1054126d6082SKonstantin Belousov 	res = TRUE;
1055f6b04d2bSDavid Greenman 
1056bd7e5f99SJohn Dyson rescan:
10572d8acc0fSJohn Dyson 	curgeneration = object->generation;
10582d8acc0fSJohn Dyson 
105917f3095dSAlan Cox 	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
1060bd7e5f99SJohn Dyson 		pi = p->pindex;
1061e239bb97SKonstantin Belousov 		if (pi >= tend)
1062e239bb97SKonstantin Belousov 			break;
1063e239bb97SKonstantin Belousov 		np = TAILQ_NEXT(p, listq);
10640012f373SJeff Roberson 		if (vm_page_none_valid(p))
1065aef922f5SJohn Dyson 			continue;
106663e97555SJeff Roberson 		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
106767d0e293SJeff Roberson 			if (object->generation != curgeneration &&
106867d0e293SJeff Roberson 			    (flags & OBJPC_SYNC) != 0)
1069e239bb97SKonstantin Belousov 				goto rescan;
1070780636b7SKonstantin Belousov 			np = vm_page_find_least(object, pi);
1071780636b7SKonstantin Belousov 			continue;
1072f6b04d2bSDavid Greenman 		}
107367d0e293SJeff Roberson 		if (!vm_object_page_remove_write(p, flags, &allclean)) {
107463e97555SJeff Roberson 			vm_page_xunbusy(p);
1075bd7e5f99SJohn Dyson 			continue;
107663e97555SJeff Roberson 		}
10778d34a3bfSKonstantin Belousov 		if (object->type == OBJT_VNODE) {
10783280870dSKonstantin Belousov 			n = vm_object_page_collect_flush(object, p, pagerflags,
107967d0e293SJeff Roberson 			    flags, &allclean, &eio);
1080126d6082SKonstantin Belousov 			if (eio) {
1081126d6082SKonstantin Belousov 				res = FALSE;
108267d0e293SJeff Roberson 				allclean = FALSE;
1083126d6082SKonstantin Belousov 			}
108467d0e293SJeff Roberson 			if (object->generation != curgeneration &&
108567d0e293SJeff Roberson 			    (flags & OBJPC_SYNC) != 0)
1086b9b7a4beSMatthew Dillon 				goto rescan;
1087031ec8c1SKonstantin Belousov 
1088031ec8c1SKonstantin Belousov 			/*
1089031ec8c1SKonstantin Belousov 			 * If the VOP_PUTPAGES() did a truncated write, such
1090031ec8c1SKonstantin Belousov 			 * that even the first page of the run is not fully
1091031ec8c1SKonstantin Belousov 			 * written, vm_pageout_flush() returns 0 as the run
1092031ec8c1SKonstantin Belousov 			 * length.  Since the condition that caused the
1093031ec8c1SKonstantin Belousov 			 * truncated write may be permanent, e.g., exhausted
1094031ec8c1SKonstantin Belousov 			 * free space, accepting n == 0 would cause an
1095031ec8c1SKonstantin Belousov 			 * infinite loop.
1096031ec8c1SKonstantin Belousov 			 *
1097031ec8c1SKonstantin Belousov 			 * Forwarding the iterator leaves the unwritten page
1098031ec8c1SKonstantin Belousov 			 * behind, but there is not much we can do if the
1099031ec8c1SKonstantin Belousov 			 * filesystem refuses to write it.
1099031ec8c1SKonstantin Belousov 			 */
1100126d6082SKonstantin Belousov 			if (n == 0) {
1101031ec8c1SKonstantin Belousov 				n = 1;
110267d0e293SJeff Roberson 				allclean = FALSE;
1103126d6082SKonstantin Belousov 			}
11048d34a3bfSKonstantin Belousov 		} else {
11058d34a3bfSKonstantin Belousov 			n = 1;
11068d34a3bfSKonstantin Belousov 			vm_page_xunbusy(p);
11078d34a3bfSKonstantin Belousov 		}
1108e239bb97SKonstantin Belousov 		np = vm_page_find_least(object, pi + n);
1109b9b7a4beSMatthew Dillon 	}
1110b9b7a4beSMatthew Dillon #if 0
1111e239bb97SKonstantin Belousov 	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
1112b9b7a4beSMatthew Dillon #endif
1113b9b7a4beSMatthew Dillon 
11148d34a3bfSKonstantin Belousov 	/*
11158d34a3bfSKonstantin Belousov 	 * Leave updating cleangeneration for tmpfs objects to tmpfs
11168d34a3bfSKonstantin Belousov 	 * scan.  It needs to update mtime, which happens for other
11178d34a3bfSKonstantin Belousov 	 * filesystems during page writeouts.
11188d34a3bfSKonstantin Belousov 	 */
11198d34a3bfSKonstantin Belousov 	if (allclean && object->type == OBJT_VNODE)
112067d0e293SJeff Roberson 		object->cleangeneration = curgeneration;
1121126d6082SKonstantin Belousov 	return (res);
1122b9b7a4beSMatthew Dillon }
1123b9b7a4beSMatthew Dillon 
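/*
 * Collect a run of consecutive dirty pages around "p", exclusive-busying
 * and write-protecting each one, and pass the run to vm_pageout_flush().
 * At most vm_pageout_page_count pages are gathered, scanning forward from
 * "p" first and then backward.  On return, "*eio" is set if the pager
 * reported a hard error.  Returns the run length written, as reported by
 * vm_pageout_flush().
 */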
1124b9b7a4beSMatthew Dillon static int
11253280870dSKonstantin Belousov vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
112667d0e293SJeff Roberson     int flags, boolean_t *allclean, boolean_t *eio)
1127b9b7a4beSMatthew Dillon {
11283157c503SKonstantin Belousov 	vm_page_t ma[vm_pageout_page_count], p_first, tp;
11293157c503SKonstantin Belousov 	int count, i, mreq, runlen;
1130b9b7a4beSMatthew Dillon 
11317bec141bSKip Macy 	vm_page_lock_assert(p, MA_NOTOWNED);
113263e97555SJeff Roberson 	vm_page_assert_xbusied(p);
113389f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
11343157c503SKonstantin Belousov 
11353157c503SKonstantin Belousov 	count = 1;
11363157c503SKonstantin Belousov 	mreq = 0;
11373157c503SKonstantin Belousov 
11383157c503SKonstantin Belousov 	for (tp = p; count < vm_pageout_page_count; count++) {
11393157c503SKonstantin Belousov 		tp = vm_page_next(tp);
114063e97555SJeff Roberson 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1141bd7e5f99SJohn Dyson 			break;
114267d0e293SJeff Roberson 		if (!vm_object_page_remove_write(tp, flags, allclean)) {
114363e97555SJeff Roberson 			vm_page_xunbusy(tp);
1144bd7e5f99SJohn Dyson 			break;
1145bd7e5f99SJohn Dyson 		}
114663e97555SJeff Roberson 	}
1147aef922f5SJohn Dyson 
11483157c503SKonstantin Belousov 	for (p_first = p; count < vm_pageout_page_count; count++) {
11493157c503SKonstantin Belousov 		tp = vm_page_prev(p_first);
115063e97555SJeff Roberson 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1151bd7e5f99SJohn Dyson 			break;
115267d0e293SJeff Roberson 		if (!vm_object_page_remove_write(tp, flags, allclean)) {
115363e97555SJeff Roberson 			vm_page_xunbusy(tp);
1154bd7e5f99SJohn Dyson 			break;
115563e97555SJeff Roberson 		}
11563157c503SKonstantin Belousov 		p_first = tp;
11573157c503SKonstantin Belousov 		mreq++;
1158bd7e5f99SJohn Dyson 	}
1159bd7e5f99SJohn Dyson 
11603157c503SKonstantin Belousov 	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
11613157c503SKonstantin Belousov 		ma[i] = tp;
1162cf2819ccSJohn Dyson 
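	/*
	 * "ma" now holds the run in ascending pindex order; "mreq" is the
	 * index of the original page "p" within it, and "runlen" receives
	 * the number of pages actually written.
	 */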
1163126d6082SKonstantin Belousov 	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
11641e8a675cSKonstantin Belousov 	return (runlen);
116526f9a767SRodney W. Grimes }
1166df8bae1dSRodney W. Grimes 
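/*
 * For orientation (summary, not an exhaustive list of callers):
 * msync(2) typically reaches vm_object_sync() below via vm_map_sync(),
 * with "syncio" derived from MS_SYNC/MS_ASYNC and "invalidate" from
 * MS_INVALIDATE.
 */
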
11671efb74fbSJohn Dyson /*
1168950f8459SAlan Cox  * Note that there is absolutely no sense in writing out
1169950f8459SAlan Cox  * anonymous objects, so we track down the vnode object
1170950f8459SAlan Cox  * to write out.
1171950f8459SAlan Cox  * We invalidate (remove) all pages from the address space
1172950f8459SAlan Cox  * for semantic correctness.
1173950f8459SAlan Cox  *
11746bbee8e2SAlan Cox  * If the backing object is a device object with unmanaged pages, then any
11756bbee8e2SAlan Cox  * mappings to the specified range of pages must be removed before this
11766bbee8e2SAlan Cox  * function is called.
11776bbee8e2SAlan Cox  *
1178950f8459SAlan Cox  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
1179950f8459SAlan Cox  * may start out with a NULL object.
1180950f8459SAlan Cox  */
1181126d6082SKonstantin Belousov boolean_t
1182950f8459SAlan Cox vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
1183950f8459SAlan Cox     boolean_t syncio, boolean_t invalidate)
1184950f8459SAlan Cox {
1185950f8459SAlan Cox 	vm_object_t backing_object;
1186950f8459SAlan Cox 	struct vnode *vp;
11873b582b4eSTor Egge 	struct mount *mp;
1188126d6082SKonstantin Belousov 	int error, flags, fsync_after;
1189126d6082SKonstantin Belousov 	boolean_t res;
1190950f8459SAlan Cox 
1191950f8459SAlan Cox 	if (object == NULL)
1192126d6082SKonstantin Belousov 		return (TRUE);
1193126d6082SKonstantin Belousov 	res = TRUE;
1194126d6082SKonstantin Belousov 	error = 0;
119589f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object);
1196950f8459SAlan Cox 	while ((backing_object = object->backing_object) != NULL) {
119789f6b863SAttilio Rao 		VM_OBJECT_WLOCK(backing_object);
119856e0670fSAlan Cox 		offset += object->backing_object_offset;
119989f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
1200950f8459SAlan Cox 		object = backing_object;
1201950f8459SAlan Cox 		if (object->size < OFF_TO_IDX(offset + size))
1202950f8459SAlan Cox 			size = IDX_TO_OFF(object->size) - offset;
1203950f8459SAlan Cox 	}
1204950f8459SAlan Cox 	/*
1205950f8459SAlan Cox 	 * Flush pages if writing is allowed, invalidate them
1206950f8459SAlan Cox 	 * if invalidation requested.  Pages undergoing I/O
1207950f8459SAlan Cox 	 * will be ignored by vm_object_page_remove().
1208950f8459SAlan Cox 	 *
1209950f8459SAlan Cox 	 * We cannot lock the vnode and then wait for paging
1210950f8459SAlan Cox 	 * to complete without deadlocking against vm_fault.
1211950f8459SAlan Cox 	 * Instead we simply call vm_object_page_remove() and
1212950f8459SAlan Cox 	 * allow it to block internally on a page-by-page
1213950f8459SAlan Cox 	 * basis when it encounters pages undergoing async
1214950f8459SAlan Cox 	 * I/O.
1215950f8459SAlan Cox 	 */
1216950f8459SAlan Cox 	if (object->type == OBJT_VNODE &&
121767d0e293SJeff Roberson 	    vm_object_mightbedirty(object) != 0 &&
12185bf94937SKonstantin Belousov 	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
121989f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
12203b582b4eSTor Egge 		(void) vn_start_write(vp, &mp, V_WAIT);
1221cb05b60aSAttilio Rao 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
122275ff604aSKonstantin Belousov 		if (syncio && !invalidate && offset == 0 &&
1223d1780e8dSKonstantin Belousov 		    atop(size) == object->size) {
122475ff604aSKonstantin Belousov 			/*
122575ff604aSKonstantin Belousov 			 * If syncing the whole mapping of the file,
122675ff604aSKonstantin Belousov 			 * it is faster to schedule all the writes in
122775ff604aSKonstantin Belousov 			 * async mode, which also allows clustering,
122875ff604aSKonstantin Belousov 			 * and then wait for the I/O to complete.
122975ff604aSKonstantin Belousov 			 */
123075ff604aSKonstantin Belousov 			flags = 0;
123175ff604aSKonstantin Belousov 			fsync_after = TRUE;
123275ff604aSKonstantin Belousov 		} else {
1233950f8459SAlan Cox 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
123475ff604aSKonstantin Belousov 			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
123575ff604aSKonstantin Belousov 			fsync_after = FALSE;
123675ff604aSKonstantin Belousov 		}
123789f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
1238126d6082SKonstantin Belousov 		res = vm_object_page_clean(object, offset, offset + size,
1239126d6082SKonstantin Belousov 		    flags);
124089f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
124175ff604aSKonstantin Belousov 		if (fsync_after)
1242126d6082SKonstantin Belousov 			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
1243b249ce48SMateusz Guzik 		VOP_UNLOCK(vp);
12443b582b4eSTor Egge 		vn_finished_write(mp);
1245126d6082SKonstantin Belousov 		if (error != 0)
1246126d6082SKonstantin Belousov 			res = FALSE;
124789f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
1248950f8459SAlan Cox 	}
1249950f8459SAlan Cox 	if ((object->type == OBJT_VNODE ||
1250950f8459SAlan Cox 	     object->type == OBJT_DEVICE) && invalidate) {
12516bbee8e2SAlan Cox 		if (object->type == OBJT_DEVICE)
12526bbee8e2SAlan Cox 			/*
12536bbee8e2SAlan Cox 			 * The option OBJPR_NOTMAPPED must be passed here
12546bbee8e2SAlan Cox 			 * because vm_object_page_remove() cannot remove
12556bbee8e2SAlan Cox 			 * unmanaged mappings.
12566bbee8e2SAlan Cox 			 */
12576bbee8e2SAlan Cox 			flags = OBJPR_NOTMAPPED;
12586bbee8e2SAlan Cox 		else if (old_msync)
12596195b24aSKonstantin Belousov 			flags = 0;
12606bbee8e2SAlan Cox 		else
12616195b24aSKonstantin Belousov 			flags = OBJPR_CLEANONLY;
12626bbee8e2SAlan Cox 		vm_object_page_remove(object, OFF_TO_IDX(offset),
12636bbee8e2SAlan Cox 		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
1264950f8459SAlan Cox 	}
126589f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
1266126d6082SKonstantin Belousov 	return (res);
1267950f8459SAlan Cox }
1268950f8459SAlan Cox 
1269950f8459SAlan Cox /*
1270aa3650eaSMark Johnston  * Determine whether the given advice can be applied to the object.  Advice is
1271aa3650eaSMark Johnston  * not applied to unmanaged pages since they never belong to page queues, and
1272aa3650eaSMark Johnston  * since MADV_FREE is destructive, it can apply only to anonymous pages that
1273aa3650eaSMark Johnston  * have been mapped at most once.
1274aa3650eaSMark Johnston  */
1275aa3650eaSMark Johnston static bool
1276aa3650eaSMark Johnston vm_object_advice_applies(vm_object_t object, int advice)
1277aa3650eaSMark Johnston {
1278aa3650eaSMark Johnston 
1279aa3650eaSMark Johnston 	if ((object->flags & OBJ_UNMANAGED) != 0)
1280aa3650eaSMark Johnston 		return (false);
1281aa3650eaSMark Johnston 	if (advice != MADV_FREE)
1282aa3650eaSMark Johnston 		return (true);
128363967687SJeff Roberson 	return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
128463967687SJeff Roberson 	    (OBJ_ONEMAPPING | OBJ_ANON));
1285aa3650eaSMark Johnston }
1286aa3650eaSMark Johnston 
1287aa3650eaSMark Johnston static void
1288aa3650eaSMark Johnston vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
1289aa3650eaSMark Johnston     vm_size_t size)
1290aa3650eaSMark Johnston {
1291aa3650eaSMark Johnston 
1292aa3650eaSMark Johnston 	if (advice == MADV_FREE && object->type == OBJT_SWAP)
1293aa3650eaSMark Johnston 		swap_pager_freespace(object, pindex, size);
1294aa3650eaSMark Johnston }
1295aa3650eaSMark Johnston 
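/*
 * For orientation (summary, not an exhaustive list of callers):
 * madvise(2) reaches vm_object_madvise() below via vm_map_madvise() for
 * the advice values that act on pages (MADV_WILLNEED, MADV_DONTNEED and
 * MADV_FREE); advice that only changes map entry state is handled
 * entirely within vm_map.c.
 */
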
1296aa3650eaSMark Johnston /*
1297867a482dSJohn Dyson  *	vm_object_madvise:
1298867a482dSJohn Dyson  *
1299867a482dSJohn Dyson  *	Implements the madvise function at the object/page level.
13001c7c3c6aSMatthew Dillon  *
1301193b9358SAlan Cox  *	MADV_WILLNEED	(any object)
1302193b9358SAlan Cox  *
1303193b9358SAlan Cox  *	    Activate the specified pages if they are resident.
1304193b9358SAlan Cox  *
1305193b9358SAlan Cox  *	MADV_DONTNEED	(any object)
1306193b9358SAlan Cox  *
1307193b9358SAlan Cox  *	    Deactivate the specified pages if they are resident.
1308193b9358SAlan Cox  *
1309193b9358SAlan Cox  *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
1310193b9358SAlan Cox  *			 OBJ_ONEMAPPING only)
1311193b9358SAlan Cox  *
1312193b9358SAlan Cox  *	    Deactivate and clean the specified pages if they are
1313193b9358SAlan Cox  *	    resident.  This permits the process to reuse the pages
1314193b9358SAlan Cox  *	    without faulting or the kernel to reclaim the pages
1315193b9358SAlan Cox  *	    without I/O.
1316867a482dSJohn Dyson  */
1317867a482dSJohn Dyson void
131892a59946SJohn Baldwin vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1319c2655a40SMark Johnston     int advice)
1320867a482dSJohn Dyson {
132192a59946SJohn Baldwin 	vm_pindex_t tpindex;
132234567de7SAlan Cox 	vm_object_t backing_object, tobject;
1323aa3650eaSMark Johnston 	vm_page_t m, tm;
1324867a482dSJohn Dyson 
1325867a482dSJohn Dyson 	if (object == NULL)
1326867a482dSJohn Dyson 		return;
1327c2655a40SMark Johnston 
13286e20a165SJohn Dyson relookup:
1329aa3650eaSMark Johnston 	VM_OBJECT_WLOCK(object);
1330aa3650eaSMark Johnston 	if (!vm_object_advice_applies(object, advice)) {
1331aa3650eaSMark Johnston 		VM_OBJECT_WUNLOCK(object);
1332aa3650eaSMark Johnston 		return;
13336e20a165SJohn Dyson 	}
1334aa3650eaSMark Johnston 	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
1335aa3650eaSMark Johnston 		tobject = object;
1336c2655a40SMark Johnston 
13371ce137beSMatthew Dillon 		/*
1338aa3650eaSMark Johnston 		 * If the next page isn't resident in the top-level object, we
1339aa3650eaSMark Johnston 		 * need to search the shadow chain.  When applying MADV_FREE, we
1340aa3650eaSMark Johnston 		 * take care to release any swap space used to store
1341aa3650eaSMark Johnston 		 * non-resident pages.
1342aa3650eaSMark Johnston 		 */
1343aa3650eaSMark Johnston 		if (m == NULL || pindex < m->pindex) {
1344aa3650eaSMark Johnston 			/*
1345aa3650eaSMark Johnston 			 * Optimize a common case: if the top-level object has
1346aa3650eaSMark Johnston 			 * no backing object, we can skip over the non-resident
1347aa3650eaSMark Johnston 			 * range in constant time.
13481ce137beSMatthew Dillon 			 */
1349c2655a40SMark Johnston 			if (object->backing_object == NULL) {
1350c2655a40SMark Johnston 				tpindex = (m != NULL && m->pindex < end) ?
1351c2655a40SMark Johnston 				    m->pindex : end;
1352aa3650eaSMark Johnston 				vm_object_madvise_freespace(object, advice,
1353aa3650eaSMark Johnston 				    pindex, tpindex - pindex);
1354c2655a40SMark Johnston 				if ((pindex = tpindex) == end)
1355c2655a40SMark Johnston 					break;
1356aa3650eaSMark Johnston 				goto next_page;
1357aa3650eaSMark Johnston 			}
1358aa3650eaSMark Johnston 
1359aa3650eaSMark Johnston 			tpindex = pindex;
1360aa3650eaSMark Johnston 			do {
1361aa3650eaSMark Johnston 				vm_object_madvise_freespace(tobject, advice,
1362aa3650eaSMark Johnston 				    tpindex, 1);
13631ce137beSMatthew Dillon 				/*
1364aa3650eaSMark Johnston 				 * Prepare to search the next object in the
1365aa3650eaSMark Johnston 				 * chain.
13661ce137beSMatthew Dillon 				 */
136734567de7SAlan Cox 				backing_object = tobject->backing_object;
136834567de7SAlan Cox 				if (backing_object == NULL)
1369aa3650eaSMark Johnston 					goto next_pindex;
137089f6b863SAttilio Rao 				VM_OBJECT_WLOCK(backing_object);
1371aa3650eaSMark Johnston 				tpindex +=
1372aa3650eaSMark Johnston 				    OFF_TO_IDX(tobject->backing_object_offset);
13739b98b796SAlan Cox 				if (tobject != object)
137489f6b863SAttilio Rao 					VM_OBJECT_WUNLOCK(tobject);
137534567de7SAlan Cox 				tobject = backing_object;
1376aa3650eaSMark Johnston 				if (!vm_object_advice_applies(tobject, advice))
1377aa3650eaSMark Johnston 					goto next_pindex;
1378aa3650eaSMark Johnston 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
1379aa3650eaSMark Johnston 			    NULL);
1380aa3650eaSMark Johnston 		} else {
1381aa3650eaSMark Johnston next_page:
1382aa3650eaSMark Johnston 			tm = m;
1383aa3650eaSMark Johnston 			m = TAILQ_NEXT(m, listq);
1384c2655a40SMark Johnston 		}
1385c2655a40SMark Johnston 
1386867a482dSJohn Dyson 		/*
13870012f373SJeff Roberson 		 * If the page is not in a normal state, skip it.  The page
13880012f373SJeff Roberson 		 * cannot be invalidated while the object lock is held.
1389867a482dSJohn Dyson 		 */
13900012f373SJeff Roberson 		if (!vm_page_all_valid(tm) || vm_page_wired(tm))
1391aa3650eaSMark Johnston 			goto next_pindex;
1392aa3650eaSMark Johnston 		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
1393aa3650eaSMark Johnston 		    ("vm_object_madvise: page %p is fictitious", tm));
1394aa3650eaSMark Johnston 		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
1395aa3650eaSMark Johnston 		    ("vm_object_madvise: page %p is not managed", tm));
139663e97555SJeff Roberson 		if (vm_page_tryxbusy(tm) == 0) {
1397aa3650eaSMark Johnston 			if (object != tobject)
1398aa3650eaSMark Johnston 				VM_OBJECT_WUNLOCK(object);
1399c2655a40SMark Johnston 			if (advice == MADV_WILLNEED) {
1400b11b56b5SAlan Cox 				/*
1401b11b56b5SAlan Cox 				 * Reference the page before unlocking and
1402b11b56b5SAlan Cox 				 * sleeping so that the page daemon is less
1403b11b56b5SAlan Cox 				 * likely to reclaim it.
1404b11b56b5SAlan Cox 				 */
1405aa3650eaSMark Johnston 				vm_page_aflag_set(tm, PGA_REFERENCED);
1406567e51e1SAlan Cox 			}
1407aa3650eaSMark Johnston 			vm_page_busy_sleep(tm, "madvpo", false);
14086e20a165SJohn Dyson 			goto relookup;
140934567de7SAlan Cox 		}
1410aa3650eaSMark Johnston 		vm_page_advise(tm, advice);
141163e97555SJeff Roberson 		vm_page_xunbusy(tm);
1412aa3650eaSMark Johnston 		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
1413aa3650eaSMark Johnston next_pindex:
14149b98b796SAlan Cox 		if (tobject != object)
141589f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(tobject);
1416867a482dSJohn Dyson 	}
141789f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
1418867a482dSJohn Dyson }
1419867a482dSJohn Dyson 
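/*
 * For orientation: shadow objects are the mechanism behind copy-on-write.
 * For example, vm_map_lookup() shadows the object of a map entry marked
 * MAP_ENTRY_NEEDS_COPY before permitting a write, so that modified pages
 * are placed in the new object while unmodified pages are still found in
 * the backing object.
 */
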
1420867a482dSJohn Dyson /*
1421df8bae1dSRodney W. Grimes  *	vm_object_shadow:
1422df8bae1dSRodney W. Grimes  *
1423df8bae1dSRodney W. Grimes  *	Create a new object which is backed by the
1424df8bae1dSRodney W. Grimes  *	specified existing object range.  The source
1425df8bae1dSRodney W. Grimes  *	object reference is deallocated.
1426df8bae1dSRodney W. Grimes  *
1427df8bae1dSRodney W. Grimes  *	The new object and offset into that object
1428df8bae1dSRodney W. Grimes  *	are returned in the source parameters.
1429df8bae1dSRodney W. Grimes  */
143026f9a767SRodney W. Grimes void
143167388836SKonstantin Belousov vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
143267388836SKonstantin Belousov     struct ucred *cred, bool shared)
1433df8bae1dSRodney W. Grimes {
1434d031cff1SMatthew Dillon 	vm_object_t source;
1435d031cff1SMatthew Dillon 	vm_object_t result;
1436df8bae1dSRodney W. Grimes 
1437df8bae1dSRodney W. Grimes 	source = *object;
1438df8bae1dSRodney W. Grimes 
1439df8bae1dSRodney W. Grimes 	/*
14409a2f6362SAlan Cox 	 * Don't create the new object if the old object isn't shared.
144163967687SJeff Roberson 	 *
144263967687SJeff Roberson 	 * If we hold the only reference we can guarantee that it won't
144363967687SJeff Roberson 	 * increase while we have the map locked.  Otherwise the race is
144463967687SJeff Roberson 	 * harmless and we will end up with an extra shadow object that
144563967687SJeff Roberson 	 * will be collapsed later.
14469a2f6362SAlan Cox 	 */
144763967687SJeff Roberson 	if (source != NULL && source->ref_count == 1 &&
144832362449SKonstantin Belousov 	    (source->flags & OBJ_ANON) != 0)
14499a2f6362SAlan Cox 		return;
14509a2f6362SAlan Cox 
14519a2f6362SAlan Cox 	/*
1452570a2f4aSAlan Cox 	 * Allocate a new object with the given length.
1453df8bae1dSRodney W. Grimes 	 */
145467388836SKonstantin Belousov 	result = vm_object_allocate_anon(atop(length), source, cred, length);
1455df8bae1dSRodney W. Grimes 
1456df8bae1dSRodney W. Grimes 	/*
145751b867e5SJeff Roberson 	 * Store the offset into the source object, and fix up the offset into
145851b867e5SJeff Roberson 	 * the new object.
145951b867e5SJeff Roberson 	 */
146051b867e5SJeff Roberson 	result->backing_object_offset = *offset;
146151b867e5SJeff Roberson 
146267388836SKonstantin Belousov 	if (shared || source != NULL) {
146367388836SKonstantin Belousov 		VM_OBJECT_WLOCK(result);
146467388836SKonstantin Belousov 
146551b867e5SJeff Roberson 		/*
146667388836SKonstantin Belousov 		 * The new object shadows the source object, adding a
146767388836SKonstantin Belousov 		 * reference to it.  Our caller changes his reference
146867388836SKonstantin Belousov 		 * to point to the new object, removing a reference to
146967388836SKonstantin Belousov 		 * the source object.  Net result: no change of
147067388836SKonstantin Belousov 		 * reference count, unless the caller needs to add one
147167388836SKonstantin Belousov 		 * more reference due to forking a shared map entry.
147267388836SKonstantin Belousov 		 */
147367388836SKonstantin Belousov 		if (shared) {
147467388836SKonstantin Belousov 			vm_object_reference_locked(result);
147567388836SKonstantin Belousov 			vm_object_clear_flag(result, OBJ_ONEMAPPING);
147667388836SKonstantin Belousov 		}
147767388836SKonstantin Belousov 
147867388836SKonstantin Belousov 		/*
147967388836SKonstantin Belousov 		 * Try to optimize the result object's page color when
148067388836SKonstantin Belousov 		 * shadowing in order to maintain page coloring
148167388836SKonstantin Belousov 		 * consistency in the combined shadowed object.
1482df8bae1dSRodney W. Grimes 		 */
1483570a2f4aSAlan Cox 		if (source != NULL) {
148451b867e5SJeff Roberson 			vm_object_backing_insert(result, source);
14853f289c3fSJeff Roberson 			result->domain = source->domain;
1486f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
14877b54b1a9SAlan Cox 			result->flags |= source->flags & OBJ_COLORED;
148867388836SKonstantin Belousov 			result->pg_color = (source->pg_color +
148967388836SKonstantin Belousov 			    OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER -
149067388836SKonstantin Belousov 			    1)) - 1);
1491f8a47341SAlan Cox #endif
149267388836SKonstantin Belousov 		}
149351b867e5SJeff Roberson 		VM_OBJECT_WUNLOCK(result);
1494de5f6a77SJohn Dyson 	}
1495df8bae1dSRodney W. Grimes 
1496df8bae1dSRodney W. Grimes 	/*
1497df8bae1dSRodney W. Grimes 	 * Return the new object and the offset into it to the caller.
1498df8bae1dSRodney W. Grimes 	 */
1499df8bae1dSRodney W. Grimes 	*offset = 0;
1500df8bae1dSRodney W. Grimes 	*object = result;
1501df8bae1dSRodney W. Grimes }
1502df8bae1dSRodney W. Grimes 
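/*
 * Locking note (summary of the code below): the caller enters with the
 * entry's object write-locked.  On return the entry references the new
 * object instead, the new object's lock is held in place of the original
 * one, and one reference to the original object has been released.
 */
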
1503c5aaa06dSAlan Cox /*
1504c5aaa06dSAlan Cox  *	vm_object_split:
1505c5aaa06dSAlan Cox  *
1506c5aaa06dSAlan Cox  * Split the pages in a map entry into a new object.  This makes it
1507c5aaa06dSAlan Cox  * easier to remove unused pages and keeps object inheritance from
1508c5aaa06dSAlan Cox  * negatively impacting memory usage.
1509c5aaa06dSAlan Cox  */
1510c5aaa06dSAlan Cox void
1511c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry)
1512c5aaa06dSAlan Cox {
151373000556SAlan Cox 	vm_page_t m, m_next;
151498087a06SJeff Roberson 	vm_object_t orig_object, new_object, backing_object;
151573000556SAlan Cox 	vm_pindex_t idx, offidxstart;
151673000556SAlan Cox 	vm_size_t size;
1517c5aaa06dSAlan Cox 
1518c5aaa06dSAlan Cox 	orig_object = entry->object.vm_object;
151998087a06SJeff Roberson 	KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0,
152098087a06SJeff Roberson 	    ("vm_object_split:  Splitting object with multiple mappings."));
152163967687SJeff Roberson 	if ((orig_object->flags & OBJ_ANON) == 0)
1522c5aaa06dSAlan Cox 		return;
1523c5aaa06dSAlan Cox 	if (orig_object->ref_count <= 1)
1524c5aaa06dSAlan Cox 		return;
152589f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(orig_object);
1526c5aaa06dSAlan Cox 
15274da9f125SAlan Cox 	offidxstart = OFF_TO_IDX(entry->offset);
152895442adfSAlan Cox 	size = atop(entry->end - entry->start);
1529c5aaa06dSAlan Cox 
15304da9f125SAlan Cox 	/*
15314da9f125SAlan Cox 	 * If swap_pager_copy() is later called, it will convert new_object
15324da9f125SAlan Cox 	 * into a swap object.
15334da9f125SAlan Cox 	 */
153467388836SKonstantin Belousov 	new_object = vm_object_allocate_anon(size, orig_object,
153567388836SKonstantin Belousov 	    orig_object->cred, ptoa(size));
1536c5aaa06dSAlan Cox 
1537c5474b8fSAlan Cox 	/*
153898087a06SJeff Roberson 	 * We must wait for the orig_object to complete any in-progress
153998087a06SJeff Roberson 	 * collapse so that the swap blocks are stable below.  The
154098087a06SJeff Roberson 	 * additional reference on backing_object by new object will
154198087a06SJeff Roberson 	 * prevent further collapse operations until split completes.
154298087a06SJeff Roberson 	 */
154398087a06SJeff Roberson 	VM_OBJECT_WLOCK(orig_object);
154498087a06SJeff Roberson 	vm_object_collapse_wait(orig_object);
154598087a06SJeff Roberson 
154698087a06SJeff Roberson 	/*
1547c5474b8fSAlan Cox 	 * At this point, the new object is still private, so the order in
1548c5474b8fSAlan Cox 	 * which the original and new objects are locked does not matter.
1549c5474b8fSAlan Cox 	 */
155089f6b863SAttilio Rao 	VM_OBJECT_WLOCK(new_object);
15513f289c3fSJeff Roberson 	new_object->domain = orig_object->domain;
155298087a06SJeff Roberson 	backing_object = orig_object->backing_object;
155398087a06SJeff Roberson 	if (backing_object != NULL) {
155498087a06SJeff Roberson 		vm_object_backing_insert_ref(new_object, backing_object);
1555c5aaa06dSAlan Cox 		new_object->backing_object_offset =
15564da9f125SAlan Cox 		    orig_object->backing_object_offset + entry->offset;
1557c5aaa06dSAlan Cox 	}
1558ef694c1aSEdward Tomasz Napierala 	if (orig_object->cred != NULL) {
1559ef694c1aSEdward Tomasz Napierala 		crhold(orig_object->cred);
15603364c323SKonstantin Belousov 		KASSERT(orig_object->charge >= ptoa(size),
15613364c323SKonstantin Belousov 		    ("orig_object->charge < 0"));
15623364c323SKonstantin Belousov 		orig_object->charge -= ptoa(size);
15633364c323SKonstantin Belousov 	}
156498087a06SJeff Roberson 
156598087a06SJeff Roberson 	/*
156698087a06SJeff Roberson 	 * Mark the split operation so that swap_pager_getpages() knows
156798087a06SJeff Roberson 	 * that the object is in transition.
156898087a06SJeff Roberson 	 */
156998087a06SJeff Roberson 	vm_object_set_flag(orig_object, OBJ_SPLIT);
1570c5aaa06dSAlan Cox retry:
1571b382c10aSKonstantin Belousov 	m = vm_page_find_least(orig_object, offidxstart);
157273000556SAlan Cox 	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
157373000556SAlan Cox 	    m = m_next) {
157473000556SAlan Cox 		m_next = TAILQ_NEXT(m, listq);
1575c5aaa06dSAlan Cox 
1576c5aaa06dSAlan Cox 		/*
1577c5aaa06dSAlan Cox 		 * We must wait for pending I/O to complete before we can
1578c5aaa06dSAlan Cox 		 * rename the page.
1579c5aaa06dSAlan Cox 		 *
1580c5aaa06dSAlan Cox 		 * We do not have to VM_PROT_NONE the page as mappings should
1581c5aaa06dSAlan Cox 		 * not be changed by this operation.
1582c5aaa06dSAlan Cox 		 */
158363e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0) {
158489f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(new_object);
15854cdea4a8SJeff Roberson 			vm_page_sleep_if_busy(m, "spltwt");
158689f6b863SAttilio Rao 			VM_OBJECT_WLOCK(new_object);
1587c5aaa06dSAlan Cox 			goto retry;
1588de33beddSAlan Cox 		}
1589e946b949SAttilio Rao 
15904bf95d00SJeff Roberson 		/*
15914bf95d00SJeff Roberson 		 * The page was left invalid.  Likely placed there by
15924bf95d00SJeff Roberson 		 * an incomplete fault.  Just remove and ignore.
15934bf95d00SJeff Roberson 		 */
15944bf95d00SJeff Roberson 		if (vm_page_none_valid(m)) {
15954bf95d00SJeff Roberson 			if (vm_page_remove(m))
15964bf95d00SJeff Roberson 				vm_page_free(m);
15974bf95d00SJeff Roberson 			continue;
15984bf95d00SJeff Roberson 		}
15994bf95d00SJeff Roberson 
16003453bca8SAlan Cox 		/* vm_page_rename() will dirty the page. */
1601e946b949SAttilio Rao 		if (vm_page_rename(m, new_object, idx)) {
160263e97555SJeff Roberson 			vm_page_xunbusy(m);
1603e946b949SAttilio Rao 			VM_OBJECT_WUNLOCK(new_object);
1604e946b949SAttilio Rao 			VM_OBJECT_WUNLOCK(orig_object);
16058d6fbbb8SJeff Roberson 			vm_radix_wait();
1606e946b949SAttilio Rao 			VM_OBJECT_WLOCK(orig_object);
1607e946b949SAttilio Rao 			VM_OBJECT_WLOCK(new_object);
1608e946b949SAttilio Rao 			goto retry;
1609e946b949SAttilio Rao 		}
161063e97555SJeff Roberson 
1611b5f359b7SAlan Cox #if VM_NRESERVLEVEL > 0
1612b5f359b7SAlan Cox 		/*
1613b5f359b7SAlan Cox 		 * If some of the reservation's allocated pages remain with
1614b5f359b7SAlan Cox 		 * the original object, then transferring the reservation to
1615b5f359b7SAlan Cox 		 * the new object is neither particularly beneficial nor
1616b5f359b7SAlan Cox 		 * particularly harmful as compared to leaving the reservation
1617b5f359b7SAlan Cox 		 * with the original object.  If, however, all of the
1618b5f359b7SAlan Cox 		 * reservation's allocated pages are transferred to the new
1619b5f359b7SAlan Cox 		 * object, then transferring the reservation is typically
1620b5f359b7SAlan Cox 		 * beneficial.  Determining which of these two cases applies
1621b5f359b7SAlan Cox 		 * would be more costly than unconditionally renaming the
1622b5f359b7SAlan Cox 		 * reservation.
1623b5f359b7SAlan Cox 		 */
1624b5f359b7SAlan Cox 		vm_reserv_rename(m, new_object, orig_object, offidxstart);
1625b5f359b7SAlan Cox #endif
16268da1c098SJeff Roberson 		if (orig_object->type != OBJT_SWAP)
16278da1c098SJeff Roberson 			vm_page_xunbusy(m);
1628c5aaa06dSAlan Cox 	}
1629d7a013c3SAlan Cox 	if (orig_object->type == OBJT_SWAP) {
1630c5aaa06dSAlan Cox 		/*
1631c7c8dd7eSAlan Cox 		 * swap_pager_copy() can sleep, in which case the orig_object's
1632c7c8dd7eSAlan Cox 		 * and new_object's locks are released and reacquired.
1633c5aaa06dSAlan Cox 		 */
1634c5aaa06dSAlan Cox 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
1635dfd55c0cSAttilio Rao 		TAILQ_FOREACH(m, &new_object->memq, listq)
1636c7aebda8SAttilio Rao 			vm_page_xunbusy(m);
1637c5aaa06dSAlan Cox 	}
163898087a06SJeff Roberson 	vm_object_clear_flag(orig_object, OBJ_SPLIT);
163989f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(orig_object);
164089f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(new_object);
1641c5aaa06dSAlan Cox 	entry->object.vm_object = new_object;
1642c5aaa06dSAlan Cox 	entry->offset = 0LL;
1643c5aaa06dSAlan Cox 	vm_object_deallocate(orig_object);
164489f6b863SAttilio Rao 	VM_OBJECT_WLOCK(new_object);
1645c5aaa06dSAlan Cox }
1646c5aaa06dSAlan Cox 
164799a1570aSKonstantin Belousov static vm_page_t
164898087a06SJeff Roberson vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p)
164999a1570aSKonstantin Belousov {
165099a1570aSKonstantin Belousov 	vm_object_t backing_object;
165199a1570aSKonstantin Belousov 
165299a1570aSKonstantin Belousov 	VM_OBJECT_ASSERT_WLOCKED(object);
165399a1570aSKonstantin Belousov 	backing_object = object->backing_object;
165499a1570aSKonstantin Belousov 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
165599a1570aSKonstantin Belousov 
165699a1570aSKonstantin Belousov 	KASSERT(p == NULL || p->object == object || p->object == backing_object,
165799a1570aSKonstantin Belousov 	    ("invalid ownership %p %p %p", p, object, backing_object));
16588d6fbbb8SJeff Roberson 	/* The page is only NULL when rename fails. */
16594cdea4a8SJeff Roberson 	if (p == NULL) {
16606a14746cSRyan Libby 		VM_OBJECT_WUNLOCK(object);
16616a14746cSRyan Libby 		VM_OBJECT_WUNLOCK(backing_object);
16628d6fbbb8SJeff Roberson 		vm_radix_wait();
16634cdea4a8SJeff Roberson 	} else {
16644cdea4a8SJeff Roberson 		if (p->object == object)
16654cdea4a8SJeff Roberson 			VM_OBJECT_WUNLOCK(backing_object);
166699a1570aSKonstantin Belousov 		else
16674cdea4a8SJeff Roberson 			VM_OBJECT_WUNLOCK(object);
16685975e53dSKonstantin Belousov 		vm_page_busy_sleep(p, "vmocol", false);
16694cdea4a8SJeff Roberson 	}
167099a1570aSKonstantin Belousov 	VM_OBJECT_WLOCK(object);
167199a1570aSKonstantin Belousov 	VM_OBJECT_WLOCK(backing_object);
167299a1570aSKonstantin Belousov 	return (TAILQ_FIRST(&backing_object->memq));
167399a1570aSKonstantin Belousov }
167499a1570aSKonstantin Belousov 
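/*
 * Return true if, for every page (resident or pager-backed) of the
 * backing object that falls within the range mapped by "object", the
 * parent "object" itself has a valid resident page or pager storage at
 * the corresponding index, i.e., the parent completely shadows the
 * backing object.  Both objects must be write-locked.
 */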
167599a1570aSKonstantin Belousov static bool
16764cc8daf7SConrad Meyer vm_object_scan_all_shadowed(vm_object_t object)
16774cc8daf7SConrad Meyer {
16784cc8daf7SConrad Meyer 	vm_object_t backing_object;
16794cc8daf7SConrad Meyer 	vm_page_t p, pp;
168077d6fd97SKonstantin Belousov 	vm_pindex_t backing_offset_index, new_pindex, pi, ps;
16814cc8daf7SConrad Meyer 
16824cc8daf7SConrad Meyer 	VM_OBJECT_ASSERT_WLOCKED(object);
16834cc8daf7SConrad Meyer 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
16844cc8daf7SConrad Meyer 
16854cc8daf7SConrad Meyer 	backing_object = object->backing_object;
16864cc8daf7SConrad Meyer 
168763967687SJeff Roberson 	if ((backing_object->flags & OBJ_ANON) == 0)
16884cc8daf7SConrad Meyer 		return (false);
16894cc8daf7SConrad Meyer 
169077d6fd97SKonstantin Belousov 	pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
169177d6fd97SKonstantin Belousov 	p = vm_page_find_least(backing_object, pi);
169277d6fd97SKonstantin Belousov 	ps = swap_pager_find_least(backing_object, pi);
16934cc8daf7SConrad Meyer 
16944cc8daf7SConrad Meyer 	/*
169577d6fd97SKonstantin Belousov 	 * Only check pages inside the parent object's range and
169677d6fd97SKonstantin Belousov 	 * inside the parent object's mapping of the backing object.
16974cc8daf7SConrad Meyer 	 */
169877d6fd97SKonstantin Belousov 	for (;; pi++) {
169977d6fd97SKonstantin Belousov 		if (p != NULL && p->pindex < pi)
170077d6fd97SKonstantin Belousov 			p = TAILQ_NEXT(p, listq);
170177d6fd97SKonstantin Belousov 		if (ps < pi)
170277d6fd97SKonstantin Belousov 			ps = swap_pager_find_least(backing_object, pi);
170377d6fd97SKonstantin Belousov 		if (p == NULL && ps >= backing_object->size)
170477d6fd97SKonstantin Belousov 			break;
170577d6fd97SKonstantin Belousov 		else if (p == NULL)
170677d6fd97SKonstantin Belousov 			pi = ps;
170777d6fd97SKonstantin Belousov 		else
170877d6fd97SKonstantin Belousov 			pi = MIN(p->pindex, ps);
170977d6fd97SKonstantin Belousov 
171077d6fd97SKonstantin Belousov 		new_pindex = pi - backing_offset_index;
171177d6fd97SKonstantin Belousov 		if (new_pindex >= object->size)
171277d6fd97SKonstantin Belousov 			break;
17134cc8daf7SConrad Meyer 
1714cd0047f3SKonstantin Belousov 		if (p != NULL) {
17154cc8daf7SConrad Meyer 			/*
1716cd0047f3SKonstantin Belousov 			 * If the backing object page is busy a
1717cd0047f3SKonstantin Belousov 			 * grandparent or older page may still be
1718cd0047f3SKonstantin Belousov 			 * undergoing CoW.  It is not safe to collapse
1719cd0047f3SKonstantin Belousov 			 * the backing object until it is quiesced.
172058447749SJeff Roberson 			 */
1721cd0047f3SKonstantin Belousov 			if (vm_page_tryxbusy(p) == 0)
172258447749SJeff Roberson 				return (false);
172358447749SJeff Roberson 
172458447749SJeff Roberson 			/*
1725cd0047f3SKonstantin Belousov 			 * We raced with the fault handler that left
1726cd0047f3SKonstantin Belousov 			 * newly allocated invalid page on the object
1727cd0047f3SKonstantin Belousov 			 * queue and retried.
1728cd0047f3SKonstantin Belousov 			 */
1729cd0047f3SKonstantin Belousov 			if (!vm_page_all_valid(p))
1730cd0047f3SKonstantin Belousov 				goto unbusy_ret;
1731cd0047f3SKonstantin Belousov 		}
1732cd0047f3SKonstantin Belousov 
1733cd0047f3SKonstantin Belousov 		/*
17344cc8daf7SConrad Meyer 		 * See if the parent has the page or if the parent's object
17354cc8daf7SConrad Meyer 		 * pager has the page.  If the parent has the page but the page
17364cc8daf7SConrad Meyer 		 * is not valid, the parent's object pager must have the page.
17374cc8daf7SConrad Meyer 		 *
17384cc8daf7SConrad Meyer 		 * If this fails, the parent does not completely shadow the
17394cc8daf7SConrad Meyer 		 * object and we might as well give up now.
17404cc8daf7SConrad Meyer 		 */
17414cc8daf7SConrad Meyer 		pp = vm_page_lookup(object, new_pindex);
1742cd0047f3SKonstantin Belousov 
17430012f373SJeff Roberson 		/*
1744cd0047f3SKonstantin Belousov 		 * The valid check here is stable due to object lock
1745cd0047f3SKonstantin Belousov 		 * being required to clear valid and initiate paging.
1746cd0047f3SKonstantin Belousov 		 * Busy of p disallows fault handler to validate pp.
17470012f373SJeff Roberson 		 */
17480012f373SJeff Roberson 		if ((pp == NULL || vm_page_none_valid(pp)) &&
17494cc8daf7SConrad Meyer 		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
1750cd0047f3SKonstantin Belousov 			goto unbusy_ret;
1751cd0047f3SKonstantin Belousov 		if (p != NULL)
1752cd0047f3SKonstantin Belousov 			vm_page_xunbusy(p);
17534cc8daf7SConrad Meyer 	}
17544cc8daf7SConrad Meyer 	return (true);
1755cd0047f3SKonstantin Belousov 
1756cd0047f3SKonstantin Belousov unbusy_ret:
1757cd0047f3SKonstantin Belousov 	if (p != NULL)
1758cd0047f3SKonstantin Belousov 		vm_page_xunbusy(p);
1759cd0047f3SKonstantin Belousov 	return (false);
17604cc8daf7SConrad Meyer }
17614cc8daf7SConrad Meyer 
176298087a06SJeff Roberson static void
176398087a06SJeff Roberson vm_object_collapse_scan(vm_object_t object)
17642ad1a3f7SMatthew Dillon {
17652ad1a3f7SMatthew Dillon 	vm_object_t backing_object;
176699a1570aSKonstantin Belousov 	vm_page_t next, p, pp;
176799a1570aSKonstantin Belousov 	vm_pindex_t backing_offset_index, new_pindex;
17682ad1a3f7SMatthew Dillon 
176989f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
177089f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
17712ad1a3f7SMatthew Dillon 
17722ad1a3f7SMatthew Dillon 	backing_object = object->backing_object;
17732ad1a3f7SMatthew Dillon 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
17742ad1a3f7SMatthew Dillon 
17752ad1a3f7SMatthew Dillon 	/*
17762ad1a3f7SMatthew Dillon 	 * Scan the backing object's pages, moving or freeing each one.
17772ad1a3f7SMatthew Dillon 	 */
17784cc8daf7SConrad Meyer 	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
177999a1570aSKonstantin Belousov 		next = TAILQ_NEXT(p, listq);
178099a1570aSKonstantin Belousov 		new_pindex = p->pindex - backing_offset_index;
17812ad1a3f7SMatthew Dillon 
17822ad1a3f7SMatthew Dillon 		/*
17832ad1a3f7SMatthew Dillon 		 * Check for busy page
17842ad1a3f7SMatthew Dillon 		 */
178563e97555SJeff Roberson 		if (vm_page_tryxbusy(p) == 0) {
178698087a06SJeff Roberson 			next = vm_object_collapse_scan_wait(object, p);
17872ad1a3f7SMatthew Dillon 			continue;
17882ad1a3f7SMatthew Dillon 		}
17892ad1a3f7SMatthew Dillon 
179098087a06SJeff Roberson 		KASSERT(object->backing_object == backing_object,
179198087a06SJeff Roberson 		    ("vm_object_collapse_scan: backing object mismatch %p != %p",
179298087a06SJeff Roberson 		    object->backing_object, backing_object));
179399a1570aSKonstantin Belousov 		KASSERT(p->object == backing_object,
179498087a06SJeff Roberson 		    ("vm_object_collapse_scan: object mismatch %p != %p",
179598087a06SJeff Roberson 		    p->object, backing_object));
17962ad1a3f7SMatthew Dillon 
179799a1570aSKonstantin Belousov 		if (p->pindex < backing_offset_index ||
179899a1570aSKonstantin Belousov 		    new_pindex >= object->size) {
1799e946b949SAttilio Rao 			if (backing_object->type == OBJT_SWAP)
18004cc8daf7SConrad Meyer 				swap_pager_freespace(backing_object, p->pindex,
18014cc8daf7SConrad Meyer 				    1);
1802e946b949SAttilio Rao 
1803f6d89838SAlan Cox 			KASSERT(!pmap_page_is_mapped(p),
1804f6d89838SAlan Cox 			    ("freeing mapped page %p", p));
18050fd977b3SMark Johnston 			if (vm_page_remove(p))
18062ad1a3f7SMatthew Dillon 				vm_page_free(p);
18072ad1a3f7SMatthew Dillon 			continue;
18082ad1a3f7SMatthew Dillon 		}
18092ad1a3f7SMatthew Dillon 
1810cd0047f3SKonstantin Belousov 		if (!vm_page_all_valid(p)) {
1811cd0047f3SKonstantin Belousov 			KASSERT(!pmap_page_is_mapped(p),
1812cd0047f3SKonstantin Belousov 			    ("freeing mapped page %p", p));
1813cd0047f3SKonstantin Belousov 			if (vm_page_remove(p))
1814cd0047f3SKonstantin Belousov 				vm_page_free(p);
1815cd0047f3SKonstantin Belousov 			continue;
1816cd0047f3SKonstantin Belousov 		}
1817cd0047f3SKonstantin Belousov 
18182ad1a3f7SMatthew Dillon 		pp = vm_page_lookup(object, new_pindex);
181963e97555SJeff Roberson 		if (pp != NULL && vm_page_tryxbusy(pp) == 0) {
182063e97555SJeff Roberson 			vm_page_xunbusy(p);
1821e18cc7bfSMax Laier 			/*
18224cc8daf7SConrad Meyer 			 * The page in the parent is busy and possibly not
18234cc8daf7SConrad Meyer 			 * (yet) valid.  Until its state is finalized by the
18244cc8daf7SConrad Meyer 			 * busy bit owner, we can't tell whether it shadows the
182598087a06SJeff Roberson 			 * original page.
1826e18cc7bfSMax Laier 			 */
182798087a06SJeff Roberson 			next = vm_object_collapse_scan_wait(object, pp);
1828e18cc7bfSMax Laier 			continue;
1829e18cc7bfSMax Laier 		}
183099a1570aSKonstantin Belousov 
18314bf95d00SJeff Roberson 		if (pp != NULL && vm_page_none_valid(pp)) {
18324bf95d00SJeff Roberson 			/*
18334bf95d00SJeff Roberson 			 * The page was invalid in the parent.  Likely placed
18344bf95d00SJeff Roberson 			 * there by an incomplete fault.  Just remove and
18354bf95d00SJeff Roberson 			 * ignore.  p can replace it.
18364bf95d00SJeff Roberson 			 */
18374bf95d00SJeff Roberson 			if (vm_page_remove(pp))
18384bf95d00SJeff Roberson 				vm_page_free(pp);
18394bf95d00SJeff Roberson 			pp = NULL;
18404bf95d00SJeff Roberson 		}
184199a1570aSKonstantin Belousov 
18424cc8daf7SConrad Meyer 		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
18434cc8daf7SConrad Meyer 			NULL)) {
184499a1570aSKonstantin Belousov 			/*
18454cc8daf7SConrad Meyer 			 * The page already exists in the parent OR swap exists
18464cc8daf7SConrad Meyer 			 * for this location in the parent.  Leave the parent's
18474cc8daf7SConrad Meyer 			 * page alone.  Destroy the original page from the
18484cc8daf7SConrad Meyer 			 * backing object.
184999a1570aSKonstantin Belousov 			 */
1850e946b949SAttilio Rao 			if (backing_object->type == OBJT_SWAP)
18514cc8daf7SConrad Meyer 				swap_pager_freespace(backing_object, p->pindex,
18524cc8daf7SConrad Meyer 				    1);
1853f6d89838SAlan Cox 			KASSERT(!pmap_page_is_mapped(p),
1854f6d89838SAlan Cox 			    ("freeing mapped page %p", p));
18550fd977b3SMark Johnston 			if (vm_page_remove(p))
18562ad1a3f7SMatthew Dillon 				vm_page_free(p);
185763e97555SJeff Roberson 			if (pp != NULL)
185863e97555SJeff Roberson 				vm_page_xunbusy(pp);
18592ad1a3f7SMatthew Dillon 			continue;
18602ad1a3f7SMatthew Dillon 		}
18612ad1a3f7SMatthew Dillon 
1862e946b949SAttilio Rao 		/*
18634cc8daf7SConrad Meyer 		 * Page does not exist in parent, rename the page from the
18644cc8daf7SConrad Meyer 		 * backing object to the main object.
1865e946b949SAttilio Rao 		 *
18664cc8daf7SConrad Meyer 		 * If the page was mapped to a process, it can remain mapped
18673453bca8SAlan Cox 		 * through the rename.  vm_page_rename() will dirty the page.
1868e946b949SAttilio Rao 		 */
1869e946b949SAttilio Rao 		if (vm_page_rename(p, object, new_pindex)) {
187063e97555SJeff Roberson 			vm_page_xunbusy(p);
187198087a06SJeff Roberson 			next = vm_object_collapse_scan_wait(object, NULL);
1872e946b949SAttilio Rao 			continue;
1873e946b949SAttilio Rao 		}
187414a5dc17SAttilio Rao 
187514a5dc17SAttilio Rao 		/* Use the old pindex to free the right page. */
1876e946b949SAttilio Rao 		if (backing_object->type == OBJT_SWAP)
187714a5dc17SAttilio Rao 			swap_pager_freespace(backing_object,
187814a5dc17SAttilio Rao 			    new_pindex + backing_offset_index, 1);
1879e946b949SAttilio Rao 
1880f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
1881f8a47341SAlan Cox 		/*
1882f8a47341SAlan Cox 		 * Rename the reservation.
1883f8a47341SAlan Cox 		 */
1884f8a47341SAlan Cox 		vm_reserv_rename(p, object, backing_object,
1885f8a47341SAlan Cox 		    backing_offset_index);
1886f8a47341SAlan Cox #endif
18878da1c098SJeff Roberson 		vm_page_xunbusy(p);
18882ad1a3f7SMatthew Dillon 	}
18892fe6e4d7SDavid Greenman 	return;
18902fe6e4d7SDavid Greenman }
18912fe6e4d7SDavid Greenman 
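/*
 * For orientation (summary, not an exhaustive list of callers):
 * vm_object_collapse() is invoked, for example, by vm_object_deallocate()
 * when a reference to an anonymous object is dropped, keeping the shadow
 * chains built by vm_object_shadow() from growing without bound.
 */
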
1892df8bae1dSRodney W. Grimes /*
1893df8bae1dSRodney W. Grimes  *	vm_object_collapse:
1894df8bae1dSRodney W. Grimes  *
1895df8bae1dSRodney W. Grimes  *	Collapse an object with the object backing it.
1896df8bae1dSRodney W. Grimes  *	Pages in the backing object are moved into the
1897df8bae1dSRodney W. Grimes  *	parent, and the backing object is deallocated.
1898df8bae1dSRodney W. Grimes  */
189926f9a767SRodney W. Grimes void
19001b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object)
1901df8bae1dSRodney W. Grimes {
190298f139daSKonstantin Belousov 	vm_object_t backing_object, new_backing_object;
190398f139daSKonstantin Belousov 
190489f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
190523955314SAlfred Perlstein 
1906df8bae1dSRodney W. Grimes 	while (TRUE) {
190798087a06SJeff Roberson 		KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON,
190898087a06SJeff Roberson 		    ("collapsing invalid object"));
190998087a06SJeff Roberson 
1910df8bae1dSRodney W. Grimes 		/*
191198087a06SJeff Roberson 		 * Wait for the backing_object to finish any pending
191298087a06SJeff Roberson 		 * collapse so that the caller sees the shortest possible
191398087a06SJeff Roberson 		 * shadow chain.
1914df8bae1dSRodney W. Grimes 		 */
191598087a06SJeff Roberson 		backing_object = vm_object_backing_collapse_wait(object);
191698087a06SJeff Roberson 		if (backing_object == NULL)
191798087a06SJeff Roberson 			return;
191898087a06SJeff Roberson 
191998087a06SJeff Roberson 		KASSERT(object->ref_count > 0 &&
192098087a06SJeff Roberson 		    object->ref_count > object->shadow_count,
192198087a06SJeff Roberson 		    ("collapse with invalid ref %d or shadow %d count.",
192298087a06SJeff Roberson 		    object->ref_count, object->shadow_count));
192398087a06SJeff Roberson 		KASSERT((backing_object->flags &
192498087a06SJeff Roberson 		    (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
192598087a06SJeff Roberson 		    ("vm_object_collapse: Backing object already collapsing."));
192698087a06SJeff Roberson 		KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
192798087a06SJeff Roberson 		    ("vm_object_collapse: object is already collapsing."));
1928df8bae1dSRodney W. Grimes 
1929f919ebdeSDavid Greenman 		/*
193098087a06SJeff Roberson 		 * We know that we can either collapse the backing object if
193198087a06SJeff Roberson 		 * the parent is the only reference to it, or (perhaps) have
19322ad1a3f7SMatthew Dillon 		 * the parent bypass the object if the parent happens to shadow
19332ad1a3f7SMatthew Dillon 		 * all the resident pages in the entire backing object.
1934df8bae1dSRodney W. Grimes 		 */
1935df8bae1dSRodney W. Grimes 		if (backing_object->ref_count == 1) {
193698087a06SJeff Roberson 			KASSERT(backing_object->shadow_count == 1,
193798087a06SJeff Roberson 			    ("vm_object_collapse: shadow_count: %d",
193898087a06SJeff Roberson 			    backing_object->shadow_count));
1939aa9bc3b1SKonstantin Belousov 			vm_object_pip_add(object, 1);
194098087a06SJeff Roberson 			vm_object_set_flag(object, OBJ_COLLAPSING);
1941aa9bc3b1SKonstantin Belousov 			vm_object_pip_add(backing_object, 1);
194298087a06SJeff Roberson 			vm_object_set_flag(backing_object, OBJ_DEAD);
1943aa9bc3b1SKonstantin Belousov 
1944df8bae1dSRodney W. Grimes 			/*
19452ad1a3f7SMatthew Dillon 			 * If there is exactly one reference to the backing
19462ad1a3f7SMatthew Dillon 			 * object, we can collapse it into the parent.
1947df8bae1dSRodney W. Grimes 			 */
194898087a06SJeff Roberson 			vm_object_collapse_scan(object);
1949df8bae1dSRodney W. Grimes 
1950f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
1951f8a47341SAlan Cox 			/*
1952f8a47341SAlan Cox 			 * Break any reservations from backing_object.
1953f8a47341SAlan Cox 			 */
1954f8a47341SAlan Cox 			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1955f8a47341SAlan Cox 				vm_reserv_break_all(backing_object);
1956f8a47341SAlan Cox #endif
1957f8a47341SAlan Cox 
1958df8bae1dSRodney W. Grimes 			/*
1959df8bae1dSRodney W. Grimes 			 * Move the pager from backing_object to object.
1960df8bae1dSRodney W. Grimes 			 */
19616be36525SAlan Cox 			if (backing_object->type == OBJT_SWAP) {
196224a1cce3SDavid Greenman 				/*
1963c7c8dd7eSAlan Cox 				 * swap_pager_copy() can sleep, in which case
1964c7c8dd7eSAlan Cox 				 * the backing_object's and object's locks are
1965c7c8dd7eSAlan Cox 				 * released and reacquired.
1966571a1e92SAttilio Rao 				 * Since swap_pager_copy() is being asked to
196798087a06SJeff Roberson 				 * destroy backing_object, it will change the
196898087a06SJeff Roberson 				 * type to OBJT_DEFAULT.
196924a1cce3SDavid Greenman 				 */
19701c7c3c6aSMatthew Dillon 				swap_pager_copy(
19711c7c3c6aSMatthew Dillon 				    backing_object,
19721c7c3c6aSMatthew Dillon 				    object,
19731c7c3c6aSMatthew Dillon 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1974c0503609SDavid Greenman 			}
197598087a06SJeff Roberson 
1976df8bae1dSRodney W. Grimes 			/*
1977df8bae1dSRodney W. Grimes 			 * Object now shadows whatever backing_object did.
1978df8bae1dSRodney W. Grimes 			 */
197998087a06SJeff Roberson 			vm_object_clear_flag(object, OBJ_COLLAPSING);
198098087a06SJeff Roberson 			vm_object_backing_transfer(object, backing_object);
19812ad1a3f7SMatthew Dillon 			object->backing_object_offset +=
19822ad1a3f7SMatthew Dillon 			    backing_object->backing_object_offset;
198398087a06SJeff Roberson 			VM_OBJECT_WUNLOCK(object);
198498087a06SJeff Roberson 			vm_object_pip_wakeup(object);
19852ad1a3f7SMatthew Dillon 
1986df8bae1dSRodney W. Grimes 			/*
1987df8bae1dSRodney W. Grimes 			 * Discard backing_object.
1988df8bae1dSRodney W. Grimes 			 *
19890d94caffSDavid Greenman 			 * Since the backing object has no pages, no pager left,
19900d94caffSDavid Greenman 			 * and no object references within it, all that is
19910d94caffSDavid Greenman 			 * necessary is to dispose of it.
1992df8bae1dSRodney W. Grimes 			 */
19939b4d473aSKonstantin Belousov 			KASSERT(backing_object->ref_count == 1, (
19949b4d473aSKonstantin Belousov "backing_object %p was somehow re-referenced during collapse!",
19959b4d473aSKonstantin Belousov 			    backing_object));
1996aa9bc3b1SKonstantin Belousov 			vm_object_pip_wakeup(backing_object);
199798087a06SJeff Roberson 			(void)refcount_release(&backing_object->ref_count);
199898087a06SJeff Roberson 			vm_object_terminate(backing_object);
199911542376SAlan Cox 			counter_u64_add(object_collapses, 1);
200098087a06SJeff Roberson 			VM_OBJECT_WLOCK(object);
20010d94caffSDavid Greenman 		} else {
2002df8bae1dSRodney W. Grimes 			/*
20032ad1a3f7SMatthew Dillon 			 * If we do not entirely shadow the backing object,
20042ad1a3f7SMatthew Dillon 			 * there is nothing we can do so we give up.
200598087a06SJeff Roberson 			 *
200698087a06SJeff Roberson 			 * The object lock and backing_object lock must not
200798087a06SJeff Roberson 			 * be dropped during this sequence.
2008df8bae1dSRodney W. Grimes 			 */
200958447749SJeff Roberson 			if (!vm_object_scan_all_shadowed(object)) {
201089f6b863SAttilio Rao 				VM_OBJECT_WUNLOCK(backing_object);
20112ad1a3f7SMatthew Dillon 				break;
201224a1cce3SDavid Greenman 			}
2013df8bae1dSRodney W. Grimes 
2014df8bae1dSRodney W. Grimes 			/*
20150d94caffSDavid Greenman 			 * Make the parent shadow the next object in the
20160d94caffSDavid Greenman 			 * chain.  Deallocating backing_object will not remove
20170d94caffSDavid Greenman 			 * it, since its reference count is at least 2.
2018df8bae1dSRodney W. Grimes 			 */
201951b867e5SJeff Roberson 			vm_object_backing_remove_locked(object);
202095e5e988SJohn Dyson 			new_backing_object = backing_object->backing_object;
202151b867e5SJeff Roberson 			if (new_backing_object != NULL) {
202298087a06SJeff Roberson 				vm_object_backing_insert_ref(object,
202351b867e5SJeff Roberson 				    new_backing_object);
202495e5e988SJohn Dyson 				object->backing_object_offset +=
202595e5e988SJohn Dyson 				    backing_object->backing_object_offset;
2026de5f6a77SJohn Dyson 			}
2027df8bae1dSRodney W. Grimes 
2028df8bae1dSRodney W. Grimes 			/*
20290d94caffSDavid Greenman 			 * Drop the reference count on backing_object. Since
203022ec553fSAlan Cox 			 * its ref_count was at least 2, it will not vanish.
2031df8bae1dSRodney W. Grimes 			 */
203298087a06SJeff Roberson 			(void)refcount_release(&backing_object->ref_count);
203398087a06SJeff Roberson 			KASSERT(backing_object->ref_count >= 1, (
203498087a06SJeff Roberson "backing_object %p was somehow dereferenced during collapse!",
203598087a06SJeff Roberson 			    backing_object));
203689f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(backing_object);
203711542376SAlan Cox 			counter_u64_add(object_bypasses, 1);
2038df8bae1dSRodney W. Grimes 		}
2039df8bae1dSRodney W. Grimes 
2040df8bae1dSRodney W. Grimes 		/*
2041df8bae1dSRodney W. Grimes 		 * Try again with this object's new backing object.
2042df8bae1dSRodney W. Grimes 		 */
2043df8bae1dSRodney W. Grimes 	}
2044df8bae1dSRodney W. Grimes }
2045df8bae1dSRodney W. Grimes 
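/*
 * Editorial sketch (hypothetical caller, not a call site in this file):
 * discarding every page beyond a new end-of-file could use
 * vm_object_page_remove() below as
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_page_remove(object, OFF_TO_IDX(newsize + PAGE_MASK), 0, 0);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * relying on end == 0 to extend the range to the end of the object.
 */
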
2046df8bae1dSRodney W. Grimes /*
2047bff99f0dSAlan Cox  *	vm_object_page_remove:
2048df8bae1dSRodney W. Grimes  *
204968855966SAlan Cox  *	For the given object, either frees or invalidates each of the
20506bbee8e2SAlan Cox  *	specified pages.  In general, a page is freed.  However, if a page is
20516bbee8e2SAlan Cox  *	wired for any reason other than the existence of a managed, wired
20526bbee8e2SAlan Cox  *	mapping, then it may be invalidated but not removed from the object.
20536bbee8e2SAlan Cox  *	Pages are specified by the given range ["start", "end") and the option
20546bbee8e2SAlan Cox  *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
20556bbee8e2SAlan Cox  *	extends from "start" to the end of the object.  If the option
20566bbee8e2SAlan Cox  *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
20576bbee8e2SAlan Cox  *	specified range are affected.  If the option OBJPR_NOTMAPPED is
20586bbee8e2SAlan Cox  *	specified, then the pages within the specified range must have no
20596bbee8e2SAlan Cox  *	mappings.  Otherwise, if this option is not specified, any mappings to
20606bbee8e2SAlan Cox  *	the specified pages are removed before the pages are freed or
20616bbee8e2SAlan Cox  *	invalidated.
206268855966SAlan Cox  *
20636bbee8e2SAlan Cox  *	In general, this operation should only be performed on objects that
20646bbee8e2SAlan Cox  *	contain managed pages.  There are, however, two exceptions.  First, it
20656bbee8e2SAlan Cox  *	is performed on the kernel and kmem objects by vm_map_entry_delete().
20666bbee8e2SAlan Cox  *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
20676bbee8e2SAlan Cox  *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
20686bbee8e2SAlan Cox  *	not be specified and the option OBJPR_NOTMAPPED must be specified.
2069df8bae1dSRodney W. Grimes  *
2070df8bae1dSRodney W. Grimes  *	The object must be locked.
2071df8bae1dSRodney W. Grimes  */
207226f9a767SRodney W. Grimes void
2073ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
20746bbee8e2SAlan Cox     int options)
2075df8bae1dSRodney W. Grimes {
2076d031cff1SMatthew Dillon 	vm_page_t p, next;
2077df8bae1dSRodney W. Grimes 
207889f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
207928634820SAlan Cox 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
20806bbee8e2SAlan Cox 	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
20816bbee8e2SAlan Cox 	    ("vm_object_page_remove: illegal options for object %p", object));
2082ecde4b32SAlan Cox 	if (object->resident_page_count == 0)
20837667839aSAlan Cox 		return;
2084d474eaaaSDoug Rabson 	vm_object_pip_add(object, 1);
208526f9a767SRodney W. Grimes again:
2086b382c10aSKonstantin Belousov 	p = vm_page_find_least(object, start);
20872965a453SKip Macy 
208875741c04SAlan Cox 	/*
20896bbee8e2SAlan Cox 	 * Here, the variable "p" is either (1) the page with the least pindex
20906bbee8e2SAlan Cox 	 * greater than or equal to the parameter "start" or (2) NULL.
209175741c04SAlan Cox 	 */
20926bbee8e2SAlan Cox 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
2093b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(p, listq);
209475741c04SAlan Cox 
209559677d3cSAlan Cox 		/*
20966bbee8e2SAlan Cox 		 * If the page is wired for any reason besides the existence
20976bbee8e2SAlan Cox 		 * of managed, wired mappings, then it cannot be freed.  For
20986bbee8e2SAlan Cox 		 * example, fictitious pages, which represent device memory,
20996bbee8e2SAlan Cox 		 * are inherently wired and cannot be freed.  They can,
21006bbee8e2SAlan Cox 		 * however, be invalidated if the option OBJPR_CLEANONLY is
21016bbee8e2SAlan Cox 		 * not specified.
210259677d3cSAlan Cox 		 */
210363e97555SJeff Roberson 		if (vm_page_tryxbusy(p) == 0) {
21044cdea4a8SJeff Roberson 			vm_page_sleep_if_busy(p, "vmopar");
2105fee2a2faSMark Johnston 			goto again;
2106fee2a2faSMark Johnston 		}
2107d842aa51SMark Johnston 		if (vm_page_wired(p)) {
2108fee2a2faSMark Johnston wired:
2109cf060942SAlan Cox 			if ((options & OBJPR_NOTMAPPED) == 0 &&
2110cf060942SAlan Cox 			    object->ref_count != 0)
21114fec79beSAlan Cox 				pmap_remove_all(p);
21126bbee8e2SAlan Cox 			if ((options & OBJPR_CLEANONLY) == 0) {
21130012f373SJeff Roberson 				vm_page_invalid(p);
2114a28042d1SAlan Cox 				vm_page_undirty(p);
2115a28042d1SAlan Cox 			}
211663e97555SJeff Roberson 			vm_page_xunbusy(p);
211793c5d3a4SKonstantin Belousov 			continue;
21180d94caffSDavid Greenman 		}
211968855966SAlan Cox 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
212068855966SAlan Cox 		    ("vm_object_page_remove: page %p is fictitious", p));
21210012f373SJeff Roberson 		if ((options & OBJPR_CLEANONLY) != 0 &&
21220012f373SJeff Roberson 		    !vm_page_none_valid(p)) {
2123cf060942SAlan Cox 			if ((options & OBJPR_NOTMAPPED) == 0 &&
2124fee2a2faSMark Johnston 			    object->ref_count != 0 &&
2125fee2a2faSMark Johnston 			    !vm_page_try_remove_write(p))
2126fee2a2faSMark Johnston 				goto wired;
212763e97555SJeff Roberson 			if (p->dirty != 0) {
212863e97555SJeff Roberson 				vm_page_xunbusy(p);
212993c5d3a4SKonstantin Belousov 				continue;
21302965a453SKip Macy 			}
213163e97555SJeff Roberson 		}
2132fee2a2faSMark Johnston 		if ((options & OBJPR_NOTMAPPED) == 0 &&
2133fee2a2faSMark Johnston 		    object->ref_count != 0 && !vm_page_try_remove_all(p))
2134fee2a2faSMark Johnston 			goto wired;
21355cd29d0fSMark Johnston 		vm_page_free(p);
21362965a453SKip Macy 	}
2137f919ebdeSDavid Greenman 	vm_object_pip_wakeup(object);
2138c0503609SDavid Greenman }
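
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * helper that frees the pages backing a byte range of an object, assuming
 * "offset" and "length" are page-aligned.  Passing 0 for the options
 * removes any remaining mappings before freeing; OBJPR_CLEANONLY and/or
 * OBJPR_NOTMAPPED could be passed instead, subject to the rules described
 * above.
 */
#if 0
static void
example_purge_range(vm_object_t object, vm_ooffset_t offset, vm_size_t length)
{

	VM_OBJECT_WLOCK(object);
	vm_object_page_remove(object, OFF_TO_IDX(offset),
	    OFF_TO_IDX(offset + length), 0);
	VM_OBJECT_WUNLOCK(object);
}
#endif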
2139df8bae1dSRodney W. Grimes 
2140df8bae1dSRodney W. Grimes /*
21413138cd36SMark Johnston  *	vm_object_page_noreuse:
2142936c09acSJohn Baldwin  *
21433138cd36SMark Johnston  *	For the given object, attempt to move the specified pages to
21443138cd36SMark Johnston  *	the head of the inactive queue.  This bypasses regular LRU
21453138cd36SMark Johnston  *	operation and allows the pages to be reused quickly under memory
21463138cd36SMark Johnston  *	pressure.  If a page is wired for any reason, then it will not
21473138cd36SMark Johnston  *	be queued.  Pages are specified by the range ["start", "end").
21483138cd36SMark Johnston  *	As a special case, if "end" is zero, then the range extends from
21493138cd36SMark Johnston  *	"start" to the end of the object.
2150936c09acSJohn Baldwin  *
2151936c09acSJohn Baldwin  *	This operation should only be performed on objects that
215228634820SAlan Cox  *	contain non-fictitious, managed pages.
2153936c09acSJohn Baldwin  *
2154936c09acSJohn Baldwin  *	The object must be locked.
2155936c09acSJohn Baldwin  */
2156936c09acSJohn Baldwin void
21573138cd36SMark Johnston vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2158936c09acSJohn Baldwin {
2159936c09acSJohn Baldwin 	vm_page_t p, next;
2160936c09acSJohn Baldwin 
216152d1addaSAlan Cox 	VM_OBJECT_ASSERT_LOCKED(object);
216228634820SAlan Cox 	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
21633138cd36SMark Johnston 	    ("vm_object_page_noreuse: illegal object %p", object));
2164936c09acSJohn Baldwin 	if (object->resident_page_count == 0)
2165936c09acSJohn Baldwin 		return;
2166936c09acSJohn Baldwin 	p = vm_page_find_least(object, start);
2167936c09acSJohn Baldwin 
2168936c09acSJohn Baldwin 	/*
2169936c09acSJohn Baldwin 	 * Here, the variable "p" is either (1) the page with the least pindex
2170936c09acSJohn Baldwin 	 * greater than or equal to the parameter "start" or (2) NULL.
2171936c09acSJohn Baldwin 	 */
2172936c09acSJohn Baldwin 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
2173936c09acSJohn Baldwin 		next = TAILQ_NEXT(p, listq);
21743138cd36SMark Johnston 		vm_page_deactivate_noreuse(p);
2175936c09acSJohn Baldwin 	}
2176936c09acSJohn Baldwin }
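
/*
 * Illustrative sketch, not part of the original source: marking every
 * resident page of an object for quick reuse by exploiting the special
 * "end == 0" case documented above.  The object is assumed to contain
 * only non-fictitious, managed pages; a read lock satisfies the locking
 * requirement.
 */
#if 0
static void
example_noreuse_all(vm_object_t object)
{

	VM_OBJECT_RLOCK(object);
	vm_object_page_noreuse(object, 0, 0);
	VM_OBJECT_RUNLOCK(object);
}
#endif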
2177936c09acSJohn Baldwin 
2178936c09acSJohn Baldwin /*
2179387aabc5SAlan Cox  *	Populate the specified range of the object with valid pages.  Returns
2180387aabc5SAlan Cox  *	TRUE if the range is successfully populated and FALSE otherwise.
2181387aabc5SAlan Cox  *
2182387aabc5SAlan Cox  *	Note: This function should be optimized to pass a larger array of
2183387aabc5SAlan Cox  *	pages to vm_pager_get_pages() before it is applied to a non-
2184387aabc5SAlan Cox  *	OBJT_DEVICE object.
2185387aabc5SAlan Cox  *
2186387aabc5SAlan Cox  *	The object must be locked.
2187387aabc5SAlan Cox  */
2188387aabc5SAlan Cox boolean_t
2189387aabc5SAlan Cox vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2190387aabc5SAlan Cox {
2191093c7f39SGleb Smirnoff 	vm_page_t m;
2192387aabc5SAlan Cox 	vm_pindex_t pindex;
2193387aabc5SAlan Cox 	int rv;
2194387aabc5SAlan Cox 
219589f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
2196387aabc5SAlan Cox 	for (pindex = start; pindex < end; pindex++) {
2197c7575748SJeff Roberson 		rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
2198c7575748SJeff Roberson 		if (rv != VM_PAGER_OK)
2199387aabc5SAlan Cox 			break;
2200c7575748SJeff Roberson 
2201387aabc5SAlan Cox 		/*
2202387aabc5SAlan Cox 		 * Keep "m" busy because a subsequent iteration may unlock
2203387aabc5SAlan Cox 		 * the object.
2204387aabc5SAlan Cox 		 */
2205387aabc5SAlan Cox 	}
2206387aabc5SAlan Cox 	if (pindex > start) {
2207387aabc5SAlan Cox 		m = vm_page_lookup(object, start);
2208387aabc5SAlan Cox 		while (m != NULL && m->pindex < pindex) {
2209c7aebda8SAttilio Rao 			vm_page_xunbusy(m);
2210387aabc5SAlan Cox 			m = TAILQ_NEXT(m, listq);
2211387aabc5SAlan Cox 		}
2212387aabc5SAlan Cox 	}
2213387aabc5SAlan Cox 	return (pindex == end);
2214387aabc5SAlan Cox }
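
/*
 * Illustrative sketch, not part of the original source: populating the
 * first "npages" pages of a hypothetical object and reporting success.
 * The call may sleep in the pager; on partial failure, pages already
 * grabbed are unbusied by vm_object_populate() itself before it returns.
 */
#if 0
static boolean_t
example_populate_prefix(vm_object_t object, vm_pindex_t npages)
{
	boolean_t ok;

	VM_OBJECT_WLOCK(object);
	ok = vm_object_populate(object, 0, npages);
	VM_OBJECT_WUNLOCK(object);
	return (ok);
}
#endif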
2215387aabc5SAlan Cox 
2216387aabc5SAlan Cox /*
2217df8bae1dSRodney W. Grimes  *	Routine:	vm_object_coalesce
2218df8bae1dSRodney W. Grimes  *	Function:	Coalesces two objects backing up adjoining
2219df8bae1dSRodney W. Grimes  *			regions of memory into a single object.
2220df8bae1dSRodney W. Grimes  *
2221df8bae1dSRodney W. Grimes  *	returns TRUE if objects were combined.
2222df8bae1dSRodney W. Grimes  *
2223df8bae1dSRodney W. Grimes  *	NOTE:	Only works at the moment if the second object is NULL -
2224df8bae1dSRodney W. Grimes  *		if it's not, which object do we lock first?
2225df8bae1dSRodney W. Grimes  *
2226df8bae1dSRodney W. Grimes  *	Parameters:
2227df8bae1dSRodney W. Grimes  *		prev_object	First object to coalesce
2228df8bae1dSRodney W. Grimes  *		prev_offset	Offset into prev_object
2229df8bae1dSRodney W. Grimes  *		prev_size	Size of reference to prev_object
223057a21abaSAlan Cox  *		next_size	Size of reference to the second object
22313364c323SKonstantin Belousov  *		reserved	Indicator that extension region has
22323364c323SKonstantin Belousov  *				swap accounted for
2233df8bae1dSRodney W. Grimes  *
2234df8bae1dSRodney W. Grimes  *	Conditions:
2235df8bae1dSRodney W. Grimes  *	The object must *not* be locked.
2236df8bae1dSRodney W. Grimes  */
22370d94caffSDavid Greenman boolean_t
223857a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
22393364c323SKonstantin Belousov     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2240df8bae1dSRodney W. Grimes {
2241ea41812fSAlan Cox 	vm_pindex_t next_pindex;
2242df8bae1dSRodney W. Grimes 
224300e1854aSAlan Cox 	if (prev_object == NULL)
2244df8bae1dSRodney W. Grimes 		return (TRUE);
224563967687SJeff Roberson 	if ((prev_object->flags & OBJ_ANON) == 0)
224630dcfc09SJohn Dyson 		return (FALSE);
224730dcfc09SJohn Dyson 
224863967687SJeff Roberson 	VM_OBJECT_WLOCK(prev_object);
2249df8bae1dSRodney W. Grimes 	/*
225098087a06SJeff Roberson 	 * Try to collapse the object first.
2251df8bae1dSRodney W. Grimes 	 */
2252df8bae1dSRodney W. Grimes 	vm_object_collapse(prev_object);
2253df8bae1dSRodney W. Grimes 
2254df8bae1dSRodney W. Grimes 	/*
22550d94caffSDavid Greenman 	 * Can't coalesce if the object has more than one reference, is paged
22560d94caffSDavid Greenman 	 * out, shadows another object, or has a copy elsewhere; any of these
22570d94caffSDavid Greenman 	 * means that pages not mapped to prev_entry may be in use anyway.
2258df8bae1dSRodney W. Grimes 	 */
22598cc7e047SJohn Dyson 	if (prev_object->backing_object != NULL) {
226089f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(prev_object);
2261df8bae1dSRodney W. Grimes 		return (FALSE);
2262df8bae1dSRodney W. Grimes 	}
2263a316d390SJohn Dyson 
2264a316d390SJohn Dyson 	prev_size >>= PAGE_SHIFT;
2265a316d390SJohn Dyson 	next_size >>= PAGE_SHIFT;
226657a21abaSAlan Cox 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
22678cc7e047SJohn Dyson 
22680e48e068SMark Johnston 	if (prev_object->ref_count > 1 &&
22690e48e068SMark Johnston 	    prev_object->size != next_pindex &&
22700e48e068SMark Johnston 	    (prev_object->flags & OBJ_ONEMAPPING) == 0) {
227189f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(prev_object);
22728cc7e047SJohn Dyson 		return (FALSE);
22738cc7e047SJohn Dyson 	}
22748cc7e047SJohn Dyson 
2275df8bae1dSRodney W. Grimes 	/*
22763364c323SKonstantin Belousov 	 * Account for the charge.
22773364c323SKonstantin Belousov 	 */
2278ef694c1aSEdward Tomasz Napierala 	if (prev_object->cred != NULL) {
22793364c323SKonstantin Belousov 
22803364c323SKonstantin Belousov 		/*
22813364c323SKonstantin Belousov 		 * If prev_object was charged, then this mapping,
2282763df3ecSPedro F. Giffuni 		 * although not charged now, may become writable
2283ef694c1aSEdward Tomasz Napierala 		 * later.  A non-NULL cred in the object would prevent
22843364c323SKonstantin Belousov 		 * swap reservation when write access is enabled, so
22853364c323SKonstantin Belousov 		 * reserve swap now.  A failed reservation causes
22863364c323SKonstantin Belousov 		 * allocation of a separate object for the map entry,
22873364c323SKonstantin Belousov 		 * and swap reservation for that entry is managed at
22883364c323SKonstantin Belousov 		 * the appropriate time.
22893364c323SKonstantin Belousov 		 */
2290ef694c1aSEdward Tomasz Napierala 		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2291ef694c1aSEdward Tomasz Napierala 		    prev_object->cred)) {
22929f790a17SKonstantin Belousov 			VM_OBJECT_WUNLOCK(prev_object);
22933364c323SKonstantin Belousov 			return (FALSE);
22943364c323SKonstantin Belousov 		}
22953364c323SKonstantin Belousov 		prev_object->charge += ptoa(next_size);
22963364c323SKonstantin Belousov 	}
22973364c323SKonstantin Belousov 
22983364c323SKonstantin Belousov 	/*
22990d94caffSDavid Greenman 	 * Remove any pages that may still be in the object from a previous
23000d94caffSDavid Greenman 	 * deallocation.
2301df8bae1dSRodney W. Grimes 	 */
2302ea41812fSAlan Cox 	if (next_pindex < prev_object->size) {
23036bbee8e2SAlan Cox 		vm_object_page_remove(prev_object, next_pindex, next_pindex +
23046bbee8e2SAlan Cox 		    next_size, 0);
2305ea41812fSAlan Cox 		if (prev_object->type == OBJT_SWAP)
2306ea41812fSAlan Cox 			swap_pager_freespace(prev_object, next_pindex,
2307ea41812fSAlan Cox 			    next_size);
23083364c323SKonstantin Belousov #if 0
2309ef694c1aSEdward Tomasz Napierala 		if (prev_object->cred != NULL) {
23103364c323SKonstantin Belousov 			KASSERT(prev_object->charge >=
23113364c323SKonstantin Belousov 			    ptoa(prev_object->size - next_pindex),
23123364c323SKonstantin Belousov 			    ("object %p overcharged 1 %jx %jx", prev_object,
23133364c323SKonstantin Belousov 				(uintmax_t)next_pindex, (uintmax_t)next_size));
23143364c323SKonstantin Belousov 			prev_object->charge -= ptoa(prev_object->size -
23153364c323SKonstantin Belousov 			    next_pindex);
23163364c323SKonstantin Belousov 		}
23173364c323SKonstantin Belousov #endif
2318ea41812fSAlan Cox 	}
2319df8bae1dSRodney W. Grimes 
2320df8bae1dSRodney W. Grimes 	/*
2321df8bae1dSRodney W. Grimes 	 * Extend the object if necessary.
2322df8bae1dSRodney W. Grimes 	 */
2323ea41812fSAlan Cox 	if (next_pindex + next_size > prev_object->size)
2324ea41812fSAlan Cox 		prev_object->size = next_pindex + next_size;
2325df8bae1dSRodney W. Grimes 
232689f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(prev_object);
2327df8bae1dSRodney W. Grimes 	return (TRUE);
2328df8bae1dSRodney W. Grimes }
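
/*
 * Illustrative sketch, not part of the original source: the pattern a map
 * layer caller might use when growing an anonymous mapping in place.  The
 * parameter names stand in for the previous map entry's object, offset,
 * and byte sizes; all sizes are assumed page-aligned.  On failure the
 * caller would fall back to allocating a separate object for the new
 * range.
 */
#if 0
static boolean_t
example_grow_mapping(vm_object_t prev_obj, vm_ooffset_t prev_off,
    vm_size_t prev_size, vm_size_t grow_size)
{

	/* The object must be unlocked; coalesce locks it internally. */
	return (vm_object_coalesce(prev_obj, prev_off, prev_size,
	    grow_size, FALSE));
}
#endif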
2329df8bae1dSRodney W. Grimes 
23307a5a6352SMatthew Dillon void
23317a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object)
23327a5a6352SMatthew Dillon {
23337a5a6352SMatthew Dillon 
233467d0e293SJeff Roberson 	/* Only set for vnodes & tmpfs */
233567d0e293SJeff Roberson 	if (object->type != OBJT_VNODE &&
233667d0e293SJeff Roberson 	    (object->flags & OBJ_TMPFS_NODE) == 0)
23373280870dSKonstantin Belousov 		return;
233867d0e293SJeff Roberson 	atomic_add_int(&object->generation, 1);
23397a5a6352SMatthew Dillon }
23407a5a6352SMatthew Dillon 
234103462509SAlan Cox /*
234203462509SAlan Cox  *	vm_object_unwire:
234303462509SAlan Cox  *
234403462509SAlan Cox  *	For each page offset within the specified range of the given object,
234503462509SAlan Cox  *	find the highest-level page in the shadow chain and unwire it.  A page
234603462509SAlan Cox  *	must exist at every page offset, and the highest-level page must be
234703462509SAlan Cox  *	wired.
234803462509SAlan Cox  */
234903462509SAlan Cox void
235003462509SAlan Cox vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
235103462509SAlan Cox     uint8_t queue)
235203462509SAlan Cox {
235320e4afbfSKonstantin Belousov 	vm_object_t tobject, t1object;
235403462509SAlan Cox 	vm_page_t m, tm;
235503462509SAlan Cox 	vm_pindex_t end_pindex, pindex, tpindex;
235603462509SAlan Cox 	int depth, locked_depth;
235703462509SAlan Cox 
235803462509SAlan Cox 	KASSERT((offset & PAGE_MASK) == 0,
235903462509SAlan Cox 	    ("vm_object_unwire: offset is not page aligned"));
236003462509SAlan Cox 	KASSERT((length & PAGE_MASK) == 0,
236103462509SAlan Cox 	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
236203462509SAlan Cox 	/* The wired count of a fictitious page never changes. */
236303462509SAlan Cox 	if ((object->flags & OBJ_FICTITIOUS) != 0)
236403462509SAlan Cox 		return;
236503462509SAlan Cox 	pindex = OFF_TO_IDX(offset);
236603462509SAlan Cox 	end_pindex = pindex + atop(length);
236720e4afbfSKonstantin Belousov again:
236803462509SAlan Cox 	locked_depth = 1;
236903462509SAlan Cox 	VM_OBJECT_RLOCK(object);
237003462509SAlan Cox 	m = vm_page_find_least(object, pindex);
237103462509SAlan Cox 	while (pindex < end_pindex) {
237203462509SAlan Cox 		if (m == NULL || pindex < m->pindex) {
237303462509SAlan Cox 			/*
237403462509SAlan Cox 			 * The first object in the shadow chain doesn't
237503462509SAlan Cox 			 * contain a page at the current index.  Therefore,
237603462509SAlan Cox 			 * the page must exist in a backing object.
237703462509SAlan Cox 			 */
237803462509SAlan Cox 			tobject = object;
237903462509SAlan Cox 			tpindex = pindex;
238003462509SAlan Cox 			depth = 0;
238103462509SAlan Cox 			do {
238203462509SAlan Cox 				tpindex +=
238303462509SAlan Cox 				    OFF_TO_IDX(tobject->backing_object_offset);
238403462509SAlan Cox 				tobject = tobject->backing_object;
238503462509SAlan Cox 				KASSERT(tobject != NULL,
238603462509SAlan Cox 				    ("vm_object_unwire: missing page"));
238703462509SAlan Cox 				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
238803462509SAlan Cox 					goto next_page;
238903462509SAlan Cox 				depth++;
239003462509SAlan Cox 				if (depth == locked_depth) {
239103462509SAlan Cox 					locked_depth++;
239203462509SAlan Cox 					VM_OBJECT_RLOCK(tobject);
239303462509SAlan Cox 				}
239403462509SAlan Cox 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
239503462509SAlan Cox 			    NULL);
239603462509SAlan Cox 		} else {
239703462509SAlan Cox 			tm = m;
239803462509SAlan Cox 			m = TAILQ_NEXT(m, listq);
239903462509SAlan Cox 		}
240063e97555SJeff Roberson 		if (vm_page_trysbusy(tm) == 0) {
240187e93ea6SMark Johnston 			for (tobject = object; locked_depth >= 1;
240220e4afbfSKonstantin Belousov 			    locked_depth--) {
240320e4afbfSKonstantin Belousov 				t1object = tobject->backing_object;
240487e93ea6SMark Johnston 				if (tm->object != tobject)
240520e4afbfSKonstantin Belousov 					VM_OBJECT_RUNLOCK(tobject);
240620e4afbfSKonstantin Belousov 				tobject = t1object;
240720e4afbfSKonstantin Belousov 			}
240820e4afbfSKonstantin Belousov 			vm_page_busy_sleep(tm, "unwbo", true);
240920e4afbfSKonstantin Belousov 			goto again;
241020e4afbfSKonstantin Belousov 		}
241103462509SAlan Cox 		vm_page_unwire(tm, queue);
241263e97555SJeff Roberson 		vm_page_sunbusy(tm);
241303462509SAlan Cox next_page:
241403462509SAlan Cox 		pindex++;
241503462509SAlan Cox 	}
241603462509SAlan Cox 	/* Release the accumulated object locks. */
241720e4afbfSKonstantin Belousov 	for (tobject = object; locked_depth >= 1; locked_depth--) {
241820e4afbfSKonstantin Belousov 		t1object = tobject->backing_object;
241920e4afbfSKonstantin Belousov 		VM_OBJECT_RUNLOCK(tobject);
242020e4afbfSKonstantin Belousov 		tobject = t1object;
242103462509SAlan Cox 	}
242203462509SAlan Cox }
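
/*
 * Illustrative sketch, not part of the original source: unwiring a
 * previously wired byte range and returning its pages to the active
 * queue, so recently used data stays resident.  Offset and length must
 * be page-aligned, every page in the range must exist and be wired, and
 * the object locks are taken internally.
 */
#if 0
static void
example_unwire_range(vm_object_t object, vm_ooffset_t offset,
    vm_size_t length)
{

	vm_object_unwire(object, offset, length, PQ_ACTIVE);
}
#endif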
242303462509SAlan Cox 
24240951bd36SEric van Gyzen /*
24250951bd36SEric van Gyzen  * Return the vnode for the given object, or NULL if none exists.
24260951bd36SEric van Gyzen  * For tmpfs objects, the function may return NULL if there is
24270951bd36SEric van Gyzen  * no vnode allocated at the time of the call.
24280951bd36SEric van Gyzen  */
242963e4c6cdSEric van Gyzen struct vnode *
243063e4c6cdSEric van Gyzen vm_object_vnode(vm_object_t object)
243163e4c6cdSEric van Gyzen {
24320951bd36SEric van Gyzen 	struct vnode *vp;
243363e4c6cdSEric van Gyzen 
243463e4c6cdSEric van Gyzen 	VM_OBJECT_ASSERT_LOCKED(object);
24350951bd36SEric van Gyzen 	if (object->type == OBJT_VNODE) {
24360951bd36SEric van Gyzen 		vp = object->handle;
24370951bd36SEric van Gyzen 		KASSERT(vp != NULL, ("%s: OBJT_VNODE has no vnode", __func__));
24380951bd36SEric van Gyzen 	} else if (object->type == OBJT_SWAP &&
24390951bd36SEric van Gyzen 	    (object->flags & OBJ_TMPFS) != 0) {
24400951bd36SEric van Gyzen 		vp = object->un_pager.swp.swp_tmpfs;
24410951bd36SEric van Gyzen 		KASSERT(vp != NULL, ("%s: OBJT_TMPFS has no vnode", __func__));
24420951bd36SEric van Gyzen 	} else {
24430951bd36SEric van Gyzen 		vp = NULL;
24440951bd36SEric van Gyzen 	}
24450951bd36SEric van Gyzen 	return (vp);
244663e4c6cdSEric van Gyzen }
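
/*
 * Illustrative sketch, not part of the original source: the safe pattern
 * for consuming the returned vnode, as sysctl_vm_object_list() does
 * below.  The reference is taken while the object lock is held, and the
 * lock is dropped before any sleeping vnode operations.  The caller
 * vrele()s the vnode when done.
 */
#if 0
static struct vnode *
example_object_vnode_ref(vm_object_t obj)
{
	struct vnode *vp;

	VM_OBJECT_RLOCK(obj);
	vp = vm_object_vnode(obj);
	if (vp != NULL)
		vref(vp);	/* Hold the vnode past the object lock. */
	VM_OBJECT_RUNLOCK(obj);
	return (vp);
}
#endif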
244763e4c6cdSEric van Gyzen 
2449205be21dSJeff Roberson /*
2450205be21dSJeff Roberson  * Busy the vm object.  This prevents new pages belonging to the object from
2451205be21dSJeff Roberson  * becoming busy.  Pages that are already busy remain busy.  Callers are
2452205be21dSJeff Roberson  * responsible for checking the state of each page before proceeding.
2453205be21dSJeff Roberson  */
2454205be21dSJeff Roberson void
2455205be21dSJeff Roberson vm_object_busy(vm_object_t obj)
2456205be21dSJeff Roberson {
2457205be21dSJeff Roberson 
2458205be21dSJeff Roberson 	VM_OBJECT_ASSERT_LOCKED(obj);
2459205be21dSJeff Roberson 
2460*c99d0c58SMark Johnston 	blockcount_acquire(&obj->busy, 1);
2461205be21dSJeff Roberson 	/* The fence is required to order loads of page busy. */
2462205be21dSJeff Roberson 	atomic_thread_fence_acq_rel();
2463205be21dSJeff Roberson }
2464205be21dSJeff Roberson 
2465205be21dSJeff Roberson void
2466205be21dSJeff Roberson vm_object_unbusy(vm_object_t obj)
2467205be21dSJeff Roberson {
2468205be21dSJeff Roberson 
2469*c99d0c58SMark Johnston 	blockcount_release(&obj->busy, 1);
2470205be21dSJeff Roberson }
2471205be21dSJeff Roberson 
2472205be21dSJeff Roberson void
2473205be21dSJeff Roberson vm_object_busy_wait(vm_object_t obj, const char *wmesg)
2474205be21dSJeff Roberson {
2475205be21dSJeff Roberson 
2476205be21dSJeff Roberson 	VM_OBJECT_ASSERT_UNLOCKED(obj);
2477205be21dSJeff Roberson 
2478*c99d0c58SMark Johnston 	(void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM);
2479205be21dSJeff Roberson }
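
/*
 * Illustrative sketch, not part of the original source: a reader that
 * wants to examine page state without new busy acquisitions starting
 * underneath it.  The object lock may be dropped once the object is
 * busied; vm_object_unbusy() ends the window.
 */
#if 0
static void
example_busy_scan(vm_object_t obj)
{

	VM_OBJECT_RLOCK(obj);
	vm_object_busy(obj);
	VM_OBJECT_RUNLOCK(obj);
	/* ... inspect pages; no page can newly become busy here ... */
	vm_object_unbusy(obj);
}
#endif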
2480205be21dSJeff Roberson 
24815e38e3f5SEric van Gyzen /*
24825e38e3f5SEric van Gyzen  * Return the kvme type of the given object.
24835e38e3f5SEric van Gyzen  * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
24845e38e3f5SEric van Gyzen  */
24855e38e3f5SEric van Gyzen int
24865e38e3f5SEric van Gyzen vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
24875e38e3f5SEric van Gyzen {
24885e38e3f5SEric van Gyzen 
24895e38e3f5SEric van Gyzen 	VM_OBJECT_ASSERT_LOCKED(object);
24905e38e3f5SEric van Gyzen 	if (vpp != NULL)
24915e38e3f5SEric van Gyzen 		*vpp = vm_object_vnode(object);
24925e38e3f5SEric van Gyzen 	switch (object->type) {
24935e38e3f5SEric van Gyzen 	case OBJT_DEFAULT:
24945e38e3f5SEric van Gyzen 		return (KVME_TYPE_DEFAULT);
24955e38e3f5SEric van Gyzen 	case OBJT_VNODE:
24965e38e3f5SEric van Gyzen 		return (KVME_TYPE_VNODE);
24975e38e3f5SEric van Gyzen 	case OBJT_SWAP:
24985e38e3f5SEric van Gyzen 		if ((object->flags & OBJ_TMPFS_NODE) != 0)
24995e38e3f5SEric van Gyzen 			return (KVME_TYPE_VNODE);
25005e38e3f5SEric van Gyzen 		return (KVME_TYPE_SWAP);
25015e38e3f5SEric van Gyzen 	case OBJT_DEVICE:
25025e38e3f5SEric van Gyzen 		return (KVME_TYPE_DEVICE);
25035e38e3f5SEric van Gyzen 	case OBJT_PHYS:
25045e38e3f5SEric van Gyzen 		return (KVME_TYPE_PHYS);
25055e38e3f5SEric van Gyzen 	case OBJT_DEAD:
25065e38e3f5SEric van Gyzen 		return (KVME_TYPE_DEAD);
25075e38e3f5SEric van Gyzen 	case OBJT_SG:
25085e38e3f5SEric van Gyzen 		return (KVME_TYPE_SG);
25095e38e3f5SEric van Gyzen 	case OBJT_MGTDEVICE:
25105e38e3f5SEric van Gyzen 		return (KVME_TYPE_MGTDEVICE);
25115e38e3f5SEric van Gyzen 	default:
25125e38e3f5SEric van Gyzen 		return (KVME_TYPE_UNKNOWN);
25135e38e3f5SEric van Gyzen 	}
25145e38e3f5SEric van Gyzen }
25155e38e3f5SEric van Gyzen 
2516ff87ae35SJohn Baldwin static int
2517ff87ae35SJohn Baldwin sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
2518ff87ae35SJohn Baldwin {
25190ecee546SKonstantin Belousov 	struct kinfo_vmobject *kvo;
2520ff87ae35SJohn Baldwin 	char *fullpath, *freepath;
2521ff87ae35SJohn Baldwin 	struct vnode *vp;
2522ff87ae35SJohn Baldwin 	struct vattr va;
2523ff87ae35SJohn Baldwin 	vm_object_t obj;
2524ff87ae35SJohn Baldwin 	vm_page_t m;
2525ff87ae35SJohn Baldwin 	int count, error;
2526ff87ae35SJohn Baldwin 
2527ff87ae35SJohn Baldwin 	if (req->oldptr == NULL) {
2528ff87ae35SJohn Baldwin 		/*
2529ff87ae35SJohn Baldwin 		 * If an old buffer has not been provided, generate an
2530ff87ae35SJohn Baldwin 		 * estimate of the space needed for a subsequent call.
2531ff87ae35SJohn Baldwin 		 */
2532ff87ae35SJohn Baldwin 		mtx_lock(&vm_object_list_mtx);
2533ff87ae35SJohn Baldwin 		count = 0;
2534ff87ae35SJohn Baldwin 		TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2535ff87ae35SJohn Baldwin 			if (obj->type == OBJT_DEAD)
2536ff87ae35SJohn Baldwin 				continue;
2537ff87ae35SJohn Baldwin 			count++;
2538ff87ae35SJohn Baldwin 		}
2539ff87ae35SJohn Baldwin 		mtx_unlock(&vm_object_list_mtx);
2540ff87ae35SJohn Baldwin 		return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
2541ff87ae35SJohn Baldwin 		    count * 11 / 10));
2542ff87ae35SJohn Baldwin 	}
2543ff87ae35SJohn Baldwin 
25440ecee546SKonstantin Belousov 	kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);
2545ff87ae35SJohn Baldwin 	error = 0;
2546ff87ae35SJohn Baldwin 
2547ff87ae35SJohn Baldwin 	/*
2548ff87ae35SJohn Baldwin 	 * VM objects are type stable and are never removed from the
2549ff87ae35SJohn Baldwin 	 * list once added.  This allows us to safely read obj->object_list
2550ff87ae35SJohn Baldwin 	 * after reacquiring the VM object lock.
2551ff87ae35SJohn Baldwin 	 */
2552ff87ae35SJohn Baldwin 	mtx_lock(&vm_object_list_mtx);
2553ff87ae35SJohn Baldwin 	TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2554ff87ae35SJohn Baldwin 		if (obj->type == OBJT_DEAD)
2555ff87ae35SJohn Baldwin 			continue;
2556ff87ae35SJohn Baldwin 		VM_OBJECT_RLOCK(obj);
2557ff87ae35SJohn Baldwin 		if (obj->type == OBJT_DEAD) {
2558ff87ae35SJohn Baldwin 			VM_OBJECT_RUNLOCK(obj);
2559ff87ae35SJohn Baldwin 			continue;
2560ff87ae35SJohn Baldwin 		}
2561ff87ae35SJohn Baldwin 		mtx_unlock(&vm_object_list_mtx);
25620ecee546SKonstantin Belousov 		kvo->kvo_size = ptoa(obj->size);
25630ecee546SKonstantin Belousov 		kvo->kvo_resident = obj->resident_page_count;
25640ecee546SKonstantin Belousov 		kvo->kvo_ref_count = obj->ref_count;
25650ecee546SKonstantin Belousov 		kvo->kvo_shadow_count = obj->shadow_count;
25660ecee546SKonstantin Belousov 		kvo->kvo_memattr = obj->memattr;
25670ecee546SKonstantin Belousov 		kvo->kvo_active = 0;
25680ecee546SKonstantin Belousov 		kvo->kvo_inactive = 0;
2569ff87ae35SJohn Baldwin 		TAILQ_FOREACH(m, &obj->memq, listq) {
2570ff87ae35SJohn Baldwin 			/*
2571ff87ae35SJohn Baldwin 			 * A page may belong to the object but be
2572ff87ae35SJohn Baldwin 			 * dequeued and set to PQ_NONE while the
2573ff87ae35SJohn Baldwin 			 * object lock is not held.  This makes the
2574ff87ae35SJohn Baldwin 			 * reads of m->queue below racy, and we do not
2575ff87ae35SJohn Baldwin 			 * count pages set to PQ_NONE.  However, this
2576ff87ae35SJohn Baldwin 			 * sysctl is only meant to give an
2577ff87ae35SJohn Baldwin 			 * approximation of the system anyway.
2578ff87ae35SJohn Baldwin 			 */
25795cff1f4dSMark Johnston 			if (m->a.queue == PQ_ACTIVE)
25800ecee546SKonstantin Belousov 				kvo->kvo_active++;
25815cff1f4dSMark Johnston 			else if (m->a.queue == PQ_INACTIVE)
25820ecee546SKonstantin Belousov 				kvo->kvo_inactive++;
2583ff87ae35SJohn Baldwin 		}
2584ff87ae35SJohn Baldwin 
25850ecee546SKonstantin Belousov 		kvo->kvo_vn_fileid = 0;
25860ecee546SKonstantin Belousov 		kvo->kvo_vn_fsid = 0;
25870ecee546SKonstantin Belousov 		kvo->kvo_vn_fsid_freebsd11 = 0;
2588ff87ae35SJohn Baldwin 		freepath = NULL;
2589ff87ae35SJohn Baldwin 		fullpath = "";
25905e38e3f5SEric van Gyzen 		kvo->kvo_type = vm_object_kvme_type(obj, &vp);
25915e38e3f5SEric van Gyzen 		if (vp != NULL)
2592ff87ae35SJohn Baldwin 			vref(vp);
2593ff87ae35SJohn Baldwin 		VM_OBJECT_RUNLOCK(obj);
2594ff87ae35SJohn Baldwin 		if (vp != NULL) {
2595ff87ae35SJohn Baldwin 			vn_fullpath(curthread, vp, &fullpath, &freepath);
2596ff87ae35SJohn Baldwin 			vn_lock(vp, LK_SHARED | LK_RETRY);
2597ff87ae35SJohn Baldwin 			if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
25980ecee546SKonstantin Belousov 				kvo->kvo_vn_fileid = va.va_fileid;
25990ecee546SKonstantin Belousov 				kvo->kvo_vn_fsid = va.va_fsid;
26000ecee546SKonstantin Belousov 				kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
260169921123SKonstantin Belousov 								/* truncate */
2602ff87ae35SJohn Baldwin 			}
2603ff87ae35SJohn Baldwin 			vput(vp);
2604ff87ae35SJohn Baldwin 		}
2605ff87ae35SJohn Baldwin 
26060ecee546SKonstantin Belousov 		strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
2607ff87ae35SJohn Baldwin 		if (freepath != NULL)
2608ff87ae35SJohn Baldwin 			free(freepath, M_TEMP);
2609ff87ae35SJohn Baldwin 
2610ff87ae35SJohn Baldwin 		/* Pack record size down */
26110ecee546SKonstantin Belousov 		kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
26120ecee546SKonstantin Belousov 		    + strlen(kvo->kvo_path) + 1;
26130ecee546SKonstantin Belousov 		kvo->kvo_structsize = roundup(kvo->kvo_structsize,
2614ff87ae35SJohn Baldwin 		    sizeof(uint64_t));
26150ecee546SKonstantin Belousov 		error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
2616ff87ae35SJohn Baldwin 		mtx_lock(&vm_object_list_mtx);
2617ff87ae35SJohn Baldwin 		if (error)
2618ff87ae35SJohn Baldwin 			break;
2619ff87ae35SJohn Baldwin 	}
2620ff87ae35SJohn Baldwin 	mtx_unlock(&vm_object_list_mtx);
26210ecee546SKonstantin Belousov 	free(kvo, M_TEMP);
2622ff87ae35SJohn Baldwin 	return (error);
2623ff87ae35SJohn Baldwin }
2624ff87ae35SJohn Baldwin SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
2625ff87ae35SJohn Baldwin     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
2626ff87ae35SJohn Baldwin     "List of VM objects");
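
/*
 * Illustrative sketch, not part of the original source: a userland
 * consumer of the handler above.  Records are variable length, so the
 * buffer is walked using kvo_structsize.  libutil's kinfo_getvmobject(3)
 * wraps this same sysctl.
 */
#if 0
/* Userland code, shown here only as an example. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct kinfo_vmobject *kvo;
	char *buf, *p;
	size_t len;

	len = 0;
	if (sysctlbyname("vm.objects", NULL, &len, NULL, 0) != 0)
		err(1, "vm.objects");
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	/* A racing object creation may require retrying with more space. */
	if (sysctlbyname("vm.objects", buf, &len, NULL, 0) != 0)
		err(1, "vm.objects");
	for (p = buf; p < buf + len; p += kvo->kvo_structsize) {
		kvo = (struct kinfo_vmobject *)(void *)p;
		printf("type %d res %d ref %d %s\n", kvo->kvo_type,
		    kvo->kvo_resident, kvo->kvo_ref_count, kvo->kvo_path);
	}
	free(buf);
	return (0);
}
#endif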
2627ff87ae35SJohn Baldwin 
2628c7c34a24SBruce Evans #include "opt_ddb.h"
2629c3cb3e12SDavid Greenman #ifdef DDB
2630c7c34a24SBruce Evans #include <sys/kernel.h>
2631c7c34a24SBruce Evans 
2632ce9edcf5SPoul-Henning Kamp #include <sys/cons.h>
2633c7c34a24SBruce Evans 
2634c7c34a24SBruce Evans #include <ddb/ddb.h>
2635c7c34a24SBruce Evans 
2636cac597e4SBruce Evans static int
26371b40f8c0SMatthew Dillon _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2638a1f6d91cSDavid Greenman {
2639a1f6d91cSDavid Greenman 	vm_map_t tmpm;
2640a1f6d91cSDavid Greenman 	vm_map_entry_t tmpe;
2641a1f6d91cSDavid Greenman 	vm_object_t obj;
2642a1f6d91cSDavid Greenman 
2643a1f6d91cSDavid Greenman 	if (map == NULL)
2644a1f6d91cSDavid Greenman 		return 0;
2645a1f6d91cSDavid Greenman 
2646a1f6d91cSDavid Greenman 	if (entry == NULL) {
26472288078cSDoug Moore 		VM_MAP_ENTRY_FOREACH(tmpe, map) {
2648a1f6d91cSDavid Greenman 			if (_vm_object_in_map(map, object, tmpe)) {
2649a1f6d91cSDavid Greenman 				return 1;
2650a1f6d91cSDavid Greenman 			}
2651a1f6d91cSDavid Greenman 		}
26529fdfe602SMatthew Dillon 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
26539fdfe602SMatthew Dillon 		tmpm = entry->object.sub_map;
26542288078cSDoug Moore 		VM_MAP_ENTRY_FOREACH(tmpe, tmpm) {
2655a1f6d91cSDavid Greenman 			if (_vm_object_in_map(tmpm, object, tmpe)) {
2656a1f6d91cSDavid Greenman 				return 1;
2657a1f6d91cSDavid Greenman 			}
2658a1f6d91cSDavid Greenman 		}
26598aef1712SMatthew Dillon 	} else if ((obj = entry->object.vm_object) != NULL) {
266024a1cce3SDavid Greenman 		for (; obj; obj = obj->backing_object)
2661a1f6d91cSDavid Greenman 			if (obj == object) {
2662a1f6d91cSDavid Greenman 				return 1;
2663a1f6d91cSDavid Greenman 			}
2664a1f6d91cSDavid Greenman 	}
2665a1f6d91cSDavid Greenman 	return 0;
2666a1f6d91cSDavid Greenman }
2667a1f6d91cSDavid Greenman 
2668cac597e4SBruce Evans static int
26691b40f8c0SMatthew Dillon vm_object_in_map(vm_object_t object)
2670a1f6d91cSDavid Greenman {
2671a1f6d91cSDavid Greenman 	struct proc *p;
26721005a129SJohn Baldwin 
267360517fd1SJohn Baldwin 	/* sx_slock(&allproc_lock); */
2674f67af5c9SXin LI 	FOREACH_PROC_IN_SYSTEM(p) {
2675a1f6d91cSDavid Greenman 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2676a1f6d91cSDavid Greenman 			continue;
2677553629ebSJake Burkholder 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL)) {
267860517fd1SJohn Baldwin 			/* sx_sunlock(&allproc_lock); */
2679a1f6d91cSDavid Greenman 			return 1;
2680a1f6d91cSDavid Greenman 		}
2681553629ebSJake Burkholder 	}
268260517fd1SJohn Baldwin 	/* sx_sunlock(&allproc_lock); */
2683a1f6d91cSDavid Greenman 	if (_vm_object_in_map(kernel_map, object, NULL))
2684a1f6d91cSDavid Greenman 		return 1;
2685a1f6d91cSDavid Greenman 	return 0;
2686a1f6d91cSDavid Greenman }
2687a1f6d91cSDavid Greenman 
2688c7c34a24SBruce Evans DB_SHOW_COMMAND(vmochk, vm_object_check)
2689f708ef1bSPoul-Henning Kamp {
2690a1f6d91cSDavid Greenman 	vm_object_t object;
2691a1f6d91cSDavid Greenman 
2692a1f6d91cSDavid Greenman 	/*
2693a1f6d91cSDavid Greenman 	 * make sure that internal objs are in a map somewhere
2694a1f6d91cSDavid Greenman 	 * and none have zero ref counts.
2695a1f6d91cSDavid Greenman 	 */
2696cc64b484SAlfred Perlstein 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
269732362449SKonstantin Belousov 		if ((object->flags & OBJ_ANON) != 0) {
2698a1f6d91cSDavid Greenman 			if (object->ref_count == 0) {
26993efc015bSPeter Wemm 				db_printf("vmochk: internal obj has zero ref count: %ld\n",
27003efc015bSPeter Wemm 					(long)object->size);
2701a1f6d91cSDavid Greenman 			}
2702a1f6d91cSDavid Greenman 			if (!vm_object_in_map(object)) {
2703fc62ef1fSBruce Evans 				db_printf(
2704fc62ef1fSBruce Evans 			"vmochk: internal obj is not in a map: "
2705fc62ef1fSBruce Evans 			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2706fc62ef1fSBruce Evans 				    object->ref_count, (u_long)object->size,
2707fc62ef1fSBruce Evans 				    (u_long)object->size,
2708fc62ef1fSBruce Evans 				    (void *)object->backing_object);
2709a1f6d91cSDavid Greenman 			}
2710a1f6d91cSDavid Greenman 		}
2711a1f6d91cSDavid Greenman 	}
2712a1f6d91cSDavid Greenman }
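
/*
 * Illustrative note, not part of the original source: the check above is
 * run from the kernel debugger prompt as "db> show vmochk" and takes no
 * arguments.  The commands defined below, "show object <addr>" and
 * "show vmopag", print a single object and per-object runs of physically
 * contiguous pages, respectively.
 */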
2713a1f6d91cSDavid Greenman 
271426f9a767SRodney W. Grimes /*
2715df8bae1dSRodney W. Grimes  *	vm_object_print:	[ debug ]
2716df8bae1dSRodney W. Grimes  */
2717c7c34a24SBruce Evans DB_SHOW_COMMAND(object, vm_object_print_static)
2718df8bae1dSRodney W. Grimes {
2719c7c34a24SBruce Evans 	/* XXX convert args. */
2720c7c34a24SBruce Evans 	vm_object_t object = (vm_object_t)addr;
2721c7c34a24SBruce Evans 	boolean_t full = have_addr;
2722c7c34a24SBruce Evans 
2723d031cff1SMatthew Dillon 	vm_page_t p;
2724df8bae1dSRodney W. Grimes 
2725c7c34a24SBruce Evans 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2726c7c34a24SBruce Evans #define	count	was_count
2727c7c34a24SBruce Evans 
2728d031cff1SMatthew Dillon 	int count;
2729df8bae1dSRodney W. Grimes 
2730df8bae1dSRodney W. Grimes 	if (object == NULL)
2731df8bae1dSRodney W. Grimes 		return;
2732df8bae1dSRodney W. Grimes 
2733eb95adefSBruce Evans 	db_iprintf(
2734ef694c1aSEdward Tomasz Napierala 	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2735e47cd172SMaxime Henrion 	    object, (int)object->type, (uintmax_t)object->size,
27363364c323SKonstantin Belousov 	    object->resident_page_count, object->ref_count, object->flags,
2737ef694c1aSEdward Tomasz Napierala 	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2738e47cd172SMaxime Henrion 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
27391c7c3c6aSMatthew Dillon 	    object->shadow_count,
2740eb95adefSBruce Evans 	    object->backing_object ? object->backing_object->ref_count : 0,
2741e47cd172SMaxime Henrion 	    object->backing_object, (uintmax_t)object->backing_object_offset);
2742df8bae1dSRodney W. Grimes 
2743df8bae1dSRodney W. Grimes 	if (!full)
2744df8bae1dSRodney W. Grimes 		return;
2745df8bae1dSRodney W. Grimes 
2746c7c34a24SBruce Evans 	db_indent += 2;
2747df8bae1dSRodney W. Grimes 	count = 0;
2748fc2ffbe6SPoul-Henning Kamp 	TAILQ_FOREACH(p, &object->memq, listq) {
2749df8bae1dSRodney W. Grimes 		if (count == 0)
2750c7c34a24SBruce Evans 			db_iprintf("memory:=");
2751df8bae1dSRodney W. Grimes 		else if (count == 6) {
2752c7c34a24SBruce Evans 			db_printf("\n");
2753c7c34a24SBruce Evans 			db_iprintf(" ...");
2754df8bae1dSRodney W. Grimes 			count = 0;
2755df8bae1dSRodney W. Grimes 		} else
2756c7c34a24SBruce Evans 			db_printf(",");
2757df8bae1dSRodney W. Grimes 		count++;
2758df8bae1dSRodney W. Grimes 
2759e47cd172SMaxime Henrion 		db_printf("(off=0x%jx,page=0x%jx)",
2760e47cd172SMaxime Henrion 		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2761df8bae1dSRodney W. Grimes 	}
2762df8bae1dSRodney W. Grimes 	if (count != 0)
2763c7c34a24SBruce Evans 		db_printf("\n");
2764c7c34a24SBruce Evans 	db_indent -= 2;
2765df8bae1dSRodney W. Grimes }
27665070c7f8SJohn Dyson 
2767c7c34a24SBruce Evans /* XXX. */
2768c7c34a24SBruce Evans #undef count
2769c7c34a24SBruce Evans 
2770c7c34a24SBruce Evans /* XXX need this non-static entry for calling from vm_map_print. */
27715070c7f8SJohn Dyson void
27721b40f8c0SMatthew Dillon vm_object_print(
27731b40f8c0SMatthew Dillon         /* db_expr_t */ long addr,
27741b40f8c0SMatthew Dillon 	boolean_t have_addr,
27751b40f8c0SMatthew Dillon 	/* db_expr_t */ long count,
27761b40f8c0SMatthew Dillon 	char *modif)
2777c7c34a24SBruce Evans {
2778c7c34a24SBruce Evans 	vm_object_print_static(addr, have_addr, count, modif);
2779c7c34a24SBruce Evans }
2780c7c34a24SBruce Evans 
2781c7c34a24SBruce Evans DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
27825070c7f8SJohn Dyson {
27835070c7f8SJohn Dyson 	vm_object_t object;
2784bb2ac86fSKonstantin Belousov 	vm_pindex_t fidx;
2785bb2ac86fSKonstantin Belousov 	vm_paddr_t pa;
2786bb2ac86fSKonstantin Belousov 	vm_page_t m, prev_m;
2787bb2ac86fSKonstantin Belousov 	int rcount, nl, c;
2788cc64b484SAlfred Perlstein 
2789bb2ac86fSKonstantin Belousov 	nl = 0;
2790cc64b484SAlfred Perlstein 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2791fc62ef1fSBruce Evans 		db_printf("new object: %p\n", (void *)object);
27925070c7f8SJohn Dyson 		if (nl > 18) {
27935070c7f8SJohn Dyson 			c = cngetc();
27945070c7f8SJohn Dyson 			if (c != ' ')
27955070c7f8SJohn Dyson 				return;
27965070c7f8SJohn Dyson 			nl = 0;
27975070c7f8SJohn Dyson 		}
27985070c7f8SJohn Dyson 		nl++;
27995070c7f8SJohn Dyson 		rcount = 0;
28005070c7f8SJohn Dyson 		fidx = 0;
2801bb2ac86fSKonstantin Belousov 		pa = -1;
2802bb2ac86fSKonstantin Belousov 		TAILQ_FOREACH(m, &object->memq, listq) {
2803bb2ac86fSKonstantin Belousov 			if (m->pindex > 128)
2804bb2ac86fSKonstantin Belousov 				break;
2805bb2ac86fSKonstantin Belousov 			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2806bb2ac86fSKonstantin Belousov 			    prev_m->pindex + 1 != m->pindex) {
28075070c7f8SJohn Dyson 				if (rcount) {
28083efc015bSPeter Wemm 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
28093efc015bSPeter Wemm 						(long)fidx, rcount, (long)pa);
28105070c7f8SJohn Dyson 					if (nl > 18) {
28115070c7f8SJohn Dyson 						c = cngetc();
28125070c7f8SJohn Dyson 						if (c != ' ')
28135070c7f8SJohn Dyson 							return;
28145070c7f8SJohn Dyson 						nl = 0;
28155070c7f8SJohn Dyson 					}
28165070c7f8SJohn Dyson 					nl++;
28175070c7f8SJohn Dyson 					rcount = 0;
28185070c7f8SJohn Dyson 				}
28195070c7f8SJohn Dyson 			}
28205070c7f8SJohn Dyson 			if (rcount &&
28215070c7f8SJohn Dyson 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
28225070c7f8SJohn Dyson 				++rcount;
28235070c7f8SJohn Dyson 				continue;
28245070c7f8SJohn Dyson 			}
28255070c7f8SJohn Dyson 			if (rcount) {
28262446e4f0SAlan Cox 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
28273efc015bSPeter Wemm 					(long)fidx, rcount, (long)pa);
28285070c7f8SJohn Dyson 				if (nl > 18) {
28295070c7f8SJohn Dyson 					c = cngetc();
28305070c7f8SJohn Dyson 					if (c != ' ')
28315070c7f8SJohn Dyson 						return;
28325070c7f8SJohn Dyson 					nl = 0;
28335070c7f8SJohn Dyson 				}
28345070c7f8SJohn Dyson 				nl++;
28355070c7f8SJohn Dyson 			}
2836bb2ac86fSKonstantin Belousov 			fidx = m->pindex;
28375070c7f8SJohn Dyson 			pa = VM_PAGE_TO_PHYS(m);
28385070c7f8SJohn Dyson 			rcount = 1;
28395070c7f8SJohn Dyson 		}
28405070c7f8SJohn Dyson 		if (rcount) {
28413efc015bSPeter Wemm 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
28423efc015bSPeter Wemm 				(long)fidx, rcount, (long)pa);
28435070c7f8SJohn Dyson 			if (nl > 18) {
28445070c7f8SJohn Dyson 				c = cngetc();
28455070c7f8SJohn Dyson 				if (c != ' ')
28465070c7f8SJohn Dyson 					return;
28475070c7f8SJohn Dyson 				nl = 0;
28485070c7f8SJohn Dyson 			}
28495070c7f8SJohn Dyson 			nl++;
28505070c7f8SJohn Dyson 		}
28515070c7f8SJohn Dyson 	}
28525070c7f8SJohn Dyson }
2853c3cb3e12SDavid Greenman #endif /* DDB */
2854