xref: /freebsd/sys/vm/vm_object.c (revision 98087a066f71aa1af97512eb2ca2dbaa5c7c53ac)
160727d8bSWarner Losh /*-
2796df753SPedro F. Giffuni  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
351369649SPedro F. Giffuni  *
4df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
5df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
6df8bae1dSRodney W. Grimes  *
7df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
8df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18fbbd9655SWarner Losh  * 3. Neither the name of the University nor the names of its contributors
19df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
20df8bae1dSRodney W. Grimes  *    without specific prior written permission.
21df8bae1dSRodney W. Grimes  *
22df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
33df8bae1dSRodney W. Grimes  *
343c4dd356SDavid Greenman  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
35df8bae1dSRodney W. Grimes  *
36df8bae1dSRodney W. Grimes  *
37df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38df8bae1dSRodney W. Grimes  * All rights reserved.
39df8bae1dSRodney W. Grimes  *
40df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
43df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
44df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
45df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
46df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55df8bae1dSRodney W. Grimes  *  School of Computer Science
56df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
57df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
58df8bae1dSRodney W. Grimes  *
59df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
60df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
61df8bae1dSRodney W. Grimes  */
62df8bae1dSRodney W. Grimes 
63df8bae1dSRodney W. Grimes /*
64df8bae1dSRodney W. Grimes  *	Virtual memory object module.
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67874651b1SDavid E. O'Brien #include <sys/cdefs.h>
68874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
69874651b1SDavid E. O'Brien 
70f8a47341SAlan Cox #include "opt_vm.h"
71f8a47341SAlan Cox 
72df8bae1dSRodney W. Grimes #include <sys/param.h>
73df8bae1dSRodney W. Grimes #include <sys/systm.h>
743f289c3fSJeff Roberson #include <sys/cpuset.h>
75fb919e4dSMark Murray #include <sys/lock.h>
76867a482dSJohn Dyson #include <sys/mman.h>
77cf2819ccSJohn Dyson #include <sys/mount.h>
78b9b7a4beSMatthew Dillon #include <sys/kernel.h>
79f425ab8eSKonstantin Belousov #include <sys/pctrie.h>
80b9b7a4beSMatthew Dillon #include <sys/sysctl.h>
811b367556SJason Evans #include <sys/mutex.h>
82fb919e4dSMark Murray #include <sys/proc.h>		/* for curproc, pageproc */
83cf27e0d1SJeff Roberson #include <sys/refcount.h>
84fb919e4dSMark Murray #include <sys/socket.h>
853364c323SKonstantin Belousov #include <sys/resourcevar.h>
8789f6b863SAttilio Rao #include <sys/rwlock.h>
88ff87ae35SJohn Baldwin #include <sys/user.h>
89fb919e4dSMark Murray #include <sys/vnode.h>
90fb919e4dSMark Murray #include <sys/vmmeter.h>
911005a129SJohn Baldwin #include <sys/sx.h>
92df8bae1dSRodney W. Grimes 
93df8bae1dSRodney W. Grimes #include <vm/vm.h>
94efeaf95aSDavid Greenman #include <vm/vm_param.h>
95efeaf95aSDavid Greenman #include <vm/pmap.h>
96efeaf95aSDavid Greenman #include <vm/vm_map.h>
97efeaf95aSDavid Greenman #include <vm/vm_object.h>
98df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
9926f9a767SRodney W. Grimes #include <vm/vm_pageout.h>
1000d94caffSDavid Greenman #include <vm/vm_pager.h>
101e2068d0bSJeff Roberson #include <vm/vm_phys.h>
102e2068d0bSJeff Roberson #include <vm/vm_pagequeue.h>
10305f0fdd2SPoul-Henning Kamp #include <vm/swap_pager.h>
104a1f6d91cSDavid Greenman #include <vm/vm_kern.h>
105efeaf95aSDavid Greenman #include <vm/vm_extern.h>
106774d251dSAttilio Rao #include <vm/vm_radix.h>
107f8a47341SAlan Cox #include <vm/vm_reserv.h>
108670d17b5SJeff Roberson #include <vm/uma.h>
10926f9a767SRodney W. Grimes 
110c53f7aceSDag-Erling Smørgrav static int old_msync;
111c53f7aceSDag-Erling Smørgrav SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
112c53f7aceSDag-Erling Smørgrav     "Use old (insecure) msync behavior");
113c53f7aceSDag-Erling Smørgrav 
114757216f3SKonstantin Belousov static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
11567d0e293SJeff Roberson 		    int pagerflags, int flags, boolean_t *allclean,
116126d6082SKonstantin Belousov 		    boolean_t *eio);
1173280870dSKonstantin Belousov static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
11867d0e293SJeff Roberson 		    boolean_t *allclean);
11951b867e5SJeff Roberson static void	vm_object_backing_remove(vm_object_t object);
120f6b04d2bSDavid Greenman 
121df8bae1dSRodney W. Grimes /*
122df8bae1dSRodney W. Grimes  *	Virtual memory objects maintain the actual data
123df8bae1dSRodney W. Grimes  *	associated with allocated virtual memory.  A given
124df8bae1dSRodney W. Grimes  *	page of memory exists within exactly one object.
125df8bae1dSRodney W. Grimes  *
126df8bae1dSRodney W. Grimes  *	An object is only deallocated when all "references"
127df8bae1dSRodney W. Grimes  *	are given up.  Only one "reference" to a given
128df8bae1dSRodney W. Grimes  *	region of an object should be writeable.
129df8bae1dSRodney W. Grimes  *
130df8bae1dSRodney W. Grimes  *	Associated with each object is a list of all resident
131df8bae1dSRodney W. Grimes  *	memory pages belonging to that object; this list is
132df8bae1dSRodney W. Grimes  *	maintained by the "vm_page" module, and locked by the object's
133df8bae1dSRodney W. Grimes  *	lock.
134df8bae1dSRodney W. Grimes  *
135df8bae1dSRodney W. Grimes  *	Each object also records a "pager" routine which is
136df8bae1dSRodney W. Grimes  *	used to retrieve (and store) pages to the proper backing
137df8bae1dSRodney W. Grimes  *	storage.  In addition, objects may be backed by other
138df8bae1dSRodney W. Grimes  *	objects from which they were virtual-copied.
139df8bae1dSRodney W. Grimes  *
140df8bae1dSRodney W. Grimes  *	The only items within the object structure which are
141df8bae1dSRodney W. Grimes  *	modified after time of creation are:
142df8bae1dSRodney W. Grimes  *		reference count		locked by object's lock
143df8bae1dSRodney W. Grimes  *		pager routine		locked by object's lock
144df8bae1dSRodney W. Grimes  *
145df8bae1dSRodney W. Grimes  */
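/*
 *	A minimal lifecycle sketch (illustrative only; "len" is an arbitrary
 *	byte length, and all error handling and map/pager wiring is omitted):
 *
 *		vm_object_t obj;
 *
 *		obj = vm_object_allocate(OBJT_DEFAULT, atop(len));
 *		vm_object_reference(obj);	  (take an extra reference)
 *		...
 *		vm_object_deallocate(obj);	  (drop the extra reference)
 *		vm_object_deallocate(obj);	  (last reference; terminates)
 */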
146df8bae1dSRodney W. Grimes 
14728f8db14SBruce Evans struct object_q vm_object_list;
148a5698387SAlan Cox struct mtx vm_object_list_mtx;	/* lock for object list and count */
149cccf11b8SAlan Cox 
150cccf11b8SAlan Cox struct vm_object kernel_object_store;
151df8bae1dSRodney W. Grimes 
1526472ac3dSEd Schouten static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
1536472ac3dSEd Schouten     "VM object stats");
154604c2bbcSAlan Cox 
15511542376SAlan Cox static counter_u64_t object_collapses = EARLY_COUNTER;
15611542376SAlan Cox SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
15711542376SAlan Cox     &object_collapses,
15811542376SAlan Cox     "VM object collapses");
159604c2bbcSAlan Cox 
16011542376SAlan Cox static counter_u64_t object_bypasses = EARLY_COUNTER;
16111542376SAlan Cox SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
16211542376SAlan Cox     &object_bypasses,
16311542376SAlan Cox     "VM object bypasses");
16411542376SAlan Cox 
165*98087a06SJeff Roberson static counter_u64_t object_collapse_waits = EARLY_COUNTER;
166*98087a06SJeff Roberson SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD,
167*98087a06SJeff Roberson     &object_collapse_waits,
168*98087a06SJeff Roberson     "Number of sleeps for collapse");
169*98087a06SJeff Roberson 
17011542376SAlan Cox static void
17111542376SAlan Cox counter_startup(void)
17211542376SAlan Cox {
17311542376SAlan Cox 
17411542376SAlan Cox 	object_collapses = counter_u64_alloc(M_WAITOK);
17511542376SAlan Cox 	object_bypasses = counter_u64_alloc(M_WAITOK);
176*98087a06SJeff Roberson 	object_collapse_waits = counter_u64_alloc(M_WAITOK);
17711542376SAlan Cox }
17811542376SAlan Cox SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);
179dad740e9SAlan Cox 
180670d17b5SJeff Roberson static uma_zone_t obj_zone;
1818355f576SJeff Roberson 
182b23f72e9SBrian Feldman static int vm_object_zinit(void *mem, int size, int flags);
1838355f576SJeff Roberson 
1848355f576SJeff Roberson #ifdef INVARIANTS
1858355f576SJeff Roberson static void vm_object_zdtor(void *mem, int size, void *arg);
1868355f576SJeff Roberson 
1878355f576SJeff Roberson static void
1888355f576SJeff Roberson vm_object_zdtor(void *mem, int size, void *arg)
1898355f576SJeff Roberson {
1908355f576SJeff Roberson 	vm_object_t object;
1918355f576SJeff Roberson 
1928355f576SJeff Roberson 	object = (vm_object_t)mem;
193e735691bSJohn Baldwin 	KASSERT(object->ref_count == 0,
194e735691bSJohn Baldwin 	    ("object %p ref_count = %d", object, object->ref_count));
19543186e53SAlan Cox 	KASSERT(TAILQ_EMPTY(&object->memq),
196198da1b2SAttilio Rao 	    ("object %p has resident pages in its memq", object));
197774d251dSAttilio Rao 	KASSERT(vm_radix_is_empty(&object->rtree),
198774d251dSAttilio Rao 	    ("object %p has resident pages in its trie", object));
199f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
200f8a47341SAlan Cox 	KASSERT(LIST_EMPTY(&object->rvq),
201f8a47341SAlan Cox 	    ("object %p has reservations",
202f8a47341SAlan Cox 	    object));
203f8a47341SAlan Cox #endif
20411b57401SHans Petter Selasky 	KASSERT(REFCOUNT_COUNT(object->paging_in_progress) == 0,
2058355f576SJeff Roberson 	    ("object %p paging_in_progress = %d",
20611b57401SHans Petter Selasky 	    object, REFCOUNT_COUNT(object->paging_in_progress)));
207205be21dSJeff Roberson 	KASSERT(object->busy == 0,
208205be21dSJeff Roberson 	    ("object %p busy = %d",
209205be21dSJeff Roberson 	    object, object->busy));
2108355f576SJeff Roberson 	KASSERT(object->resident_page_count == 0,
2118355f576SJeff Roberson 	    ("object %p resident_page_count = %d",
2128355f576SJeff Roberson 	    object, object->resident_page_count));
2138355f576SJeff Roberson 	KASSERT(object->shadow_count == 0,
2148355f576SJeff Roberson 	    ("object %p shadow_count = %d",
2158355f576SJeff Roberson 	    object, object->shadow_count));
216e735691bSJohn Baldwin 	KASSERT(object->type == OBJT_DEAD,
217e735691bSJohn Baldwin 	    ("object %p has non-dead type %d",
218e735691bSJohn Baldwin 	    object, object->type));
2198355f576SJeff Roberson }
2208355f576SJeff Roberson #endif
2218355f576SJeff Roberson 
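/*
 *	Zone item initializer for obj_zone.  Sets up the fields that must
 *	stay type-stable across frees (most notably the object lock; see the
 *	UMA_ZONE_NOFREE comment in vm_object_init()), marks the object
 *	OBJT_DEAD/OBJ_DEAD, and links it onto the global vm_object_list.
 */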
222b23f72e9SBrian Feldman static int
223b23f72e9SBrian Feldman vm_object_zinit(void *mem, int size, int flags)
2248355f576SJeff Roberson {
2258355f576SJeff Roberson 	vm_object_t object;
2268355f576SJeff Roberson 
2278355f576SJeff Roberson 	object = (vm_object_t)mem;
228777a36c5SAlan Cox 	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);
2298355f576SJeff Roberson 
2308355f576SJeff Roberson 	/* These are true for any object that has been freed */
231e735691bSJohn Baldwin 	object->type = OBJT_DEAD;
232cd1241fbSKonstantin Belousov 	vm_radix_init(&object->rtree);
23351df5321SJeff Roberson 	refcount_init(&object->ref_count, 0);
234cf27e0d1SJeff Roberson 	refcount_init(&object->paging_in_progress, 0);
235205be21dSJeff Roberson 	refcount_init(&object->busy, 0);
2368355f576SJeff Roberson 	object->resident_page_count = 0;
2378355f576SJeff Roberson 	object->shadow_count = 0;
238f425ab8eSKonstantin Belousov 	object->flags = OBJ_DEAD;
239e735691bSJohn Baldwin 
240e735691bSJohn Baldwin 	mtx_lock(&vm_object_list_mtx);
241e735691bSJohn Baldwin 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
242e735691bSJohn Baldwin 	mtx_unlock(&vm_object_list_mtx);
243b23f72e9SBrian Feldman 	return (0);
2448355f576SJeff Roberson }
245df8bae1dSRodney W. Grimes 
246a4915c21SAttilio Rao static void
24763967687SJeff Roberson _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
24867388836SKonstantin Belousov     vm_object_t object, void *handle)
249df8bae1dSRodney W. Grimes {
2500cddd8f0SMatthew Dillon 
251df8bae1dSRodney W. Grimes 	TAILQ_INIT(&object->memq);
2521c500307SAlan Cox 	LIST_INIT(&object->shadow_head);
253a1f6d91cSDavid Greenman 
25424a1cce3SDavid Greenman 	object->type = type;
255f425ab8eSKonstantin Belousov 	if (type == OBJT_SWAP)
256f425ab8eSKonstantin Belousov 		pctrie_init(&object->un_pager.swp.swp_blks);
257f425ab8eSKonstantin Belousov 
258f425ab8eSKonstantin Belousov 	/*
259f425ab8eSKonstantin Belousov 	 * Ensure that swap_pager_swapoff() iteration over object_list
260f425ab8eSKonstantin Belousov 	 * sees up to date type and pctrie head if it observed
261f425ab8eSKonstantin Belousov 	 * non-dead object.
262f425ab8eSKonstantin Belousov 	 */
263f425ab8eSKonstantin Belousov 	atomic_thread_fence_rel();
264f425ab8eSKonstantin Belousov 
26563967687SJeff Roberson 	object->pg_color = 0;
26663967687SJeff Roberson 	object->flags = flags;
267df8bae1dSRodney W. Grimes 	object->size = size;
2684c29d2deSMark Johnston 	object->domain.dr_policy = NULL;
269b881da26SAlan Cox 	object->generation = 1;
27067d0e293SJeff Roberson 	object->cleangeneration = 1;
27151df5321SJeff Roberson 	refcount_init(&object->ref_count, 1);
2723153e878SAlan Cox 	object->memattr = VM_MEMATTR_DEFAULT;
273ef694c1aSEdward Tomasz Napierala 	object->cred = NULL;
2743364c323SKonstantin Belousov 	object->charge = 0;
27567388836SKonstantin Belousov 	object->handle = handle;
27624a1cce3SDavid Greenman 	object->backing_object = NULL;
277a316d390SJohn Dyson 	object->backing_object_offset = (vm_ooffset_t) 0;
278f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
279f8a47341SAlan Cox 	LIST_INIT(&object->rvq);
280f8a47341SAlan Cox #endif
2811bdbd705SKonstantin Belousov 	umtx_shm_object_init(object);
282df8bae1dSRodney W. Grimes }
283df8bae1dSRodney W. Grimes 
284df8bae1dSRodney W. Grimes /*
28526f9a767SRodney W. Grimes  *	vm_object_init:
28626f9a767SRodney W. Grimes  *
28726f9a767SRodney W. Grimes  *	Initialize the VM objects module.
28826f9a767SRodney W. Grimes  */
28926f9a767SRodney W. Grimes void
2901b40f8c0SMatthew Dillon vm_object_init(void)
29126f9a767SRodney W. Grimes {
29226f9a767SRodney W. Grimes 	TAILQ_INIT(&vm_object_list);
2936008862bSJohn Baldwin 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
2940217125fSDavid Greenman 
29589f6b863SAttilio Rao 	rw_init(&kernel_object->lock, "kernel vm object");
296d1780e8dSKonstantin Belousov 	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
29767388836SKonstantin Belousov 	    VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
298f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
299f8a47341SAlan Cox 	kernel_object->flags |= OBJ_COLORED;
300f8a47341SAlan Cox 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
301f8a47341SAlan Cox #endif
30226f9a767SRodney W. Grimes 
3038dbca793STor Egge 	/*
3048dbca793STor Egge 	 * The lock portion of struct vm_object must be type stable due
3058dbca793STor Egge 	 * to vm_pageout_fallback_object_lock locking a vm object
3068dbca793STor Egge 	 * without holding any references to it.
3078dbca793STor Egge 	 */
3088355f576SJeff Roberson 	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
3098355f576SJeff Roberson #ifdef INVARIANTS
3108355f576SJeff Roberson 	    vm_object_zdtor,
3118355f576SJeff Roberson #else
3128355f576SJeff Roberson 	    NULL,
3138355f576SJeff Roberson #endif
3145df87b21SJeff Roberson 	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
315774d251dSAttilio Rao 
316cd1241fbSKonstantin Belousov 	vm_radix_zinit();
31799448ed1SJohn Dyson }
31899448ed1SJohn Dyson 
31999448ed1SJohn Dyson void
3201b40f8c0SMatthew Dillon vm_object_clear_flag(vm_object_t object, u_short bits)
3211b40f8c0SMatthew Dillon {
3225440b5a9SAlan Cox 
32389f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
324b06805adSJake Burkholder 	object->flags &= ~bits;
3251b40f8c0SMatthew Dillon }
3261b40f8c0SMatthew Dillon 
3273153e878SAlan Cox /*
3283153e878SAlan Cox  *	Sets the default memory attribute for the specified object.  Pages
3293153e878SAlan Cox  *	that are allocated to this object are by default assigned this memory
3303153e878SAlan Cox  *	attribute.
3313153e878SAlan Cox  *
3323153e878SAlan Cox  *	Presently, this function must be called before any pages are allocated
3333153e878SAlan Cox  *	to the object.  In the future, this requirement may be relaxed for
3343153e878SAlan Cox  *	"default" and "swap" objects.
3353153e878SAlan Cox  */
3363153e878SAlan Cox int
3373153e878SAlan Cox vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
3383153e878SAlan Cox {
3393153e878SAlan Cox 
34089f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
3413153e878SAlan Cox 	switch (object->type) {
3423153e878SAlan Cox 	case OBJT_DEFAULT:
3433153e878SAlan Cox 	case OBJT_DEVICE:
34496b0b92aSAlan Cox 	case OBJT_MGTDEVICE:
3453153e878SAlan Cox 	case OBJT_PHYS:
34601381811SJohn Baldwin 	case OBJT_SG:
3473153e878SAlan Cox 	case OBJT_SWAP:
3483153e878SAlan Cox 	case OBJT_VNODE:
3493153e878SAlan Cox 		if (!TAILQ_EMPTY(&object->memq))
3503153e878SAlan Cox 			return (KERN_FAILURE);
3513153e878SAlan Cox 		break;
3523153e878SAlan Cox 	case OBJT_DEAD:
3533153e878SAlan Cox 		return (KERN_INVALID_ARGUMENT);
35496b0b92aSAlan Cox 	default:
35596b0b92aSAlan Cox 		panic("vm_object_set_memattr: object %p is of undefined type",
35696b0b92aSAlan Cox 		    object);
3573153e878SAlan Cox 	}
3583153e878SAlan Cox 	object->memattr = memattr;
3593153e878SAlan Cox 	return (KERN_SUCCESS);
3603153e878SAlan Cox }
3613153e878SAlan Cox 
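/*
 *	Paging-in-progress (pip) accounting.  vm_object_pip_add() records
 *	"i" in-flight paging operations on the object; each one is retired
 *	by vm_object_pip_wakeup() (or in bulk by vm_object_pip_wakeupn()),
 *	and vm_object_pip_wait() blocks until the count drains to zero.
 */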
3621b40f8c0SMatthew Dillon void
3631b40f8c0SMatthew Dillon vm_object_pip_add(vm_object_t object, short i)
3641b40f8c0SMatthew Dillon {
365f279b88dSAlan Cox 
366cf27e0d1SJeff Roberson 	refcount_acquiren(&object->paging_in_progress, i);
3671b40f8c0SMatthew Dillon }
3681b40f8c0SMatthew Dillon 
3691b40f8c0SMatthew Dillon void
3701b40f8c0SMatthew Dillon vm_object_pip_wakeup(vm_object_t object)
3711b40f8c0SMatthew Dillon {
372f279b88dSAlan Cox 
373cf27e0d1SJeff Roberson 	refcount_release(&object->paging_in_progress);
3741b40f8c0SMatthew Dillon }
3751b40f8c0SMatthew Dillon 
3761b40f8c0SMatthew Dillon void
3771b40f8c0SMatthew Dillon vm_object_pip_wakeupn(vm_object_t object, short i)
3781b40f8c0SMatthew Dillon {
379d647a0edSAlan Cox 
380cf27e0d1SJeff Roberson 	refcount_releasen(&object->paging_in_progress, i);
3811b40f8c0SMatthew Dillon }
3821b40f8c0SMatthew Dillon 
383*98087a06SJeff Roberson /*
384*98087a06SJeff Roberson  * Atomically drop the interlock and wait for pip to drain.  This protects
385*98087a06SJeff Roberson  * from sleep/wakeup races due to identity changes.  The lock is not
386*98087a06SJeff Roberson  * re-acquired on return.
387*98087a06SJeff Roberson  */
388*98087a06SJeff Roberson static void
389*98087a06SJeff Roberson vm_object_pip_sleep(vm_object_t object, char *waitid)
390*98087a06SJeff Roberson {
391*98087a06SJeff Roberson 
392*98087a06SJeff Roberson 	refcount_sleep_interlock(&object->paging_in_progress,
393*98087a06SJeff Roberson 	    &object->lock, waitid, PVM);
394*98087a06SJeff Roberson }
395*98087a06SJeff Roberson 
3961b40f8c0SMatthew Dillon void
3971b40f8c0SMatthew Dillon vm_object_pip_wait(vm_object_t object, char *waitid)
3981b40f8c0SMatthew Dillon {
3991ca58953SAlan Cox 
40089f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
401cf27e0d1SJeff Roberson 
40211b57401SHans Petter Selasky 	while (REFCOUNT_COUNT(object->paging_in_progress) > 0) {
403*98087a06SJeff Roberson 		vm_object_pip_sleep(object, waitid);
404cf27e0d1SJeff Roberson 		VM_OBJECT_WLOCK(object);
4051ca58953SAlan Cox 	}
4061b40f8c0SMatthew Dillon }
4071b40f8c0SMatthew Dillon 
408cf27e0d1SJeff Roberson void
409cf27e0d1SJeff Roberson vm_object_pip_wait_unlocked(vm_object_t object, char *waitid)
410cf27e0d1SJeff Roberson {
411cf27e0d1SJeff Roberson 
412cf27e0d1SJeff Roberson 	VM_OBJECT_ASSERT_UNLOCKED(object);
413cf27e0d1SJeff Roberson 
41411b57401SHans Petter Selasky 	while (REFCOUNT_COUNT(object->paging_in_progress) > 0)
415cf27e0d1SJeff Roberson 		refcount_wait(&object->paging_in_progress, waitid, PVM);
416cf27e0d1SJeff Roberson }
417cf27e0d1SJeff Roberson 
41826f9a767SRodney W. Grimes /*
41926f9a767SRodney W. Grimes  *	vm_object_allocate:
42026f9a767SRodney W. Grimes  *
42126f9a767SRodney W. Grimes  *	Returns a new object with the given size.
42226f9a767SRodney W. Grimes  */
42326f9a767SRodney W. Grimes vm_object_t
4246395da54SIan Dowse vm_object_allocate(objtype_t type, vm_pindex_t size)
42526f9a767SRodney W. Grimes {
42690688d13SAlan Cox 	vm_object_t object;
42763967687SJeff Roberson 	u_short flags;
42863967687SJeff Roberson 
42963967687SJeff Roberson 	switch (type) {
43063967687SJeff Roberson 	case OBJT_DEAD:
43163967687SJeff Roberson 		panic("vm_object_allocate: can't create OBJT_DEAD");
43263967687SJeff Roberson 	case OBJT_DEFAULT:
43363967687SJeff Roberson 	case OBJT_SWAP:
43463967687SJeff Roberson 		flags = OBJ_COLORED;
43563967687SJeff Roberson 		break;
43663967687SJeff Roberson 	case OBJT_DEVICE:
43763967687SJeff Roberson 	case OBJT_SG:
43863967687SJeff Roberson 		flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
43963967687SJeff Roberson 		break;
44063967687SJeff Roberson 	case OBJT_MGTDEVICE:
44163967687SJeff Roberson 		flags = OBJ_FICTITIOUS;
44263967687SJeff Roberson 		break;
44363967687SJeff Roberson 	case OBJT_PHYS:
44463967687SJeff Roberson 		flags = OBJ_UNMANAGED;
44563967687SJeff Roberson 		break;
44663967687SJeff Roberson 	case OBJT_VNODE:
44763967687SJeff Roberson 		flags = 0;
44863967687SJeff Roberson 		break;
44963967687SJeff Roberson 	default:
45063967687SJeff Roberson 		panic("vm_object_allocate: type %d is undefined", type);
45163967687SJeff Roberson 	}
45263967687SJeff Roberson 	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
45367388836SKonstantin Belousov 	_vm_object_allocate(type, size, flags, object, NULL);
45463967687SJeff Roberson 
45563967687SJeff Roberson 	return (object);
45663967687SJeff Roberson }
45763967687SJeff Roberson 
45863967687SJeff Roberson /*
45963967687SJeff Roberson  *	vm_object_allocate_anon:
46063967687SJeff Roberson  *
46163967687SJeff Roberson  *	Returns a new default object of the given size and marked as
46263967687SJeff Roberson  *	anonymous memory for special split/collapse handling.  Color
46363967687SJeff Roberson  *	to be initialized by the caller.
46463967687SJeff Roberson  */
46563967687SJeff Roberson vm_object_t
46667388836SKonstantin Belousov vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
46767388836SKonstantin Belousov     struct ucred *cred, vm_size_t charge)
46863967687SJeff Roberson {
46967388836SKonstantin Belousov 	vm_object_t handle, object;
47090688d13SAlan Cox 
47167388836SKonstantin Belousov 	if (backing_object == NULL)
47267388836SKonstantin Belousov 		handle = NULL;
47367388836SKonstantin Belousov 	else if ((backing_object->flags & OBJ_ANON) != 0)
47467388836SKonstantin Belousov 		handle = backing_object->handle;
47567388836SKonstantin Belousov 	else
47667388836SKonstantin Belousov 		handle = backing_object;
47767388836SKonstantin Belousov 	object = uma_zalloc(obj_zone, M_WAITOK);
47863967687SJeff Roberson 	_vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING,
47967388836SKonstantin Belousov 	    object, handle);
48067388836SKonstantin Belousov 	object->cred = cred;
48167388836SKonstantin Belousov 	object->charge = cred != NULL ? charge : 0;
48290688d13SAlan Cox 	return (object);
48326f9a767SRodney W. Grimes }
48426f9a767SRodney W. Grimes 
485*98087a06SJeff Roberson static void
486*98087a06SJeff Roberson vm_object_reference_vnode(vm_object_t object)
487df8bae1dSRodney W. Grimes {
488a67d5408SJeff Roberson 	struct vnode *vp;
489a67d5408SJeff Roberson 	u_int old;
490a67d5408SJeff Roberson 
491a67d5408SJeff Roberson 	/*
492*98087a06SJeff Roberson 	 * vnode objects need the lock for the first reference
493*98087a06SJeff Roberson 	 * to serialize with vm_object_deallocate_vnode().
494a67d5408SJeff Roberson 	 */
495*98087a06SJeff Roberson 	if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
49651df5321SJeff Roberson 		VM_OBJECT_RLOCK(object);
497a67d5408SJeff Roberson 		old = refcount_acquire(&object->ref_count);
498a67d5408SJeff Roberson 		if (object->type == OBJT_VNODE && old == 0) {
499a67d5408SJeff Roberson 			vp = object->handle;
500a67d5408SJeff Roberson 			vref(vp);
501a67d5408SJeff Roberson 		}
50251df5321SJeff Roberson 		VM_OBJECT_RUNLOCK(object);
50395e5e988SJohn Dyson 	}
504a67d5408SJeff Roberson }
50595e5e988SJohn Dyson 
50623955314SAlfred Perlstein /*
507*98087a06SJeff Roberson  *	vm_object_reference:
508*98087a06SJeff Roberson  *
509*98087a06SJeff Roberson  *	Acquires a reference to the given object.
510*98087a06SJeff Roberson  */
511*98087a06SJeff Roberson void
512*98087a06SJeff Roberson vm_object_reference(vm_object_t object)
513*98087a06SJeff Roberson {
514*98087a06SJeff Roberson 
515*98087a06SJeff Roberson 	if (object == NULL)
516*98087a06SJeff Roberson 		return;
517*98087a06SJeff Roberson 
518*98087a06SJeff Roberson 	if (object->type == OBJT_VNODE)
519*98087a06SJeff Roberson 		vm_object_reference_vnode(object);
520*98087a06SJeff Roberson 	else
521*98087a06SJeff Roberson 		refcount_acquire(&object->ref_count);
522*98087a06SJeff Roberson 	KASSERT((object->flags & OBJ_DEAD) == 0,
523*98087a06SJeff Roberson 	    ("vm_object_reference: Referenced dead object."));
524*98087a06SJeff Roberson }
525*98087a06SJeff Roberson 
526*98087a06SJeff Roberson /*
527b921a12bSAlan Cox  *	vm_object_reference_locked:
528b921a12bSAlan Cox  *
529b921a12bSAlan Cox  *	Gets another reference to the given object.
530b921a12bSAlan Cox  *
531b921a12bSAlan Cox  *	The object must be locked.
532b921a12bSAlan Cox  */
533b921a12bSAlan Cox void
534b921a12bSAlan Cox vm_object_reference_locked(vm_object_t object)
535b921a12bSAlan Cox {
536b921a12bSAlan Cox 	struct vnode *vp;
537a67d5408SJeff Roberson 	u_int old;
538b921a12bSAlan Cox 
53951df5321SJeff Roberson 	VM_OBJECT_ASSERT_LOCKED(object);
540a67d5408SJeff Roberson 	old = refcount_acquire(&object->ref_count);
541a67d5408SJeff Roberson 	if (object->type == OBJT_VNODE && old == 0) {
542*98087a06SJeff Roberson 		vp = object->handle;
542*98087a06SJeff Roberson 		vref(vp);
542*98087a06SJeff Roberson 	}
543*98087a06SJeff Roberson 	KASSERT((object->flags & OBJ_DEAD) == 0,
544*98087a06SJeff Roberson 	    ("vm_object_reference: Referenced dead object."));
545b921a12bSAlan Cox }
546b921a12bSAlan Cox 
547b921a12bSAlan Cox /*
5489d5abbddSJens Schweikhardt  * Handle deallocating an object of type OBJT_VNODE.
54923955314SAlfred Perlstein  */
55002dd8331SAlan Cox static void
551*98087a06SJeff Roberson vm_object_deallocate_vnode(vm_object_t object)
55295e5e988SJohn Dyson {
55395e5e988SJohn Dyson 	struct vnode *vp = (struct vnode *) object->handle;
55426c4e983SJeff Roberson 	bool last;
555219cbf59SEivind Eklund 
5565526d2d9SEivind Eklund 	KASSERT(object->type == OBJT_VNODE,
557*98087a06SJeff Roberson 	    ("vm_object_deallocate_vnode: not a vnode object"));
558*98087a06SJeff Roberson 	KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));
55995e5e988SJohn Dyson 
56026c4e983SJeff Roberson 	/* Object lock to protect handle lookup. */
56126c4e983SJeff Roberson 	last = refcount_release(&object->ref_count);
56226c4e983SJeff Roberson 	VM_OBJECT_RUNLOCK(object);
56326c4e983SJeff Roberson 
56426c4e983SJeff Roberson 	if (!last)
56526c4e983SJeff Roberson 		return;
56626c4e983SJeff Roberson 
567a67d5408SJeff Roberson 	if (!umtx_shm_vnobj_persistent)
5681bdbd705SKonstantin Belousov 		umtx_shm_object_terminated(object);
5691bdbd705SKonstantin Belousov 
57003fa5b34SKonstantin Belousov 	/* vrele may need the vnode lock. */
57147221757SJohn Dyson 	vrele(vp);
57286769ac0SKonstantin Belousov }
573df8bae1dSRodney W. Grimes 
575*98087a06SJeff Roberson /*
576*98087a06SJeff Roberson  * We dropped a reference on an object and discovered that it had a
577*98087a06SJeff Roberson  * single remaining shadow.  This is a sibling of the reference we
578*98087a06SJeff Roberson  * dropped.  Attempt to collapse the sibling and backing object.
579*98087a06SJeff Roberson  */
580*98087a06SJeff Roberson static vm_object_t
581*98087a06SJeff Roberson vm_object_deallocate_anon(vm_object_t backing_object)
582*98087a06SJeff Roberson {
583*98087a06SJeff Roberson 	vm_object_t object;
584*98087a06SJeff Roberson 
585*98087a06SJeff Roberson 	/* Fetch the final shadow.  */
586*98087a06SJeff Roberson 	object = LIST_FIRST(&backing_object->shadow_head);
587*98087a06SJeff Roberson 	KASSERT(object != NULL && backing_object->shadow_count == 1,
588*98087a06SJeff Roberson 	    ("vm_object_deallocate_anon: ref_count: %d, shadow_count: %d",
589*98087a06SJeff Roberson 	    backing_object->ref_count, backing_object->shadow_count));
590*98087a06SJeff Roberson 	KASSERT((object->flags & (OBJ_TMPFS_NODE | OBJ_ANON)) == OBJ_ANON,
591*98087a06SJeff Roberson 	    ("invalid shadow object %p", object));
592*98087a06SJeff Roberson 
593*98087a06SJeff Roberson 	if (!VM_OBJECT_TRYWLOCK(object)) {
594*98087a06SJeff Roberson 		/*
595*98087a06SJeff Roberson 		 * Prevent object from disappearing since we do not have a
596*98087a06SJeff Roberson 		 * reference.
597*98087a06SJeff Roberson 		 */
598*98087a06SJeff Roberson 		vm_object_pip_add(object, 1);
599*98087a06SJeff Roberson 		VM_OBJECT_WUNLOCK(backing_object);
600*98087a06SJeff Roberson 		VM_OBJECT_WLOCK(object);
601*98087a06SJeff Roberson 		vm_object_pip_wakeup(object);
602*98087a06SJeff Roberson 	} else
603*98087a06SJeff Roberson 		VM_OBJECT_WUNLOCK(backing_object);
604*98087a06SJeff Roberson 
605*98087a06SJeff Roberson 	/*
606*98087a06SJeff Roberson 	 * Check for a collapse/terminate race with the last reference holder.
607*98087a06SJeff Roberson 	 */
608*98087a06SJeff Roberson 	if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
609*98087a06SJeff Roberson 	    !refcount_acquire_if_not_zero(&object->ref_count)) {
610*98087a06SJeff Roberson 		VM_OBJECT_WUNLOCK(object);
611*98087a06SJeff Roberson 		return (NULL);
612*98087a06SJeff Roberson 	}
613*98087a06SJeff Roberson 	backing_object = object->backing_object;
614*98087a06SJeff Roberson 	if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)
615*98087a06SJeff Roberson 		vm_object_collapse(object);
616*98087a06SJeff Roberson 	VM_OBJECT_WUNLOCK(object);
617*98087a06SJeff Roberson 
618*98087a06SJeff Roberson 	return (object);
619*98087a06SJeff Roberson }
620*98087a06SJeff Roberson 
621df8bae1dSRodney W. Grimes /*
622df8bae1dSRodney W. Grimes  *	vm_object_deallocate:
623df8bae1dSRodney W. Grimes  *
624df8bae1dSRodney W. Grimes  *	Release a reference to the specified object,
625df8bae1dSRodney W. Grimes  *	gained either through a vm_object_allocate
626df8bae1dSRodney W. Grimes  *	or a vm_object_reference call.  When all references
627df8bae1dSRodney W. Grimes  *	are gone, storage associated with this object
628df8bae1dSRodney W. Grimes  *	may be relinquished.
629df8bae1dSRodney W. Grimes  *
630df8bae1dSRodney W. Grimes  *	No object may be locked.
631df8bae1dSRodney W. Grimes  */
63226f9a767SRodney W. Grimes void
6331b40f8c0SMatthew Dillon vm_object_deallocate(vm_object_t object)
634df8bae1dSRodney W. Grimes {
635*98087a06SJeff Roberson 	vm_object_t temp;
63626c4e983SJeff Roberson 	bool released;
637df8bae1dSRodney W. Grimes 
638df8bae1dSRodney W. Grimes 	while (object != NULL) {
63951df5321SJeff Roberson 		/*
64051df5321SJeff Roberson 		 * If the reference count goes to 0 we start calling
64151df5321SJeff Roberson 		 * vm_object_terminate() on the object chain.  A ref count
64251df5321SJeff Roberson 		 * of 1 may be a special case depending on the shadow count
64351df5321SJeff Roberson 		 * being 0 or 1.  These cases require a write lock on the
64451df5321SJeff Roberson 		 * object.
64551df5321SJeff Roberson 		 */
64663967687SJeff Roberson 		if ((object->flags & OBJ_ANON) == 0)
64763967687SJeff Roberson 			released = refcount_release_if_gt(&object->ref_count, 1);
64863967687SJeff Roberson 		else
64951df5321SJeff Roberson 			released = refcount_release_if_gt(&object->ref_count, 2);
65051df5321SJeff Roberson 		if (released)
65151df5321SJeff Roberson 			return;
65251df5321SJeff Roberson 
653a67d5408SJeff Roberson 		if (object->type == OBJT_VNODE) {
65426c4e983SJeff Roberson 			VM_OBJECT_RLOCK(object);
65526c4e983SJeff Roberson 			if (object->type == OBJT_VNODE) {
656*98087a06SJeff Roberson 				vm_object_deallocate_vnode(object);
657a67d5408SJeff Roberson 				return;
658a67d5408SJeff Roberson 			}
65926c4e983SJeff Roberson 			VM_OBJECT_RUNLOCK(object);
66026c4e983SJeff Roberson 		}
66126c4e983SJeff Roberson 
66226c4e983SJeff Roberson 		VM_OBJECT_WLOCK(object);
66326c4e983SJeff Roberson 		KASSERT(object->ref_count > 0,
66426c4e983SJeff Roberson 		    ("vm_object_deallocate: object of type %d deallocated too many times",
66526c4e983SJeff Roberson 		    object->type));
66626c4e983SJeff Roberson 
667b72b0115SAlan Cox 		/*
668*98087a06SJeff Roberson 		 * If this is not the final reference to an anonymous
669*98087a06SJeff Roberson 		 * object we may need to collapse the shadow chain.
670b72b0115SAlan Cox 		 */
671*98087a06SJeff Roberson 		if (!refcount_release(&object->ref_count)) {
672*98087a06SJeff Roberson 			if (object->ref_count > 1 ||
673*98087a06SJeff Roberson 			    object->shadow_count == 0) {
674*98087a06SJeff Roberson 				if ((object->flags & OBJ_ANON) != 0 &&
675*98087a06SJeff Roberson 				    object->ref_count == 1)
676*98087a06SJeff Roberson 					vm_object_set_flag(object,
677*98087a06SJeff Roberson 					    OBJ_ONEMAPPING);
67889f6b863SAttilio Rao 				VM_OBJECT_WUNLOCK(object);
67923b186d3SAlan Cox 				return;
68095e5e988SJohn Dyson 			}
681*98087a06SJeff Roberson 
682*98087a06SJeff Roberson 			/* Handle collapsing last ref on anonymous objects. */
683*98087a06SJeff Roberson 			object = vm_object_deallocate_anon(object);
684*98087a06SJeff Roberson 			continue;
685*98087a06SJeff Roberson 		}
686*98087a06SJeff Roberson 
687*98087a06SJeff Roberson 		/*
688*98087a06SJeff Roberson 		 * Handle the final reference to an object.  We restart
689*98087a06SJeff Roberson 		 * the loop with the backing object to avoid recursion.
690*98087a06SJeff Roberson 		 */
6911bdbd705SKonstantin Belousov 		umtx_shm_object_terminated(object);
69224a1cce3SDavid Greenman 		temp = object->backing_object;
693c9917419SAlan Cox 		if (temp != NULL) {
6944bace8e7SKonstantin Belousov 			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
6954bace8e7SKonstantin Belousov 			    ("shadowed tmpfs v_object 2 %p", object));
69651b867e5SJeff Roberson 			vm_object_backing_remove(object);
697de5f6a77SJohn Dyson 		}
698*98087a06SJeff Roberson 
699*98087a06SJeff Roberson 		KASSERT((object->flags & OBJ_DEAD) == 0,
700*98087a06SJeff Roberson 		    ("vm_object_deallocate: Terminating dead object."));
701783a68aaSKonstantin Belousov 		vm_object_set_flag(object, OBJ_DEAD);
702df8bae1dSRodney W. Grimes 		vm_object_terminate(object);
703df8bae1dSRodney W. Grimes 		object = temp;
704df8bae1dSRodney W. Grimes 	}
705df8bae1dSRodney W. Grimes }
706df8bae1dSRodney W. Grimes 
707df8bae1dSRodney W. Grimes /*
7082ac78f0eSStephan Uphoff  *	vm_object_destroy releases the object's allocation charge and
7092ac78f0eSStephan Uphoff  *	returns its memory to the object zone.
7102ac78f0eSStephan Uphoff  */
7112ac78f0eSStephan Uphoff void
7122ac78f0eSStephan Uphoff vm_object_destroy(vm_object_t object)
7132ac78f0eSStephan Uphoff {
7142ac78f0eSStephan Uphoff 
7152ac78f0eSStephan Uphoff 	/*
7163364c323SKonstantin Belousov 	 * Release the allocation charge.
7173364c323SKonstantin Belousov 	 */
718ef694c1aSEdward Tomasz Napierala 	if (object->cred != NULL) {
719ef694c1aSEdward Tomasz Napierala 		swap_release_by_cred(object->charge, object->cred);
7203364c323SKonstantin Belousov 		object->charge = 0;
721ef694c1aSEdward Tomasz Napierala 		crfree(object->cred);
722ef694c1aSEdward Tomasz Napierala 		object->cred = NULL;
7233364c323SKonstantin Belousov 	}
7243364c323SKonstantin Belousov 
7253364c323SKonstantin Belousov 	/*
7262ac78f0eSStephan Uphoff 	 * Free the space for the object.
7272ac78f0eSStephan Uphoff 	 */
7282ac78f0eSStephan Uphoff 	uma_zfree(obj_zone, object);
7292ac78f0eSStephan Uphoff }
7302ac78f0eSStephan Uphoff 
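/*
 *	Unlink "object" from its backing object.  The _locked variant
 *	requires both object locks; the plain variant acquires the backing
 *	object's lock only when the object sits on a shadow list.  Either
 *	way, object->backing_object is cleared.
 */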
73151b867e5SJeff Roberson static void
73251b867e5SJeff Roberson vm_object_backing_remove_locked(vm_object_t object)
73351b867e5SJeff Roberson {
73451b867e5SJeff Roberson 	vm_object_t backing_object;
73551b867e5SJeff Roberson 
73651b867e5SJeff Roberson 	backing_object = object->backing_object;
73751b867e5SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
73851b867e5SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
73951b867e5SJeff Roberson 
740*98087a06SJeff Roberson 	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
741*98087a06SJeff Roberson 	    ("vm_object_backing_remove: Removing collapsing object."));
742*98087a06SJeff Roberson 
74351b867e5SJeff Roberson 	if ((object->flags & OBJ_SHADOWLIST) != 0) {
74451b867e5SJeff Roberson 		LIST_REMOVE(object, shadow_list);
74551b867e5SJeff Roberson 		backing_object->shadow_count--;
74651b867e5SJeff Roberson 		object->flags &= ~OBJ_SHADOWLIST;
74751b867e5SJeff Roberson 	}
74851b867e5SJeff Roberson 	object->backing_object = NULL;
74951b867e5SJeff Roberson }
75051b867e5SJeff Roberson 
75151b867e5SJeff Roberson static void
75251b867e5SJeff Roberson vm_object_backing_remove(vm_object_t object)
75351b867e5SJeff Roberson {
75451b867e5SJeff Roberson 	vm_object_t backing_object;
75551b867e5SJeff Roberson 
75651b867e5SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
75751b867e5SJeff Roberson 
75851b867e5SJeff Roberson 	if ((object->flags & OBJ_SHADOWLIST) != 0) {
75951b867e5SJeff Roberson 		backing_object = object->backing_object;
76051b867e5SJeff Roberson 		VM_OBJECT_WLOCK(backing_object);
76151b867e5SJeff Roberson 		vm_object_backing_remove_locked(object);
76251b867e5SJeff Roberson 		VM_OBJECT_WUNLOCK(backing_object);
76351b867e5SJeff Roberson 	} else
76451b867e5SJeff Roberson 		object->backing_object = NULL;
76551b867e5SJeff Roberson }
76651b867e5SJeff Roberson 
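/*
 *	Make "backing_object" the backing store for "object".  For anonymous
 *	backing objects the object is also linked onto the shadow list and
 *	shadow_count is bumped; the _locked variant expects the caller to
 *	hold the backing object's lock.
 */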
76751b867e5SJeff Roberson static void
76851b867e5SJeff Roberson vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object)
76951b867e5SJeff Roberson {
77051b867e5SJeff Roberson 
77151b867e5SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
77251b867e5SJeff Roberson 
77351b867e5SJeff Roberson 	if ((backing_object->flags & OBJ_ANON) != 0) {
77451b867e5SJeff Roberson 		VM_OBJECT_ASSERT_WLOCKED(backing_object);
77551b867e5SJeff Roberson 		LIST_INSERT_HEAD(&backing_object->shadow_head, object,
77651b867e5SJeff Roberson 		    shadow_list);
77751b867e5SJeff Roberson 		backing_object->shadow_count++;
77851b867e5SJeff Roberson 		object->flags |= OBJ_SHADOWLIST;
77951b867e5SJeff Roberson 	}
78051b867e5SJeff Roberson 	object->backing_object = backing_object;
78151b867e5SJeff Roberson }
78251b867e5SJeff Roberson 
78351b867e5SJeff Roberson static void
78451b867e5SJeff Roberson vm_object_backing_insert(vm_object_t object, vm_object_t backing_object)
78551b867e5SJeff Roberson {
78651b867e5SJeff Roberson 
78751b867e5SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
78851b867e5SJeff Roberson 
78951b867e5SJeff Roberson 	if ((backing_object->flags & OBJ_ANON) != 0) {
79051b867e5SJeff Roberson 		VM_OBJECT_WLOCK(backing_object);
79151b867e5SJeff Roberson 		vm_object_backing_insert_locked(object, backing_object);
79251b867e5SJeff Roberson 		VM_OBJECT_WUNLOCK(backing_object);
79351b867e5SJeff Roberson 	} else
79451b867e5SJeff Roberson 		object->backing_object = backing_object;
79551b867e5SJeff Roberson }
79651b867e5SJeff Roberson 
797*98087a06SJeff Roberson /*
798*98087a06SJeff Roberson  * Insert an object into a backing_object's shadow list with an additional
799*98087a06SJeff Roberson  * reference to the backing_object added.
800*98087a06SJeff Roberson  */
801*98087a06SJeff Roberson static void
802*98087a06SJeff Roberson vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object)
803*98087a06SJeff Roberson {
804*98087a06SJeff Roberson 
805*98087a06SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
806*98087a06SJeff Roberson 
807*98087a06SJeff Roberson 	if ((backing_object->flags & OBJ_ANON) != 0) {
808*98087a06SJeff Roberson 		VM_OBJECT_WLOCK(backing_object);
809*98087a06SJeff Roberson 		KASSERT((backing_object->flags & OBJ_DEAD) == 0,
810*98087a06SJeff Roberson 		    ("shadowing dead anonymous object"));
811*98087a06SJeff Roberson 		vm_object_reference_locked(backing_object);
812*98087a06SJeff Roberson 		vm_object_backing_insert_locked(object, backing_object);
813*98087a06SJeff Roberson 		vm_object_clear_flag(backing_object, OBJ_ONEMAPPING);
814*98087a06SJeff Roberson 		VM_OBJECT_WUNLOCK(backing_object);
815*98087a06SJeff Roberson 	} else {
816*98087a06SJeff Roberson 		vm_object_reference(backing_object);
817*98087a06SJeff Roberson 		object->backing_object = backing_object;
818*98087a06SJeff Roberson 	}
819*98087a06SJeff Roberson }
820*98087a06SJeff Roberson 
821*98087a06SJeff Roberson /*
822*98087a06SJeff Roberson  * Transfer a backing reference from backing_object to object.
823*98087a06SJeff Roberson  */
824*98087a06SJeff Roberson static void
825*98087a06SJeff Roberson vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object)
826*98087a06SJeff Roberson {
827*98087a06SJeff Roberson 	vm_object_t new_backing_object;
828*98087a06SJeff Roberson 
829*98087a06SJeff Roberson 	/*
830*98087a06SJeff Roberson 	 * Note that the reference to backing_object->backing_object
831*98087a06SJeff Roberson 	 * moves from within backing_object to within object.
832*98087a06SJeff Roberson 	 */
833*98087a06SJeff Roberson 	vm_object_backing_remove_locked(object);
834*98087a06SJeff Roberson 	new_backing_object = backing_object->backing_object;
835*98087a06SJeff Roberson 	if (new_backing_object == NULL)
836*98087a06SJeff Roberson 		return;
837*98087a06SJeff Roberson 	if ((new_backing_object->flags & OBJ_ANON) != 0) {
838*98087a06SJeff Roberson 		VM_OBJECT_WLOCK(new_backing_object);
839*98087a06SJeff Roberson 		vm_object_backing_remove_locked(backing_object);
840*98087a06SJeff Roberson 		vm_object_backing_insert_locked(object, new_backing_object);
841*98087a06SJeff Roberson 		VM_OBJECT_WUNLOCK(new_backing_object);
842*98087a06SJeff Roberson 	} else {
843*98087a06SJeff Roberson 		object->backing_object = new_backing_object;
844*98087a06SJeff Roberson 		backing_object->backing_object = NULL;
845*98087a06SJeff Roberson 	}
846*98087a06SJeff Roberson }
847*98087a06SJeff Roberson 
848*98087a06SJeff Roberson /*
849*98087a06SJeff Roberson  * Wait for a concurrent collapse to settle.
850*98087a06SJeff Roberson  */
851*98087a06SJeff Roberson static void
852*98087a06SJeff Roberson vm_object_collapse_wait(vm_object_t object)
853*98087a06SJeff Roberson {
854*98087a06SJeff Roberson 
855*98087a06SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
856*98087a06SJeff Roberson 
857*98087a06SJeff Roberson 	while ((object->flags & OBJ_COLLAPSING) != 0) {
858*98087a06SJeff Roberson 		vm_object_pip_wait(object, "vmcolwait");
859*98087a06SJeff Roberson 		counter_u64_add(object_collapse_waits, 1);
860*98087a06SJeff Roberson 	}
861*98087a06SJeff Roberson }
862*98087a06SJeff Roberson 
863*98087a06SJeff Roberson /*
864*98087a06SJeff Roberson  * Waits for a backing object to clear a pending collapse and returns
865*98087a06SJeff Roberson  * it locked if it is an ANON object.
866*98087a06SJeff Roberson  */
867*98087a06SJeff Roberson static vm_object_t
868*98087a06SJeff Roberson vm_object_backing_collapse_wait(vm_object_t object)
869*98087a06SJeff Roberson {
870*98087a06SJeff Roberson 	vm_object_t backing_object;
871*98087a06SJeff Roberson 
872*98087a06SJeff Roberson 	VM_OBJECT_ASSERT_WLOCKED(object);
873*98087a06SJeff Roberson 
874*98087a06SJeff Roberson 	for (;;) {
875*98087a06SJeff Roberson 		backing_object = object->backing_object;
876*98087a06SJeff Roberson 		if (backing_object == NULL ||
877*98087a06SJeff Roberson 		    (backing_object->flags & OBJ_ANON) == 0)
878*98087a06SJeff Roberson 			return (NULL);
879*98087a06SJeff Roberson 		VM_OBJECT_WLOCK(backing_object);
880*98087a06SJeff Roberson 		if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0)
881*98087a06SJeff Roberson 			break;
882*98087a06SJeff Roberson 		VM_OBJECT_WUNLOCK(object);
883*98087a06SJeff Roberson 		vm_object_pip_sleep(backing_object, "vmbckwait");
884*98087a06SJeff Roberson 		counter_u64_add(object_collapse_waits, 1);
885*98087a06SJeff Roberson 		VM_OBJECT_WLOCK(object);
886*98087a06SJeff Roberson 	}
887*98087a06SJeff Roberson 	return (backing_object);
888*98087a06SJeff Roberson }
88951b867e5SJeff Roberson 
8902ac78f0eSStephan Uphoff /*
8917bbdb843SRuslan Bukin  *	vm_object_terminate_pages removes any remaining pageable pages
8927bbdb843SRuslan Bukin  *	from the object and resets the object to an empty state.
8937bbdb843SRuslan Bukin  */
8947bbdb843SRuslan Bukin static void
8957bbdb843SRuslan Bukin vm_object_terminate_pages(vm_object_t object)
8967bbdb843SRuslan Bukin {
8977bbdb843SRuslan Bukin 	vm_page_t p, p_next;
8987bbdb843SRuslan Bukin 
8997bbdb843SRuslan Bukin 	VM_OBJECT_ASSERT_WLOCKED(object);
9007bbdb843SRuslan Bukin 
9017bbdb843SRuslan Bukin 	/*
9027bbdb843SRuslan Bukin 	 * Free any remaining pageable pages.  This also removes them from the
9037bbdb843SRuslan Bukin 	 * paging queues.  However, don't free wired pages, just remove them
9047bbdb843SRuslan Bukin 	 * from the object.  Rather than incrementally removing each page from
9057bbdb843SRuslan Bukin 	 * the object, the page and object are reset to an empty state.
9067bbdb843SRuslan Bukin 	 */
9077bbdb843SRuslan Bukin 	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
9087bbdb843SRuslan Bukin 		vm_page_assert_unbusied(p);
909fee2a2faSMark Johnston 		KASSERT(p->object == object &&
910fee2a2faSMark Johnston 		    (p->ref_count & VPRC_OBJREF) != 0,
911fee2a2faSMark Johnston 		    ("vm_object_terminate_pages: page %p is inconsistent", p));
912fee2a2faSMark Johnston 
9137bbdb843SRuslan Bukin 		p->object = NULL;
914fee2a2faSMark Johnston 		if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) {
9155cd29d0fSMark Johnston 			VM_CNT_INC(v_pfree);
9165cd29d0fSMark Johnston 			vm_page_free(p);
9174074d642SAlan Cox 		}
918fee2a2faSMark Johnston 	}
9192fcd1ff6SKonstantin Belousov 
9207bbdb843SRuslan Bukin 	/*
9217bbdb843SRuslan Bukin 	 * If the object contained any pages, then reset it to an empty state.
9227bbdb843SRuslan Bukin 	 * None of the object's fields, including "resident_page_count", were
9237bbdb843SRuslan Bukin 	 * modified by the preceding loop.
9247bbdb843SRuslan Bukin 	 */
9257bbdb843SRuslan Bukin 	if (object->resident_page_count != 0) {
9267bbdb843SRuslan Bukin 		vm_radix_reclaim_allnodes(&object->rtree);
9277bbdb843SRuslan Bukin 		TAILQ_INIT(&object->memq);
9287bbdb843SRuslan Bukin 		object->resident_page_count = 0;
9297bbdb843SRuslan Bukin 		if (object->type == OBJT_VNODE)
9307bbdb843SRuslan Bukin 			vdrop(object->handle);
9317bbdb843SRuslan Bukin 	}
9327bbdb843SRuslan Bukin }
9337bbdb843SRuslan Bukin 
9347bbdb843SRuslan Bukin /*
935df8bae1dSRodney W. Grimes  *	vm_object_terminate actually destroys the specified object, freeing
936df8bae1dSRodney W. Grimes  *	up all previously used resources.
937df8bae1dSRodney W. Grimes  *
938df8bae1dSRodney W. Grimes  *	The object must be locked.
9391c7c3c6aSMatthew Dillon  *	This routine may block.
940df8bae1dSRodney W. Grimes  */
94195e5e988SJohn Dyson void
9421b40f8c0SMatthew Dillon vm_object_terminate(vm_object_t object)
943df8bae1dSRodney W. Grimes {
944*98087a06SJeff Roberson 
94589f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
946783a68aaSKonstantin Belousov 	KASSERT((object->flags & OBJ_DEAD) != 0,
947783a68aaSKonstantin Belousov 	    ("terminating non-dead obj %p", object));
948*98087a06SJeff Roberson 	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
949*98087a06SJeff Roberson 	    ("terminating collapsing obj %p", object));
950*98087a06SJeff Roberson 	KASSERT(object->backing_object == NULL,
951*98087a06SJeff Roberson 	    ("terminating shadow obj %p", object));
952bef608bdSJohn Dyson 
953cf27e0d1SJeff Roberson 	/*
954cf27e0d1SJeff Roberson 	 * wait for the pageout daemon to be done with the object
955cf27e0d1SJeff Roberson 	 */
956cf27e0d1SJeff Roberson 	vm_object_pip_wait(object, "objtrm");
957cf27e0d1SJeff Roberson 
95811b57401SHans Petter Selasky 	KASSERT(!REFCOUNT_COUNT(object->paging_in_progress),
959cf27e0d1SJeff Roberson 	    ("vm_object_terminate: pageout in progress"));
960cf27e0d1SJeff Roberson 
961971dd342SAlfred Perlstein 	KASSERT(object->ref_count == 0,
962971dd342SAlfred Perlstein 	    ("vm_object_terminate: object with references, ref_count=%d",
963971dd342SAlfred Perlstein 	    object->ref_count));
964996c772fSJohn Dyson 
9657bbdb843SRuslan Bukin 	if ((object->flags & OBJ_PG_DTOR) == 0)
9667bbdb843SRuslan Bukin 		vm_object_terminate_pages(object);
967bef608bdSJohn Dyson 
968f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
969f8a47341SAlan Cox 	if (__predict_false(!LIST_EMPTY(&object->rvq)))
970f8a47341SAlan Cox 		vm_reserv_break_all(object);
971f8a47341SAlan Cox #endif
9727bfda801SAlan Cox 
973e735691bSJohn Baldwin 	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
974e735691bSJohn Baldwin 	    object->type == OBJT_SWAP,
975e735691bSJohn Baldwin 	    ("%s: non-swap obj %p has cred", __func__, object));
976e735691bSJohn Baldwin 
9772d8acc0fSJohn Dyson 	/*
9789fcfb650SDavid Greenman 	 * Let the pager know object is dead.
9799fcfb650SDavid Greenman 	 */
9809fcfb650SDavid Greenman 	vm_pager_deallocate(object);
98189f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
9829fcfb650SDavid Greenman 
9832ac78f0eSStephan Uphoff 	vm_object_destroy(object);
98447221757SJohn Dyson }
985df8bae1dSRodney W. Grimes 
986edf93b25SAlan Cox /*
987edf93b25SAlan Cox  * Make the page read-only so that we can clear the object flags.  However, if
988edf93b25SAlan Cox  * this is a nosync mmap then the object is likely to stay dirty so do not
989edf93b25SAlan Cox  * mess with the page and do not clear the object flags.  Returns TRUE if the
990edf93b25SAlan Cox  * page should be flushed, and FALSE otherwise.
991edf93b25SAlan Cox  */
9923280870dSKonstantin Belousov static boolean_t
99367d0e293SJeff Roberson vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
9943280870dSKonstantin Belousov {
9953280870dSKonstantin Belousov 
996fff5403fSJeff Roberson 	vm_page_assert_busied(p);
997fff5403fSJeff Roberson 
9983280870dSKonstantin Belousov 	/*
9993280870dSKonstantin Belousov 	 * If we have been asked to skip nosync pages and this is a
10003280870dSKonstantin Belousov 	 * nosync page, skip it.  Note that the object flags were not
10013280870dSKonstantin Belousov 	 * cleared in this case so we do not have to set them.
10023280870dSKonstantin Belousov 	 */
10035cff1f4dSMark Johnston 	if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
100467d0e293SJeff Roberson 		*allclean = FALSE;
10053280870dSKonstantin Belousov 		return (FALSE);
10063280870dSKonstantin Belousov 	} else {
10073280870dSKonstantin Belousov 		pmap_remove_write(p);
10083280870dSKonstantin Belousov 		return (p->dirty != 0);
10093280870dSKonstantin Belousov 	}
10103280870dSKonstantin Belousov }
10113280870dSKonstantin Belousov 
1012df8bae1dSRodney W. Grimes /*
1013df8bae1dSRodney W. Grimes  *	vm_object_page_clean
1014df8bae1dSRodney W. Grimes  *
10154f79d873SMatthew Dillon  *	Clean all dirty pages in the specified range of object.  Leaves page
10164f79d873SMatthew Dillon  * 	on whatever queue it is currently on.   If NOSYNC is set then do not
1017fff5403fSJeff Roberson  *	write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
10184f79d873SMatthew Dillon  *	leaving the object dirty.
101926f9a767SRodney W. Grimes  *
102043b7990eSMatthew Dillon  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
102143b7990eSMatthew Dillon  *	synchronous clustering mode implementation.
102243b7990eSMatthew Dillon  *
102326f9a767SRodney W. Grimes  *	Odd semantics: if start == end, we clean everything.
102426f9a767SRodney W. Grimes  *
102526f9a767SRodney W. Grimes  *	The object must be locked.
1026126d6082SKonstantin Belousov  *
1027126d6082SKonstantin Belousov  *	Returns FALSE if some page from the range was not written, as
1028126d6082SKonstantin Belousov  *	reported by the pager, and TRUE otherwise.
102926f9a767SRodney W. Grimes  */
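/*
 *	For example, a caller flushing an entire object synchronously (a
 *	sketch, not lifted from any particular consumer) might do:
 *
 *		VM_OBJECT_WLOCK(object);
 *		(void)vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *		VM_OBJECT_WUNLOCK(object);
 */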
1030126d6082SKonstantin Belousov boolean_t
103117f3095dSAlan Cox vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
1032e239bb97SKonstantin Belousov     int flags)
1033f6b04d2bSDavid Greenman {
1034e239bb97SKonstantin Belousov 	vm_page_t np, p;
103517f3095dSAlan Cox 	vm_pindex_t pi, tend, tstart;
1036126d6082SKonstantin Belousov 	int curgeneration, n, pagerflags;
103767d0e293SJeff Roberson 	boolean_t eio, res, allclean;
1038f6b04d2bSDavid Greenman 
103989f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
1040e5f299ffSKonstantin Belousov 
104167d0e293SJeff Roberson 	if (object->type != OBJT_VNODE || !vm_object_mightbedirty(object) ||
1042e239bb97SKonstantin Belousov 	    object->resident_page_count == 0)
1043126d6082SKonstantin Belousov 		return (TRUE);
1044f6b04d2bSDavid Greenman 
1045e239bb97SKonstantin Belousov 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
1046e239bb97SKonstantin Belousov 	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
1047e239bb97SKonstantin Belousov 	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
1048e239bb97SKonstantin Belousov 
104917f3095dSAlan Cox 	tstart = OFF_TO_IDX(start);
105017f3095dSAlan Cox 	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
105167d0e293SJeff Roberson 	allclean = tstart == 0 && tend >= object->size;
1052126d6082SKonstantin Belousov 	res = TRUE;
1053f6b04d2bSDavid Greenman 
1054bd7e5f99SJohn Dyson rescan:
10552d8acc0fSJohn Dyson 	curgeneration = object->generation;
10562d8acc0fSJohn Dyson 
105717f3095dSAlan Cox 	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
1058bd7e5f99SJohn Dyson 		pi = p->pindex;
1059e239bb97SKonstantin Belousov 		if (pi >= tend)
1060e239bb97SKonstantin Belousov 			break;
1061e239bb97SKonstantin Belousov 		np = TAILQ_NEXT(p, listq);
10620012f373SJeff Roberson 		if (vm_page_none_valid(p))
1063aef922f5SJohn Dyson 			continue;
106463e97555SJeff Roberson 		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
106567d0e293SJeff Roberson 			if (object->generation != curgeneration &&
106667d0e293SJeff Roberson 			    (flags & OBJPC_SYNC) != 0)
1067e239bb97SKonstantin Belousov 				goto rescan;
1068780636b7SKonstantin Belousov 			np = vm_page_find_least(object, pi);
1069780636b7SKonstantin Belousov 			continue;
1070f6b04d2bSDavid Greenman 		}
107167d0e293SJeff Roberson 		if (!vm_object_page_remove_write(p, flags, &allclean)) {
107263e97555SJeff Roberson 			vm_page_xunbusy(p);
1073bd7e5f99SJohn Dyson 			continue;
107463e97555SJeff Roberson 		}
1075e239bb97SKonstantin Belousov 
10763280870dSKonstantin Belousov 		n = vm_object_page_collect_flush(object, p, pagerflags,
107767d0e293SJeff Roberson 		    flags, &allclean, &eio);
1078126d6082SKonstantin Belousov 		if (eio) {
1079126d6082SKonstantin Belousov 			res = FALSE;
108067d0e293SJeff Roberson 			allclean = FALSE;
1081126d6082SKonstantin Belousov 		}
108267d0e293SJeff Roberson 		if (object->generation != curgeneration &&
108367d0e293SJeff Roberson 		    (flags & OBJPC_SYNC) != 0)
1084b9b7a4beSMatthew Dillon 			goto rescan;
1085031ec8c1SKonstantin Belousov 
1086031ec8c1SKonstantin Belousov 		/*
1087031ec8c1SKonstantin Belousov 		 * If VOP_PUTPAGES() did a truncated write, such that
1088031ec8c1SKonstantin Belousov 		 * even the first page of the run is not fully written,
1089031ec8c1SKonstantin Belousov 		 * vm_pageout_flush() returns 0 as the run length.
1090031ec8c1SKonstantin Belousov 		 * Since the condition that caused the truncated write
1091031ec8c1SKonstantin Belousov 		 * may be permanent, e.g. exhausted free space,
1092031ec8c1SKonstantin Belousov 		 * accepting n == 0 would cause an infinite loop.
1093031ec8c1SKonstantin Belousov 		 *
1094031ec8c1SKonstantin Belousov 		 * Advancing the iterator leaves the unwritten page
1095031ec8c1SKonstantin Belousov 		 * behind, but there is not much we can do if the
1096031ec8c1SKonstantin Belousov 		 * filesystem refuses to write it.
1097031ec8c1SKonstantin Belousov 		 */
1098126d6082SKonstantin Belousov 		if (n == 0) {
1099031ec8c1SKonstantin Belousov 			n = 1;
110067d0e293SJeff Roberson 			allclean = FALSE;
1101126d6082SKonstantin Belousov 		}
1102e239bb97SKonstantin Belousov 		np = vm_page_find_least(object, pi + n);
1103b9b7a4beSMatthew Dillon 	}
1104b9b7a4beSMatthew Dillon #if 0
1105e239bb97SKonstantin Belousov 	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
1106b9b7a4beSMatthew Dillon #endif
1107b9b7a4beSMatthew Dillon 
110867d0e293SJeff Roberson 	if (allclean)
110967d0e293SJeff Roberson 		object->cleangeneration = curgeneration;
1110126d6082SKonstantin Belousov 	return (res);
1111b9b7a4beSMatthew Dillon }
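
/*
 * Editor's note: an illustrative sketch, not part of this file.  A caller
 * that wants to flush one byte range of a vnode-backed object through the
 * function above could look like the fragment below; "obj", "off" and
 * "len" are hypothetical.  Real callers, such as vm_object_sync() further
 * down, additionally take care of the vnode locking that the pager write
 * requires.
 */
#if 0
	boolean_t clean;

	VM_OBJECT_WLOCK(obj);		/* The object must be write-locked. */
	clean = vm_object_page_clean(obj, off, off + len, OBJPC_SYNC);
	VM_OBJECT_WUNLOCK(obj);
	if (!clean)
		error = EIO;		/* The pager failed to write a page. */
#endif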
1112b9b7a4beSMatthew Dillon 
1113b9b7a4beSMatthew Dillon static int
11143280870dSKonstantin Belousov vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
111567d0e293SJeff Roberson     int flags, boolean_t *allclean, boolean_t *eio)
1116b9b7a4beSMatthew Dillon {
11173157c503SKonstantin Belousov 	vm_page_t ma[vm_pageout_page_count], p_first, tp;
11183157c503SKonstantin Belousov 	int count, i, mreq, runlen;
1119b9b7a4beSMatthew Dillon 
11207bec141bSKip Macy 	vm_page_lock_assert(p, MA_NOTOWNED);
112163e97555SJeff Roberson 	vm_page_assert_xbusied(p);
112289f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
11233157c503SKonstantin Belousov 
11243157c503SKonstantin Belousov 	count = 1;
11253157c503SKonstantin Belousov 	mreq = 0;
11263157c503SKonstantin Belousov 
11273157c503SKonstantin Belousov 	for (tp = p; count < vm_pageout_page_count; count++) {
11283157c503SKonstantin Belousov 		tp = vm_page_next(tp);
112963e97555SJeff Roberson 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1130bd7e5f99SJohn Dyson 			break;
113167d0e293SJeff Roberson 		if (!vm_object_page_remove_write(tp, flags, allclean)) {
113263e97555SJeff Roberson 			vm_page_xunbusy(tp);
1133bd7e5f99SJohn Dyson 			break;
1134bd7e5f99SJohn Dyson 		}
113563e97555SJeff Roberson 	}
1136aef922f5SJohn Dyson 
11373157c503SKonstantin Belousov 	for (p_first = p; count < vm_pageout_page_count; count++) {
11383157c503SKonstantin Belousov 		tp = vm_page_prev(p_first);
113963e97555SJeff Roberson 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1140bd7e5f99SJohn Dyson 			break;
114167d0e293SJeff Roberson 		if (!vm_object_page_remove_write(tp, flags, allclean)) {
114263e97555SJeff Roberson 			vm_page_xunbusy(tp);
1143bd7e5f99SJohn Dyson 			break;
114463e97555SJeff Roberson 		}
11453157c503SKonstantin Belousov 		p_first = tp;
11463157c503SKonstantin Belousov 		mreq++;
1147bd7e5f99SJohn Dyson 	}
1148bd7e5f99SJohn Dyson 
11493157c503SKonstantin Belousov 	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
11503157c503SKonstantin Belousov 		ma[i] = tp;
1151cf2819ccSJohn Dyson 
1152126d6082SKonstantin Belousov 	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
11531e8a675cSKonstantin Belousov 	return (runlen);
115426f9a767SRodney W. Grimes }
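
/*
 * Editor's note: an illustrative sketch of the cluster that the function
 * above assembles for vm_pageout_flush().  The ma[] array is ordered by
 * pindex and "mreq" is the index of the page that triggered the flush:
 *
 *	ma[0] .. ma[mreq - 1]		pages found walking backward from p
 *	ma[mreq]			p itself
 *	ma[mreq + 1] .. ma[count - 1]	pages found walking forward from p
 *
 * Either walk stops at the first page that is missing, already busy, or
 * not in need of a write, so the cluster is always one contiguous run.
 */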
1155df8bae1dSRodney W. Grimes 
11561efb74fbSJohn Dyson /*
1157950f8459SAlan Cox  * Note that there is absolutely no sense in writing out
1158950f8459SAlan Cox  * anonymous objects, so we track down the vnode object
1159950f8459SAlan Cox  * to write out.
1160950f8459SAlan Cox  * We invalidate (remove) all pages from the address space
1161950f8459SAlan Cox  * for semantic correctness.
1162950f8459SAlan Cox  *
11636bbee8e2SAlan Cox  * If the backing object is a device object with unmanaged pages, then any
11646bbee8e2SAlan Cox  * mappings to the specified range of pages must be removed before this
11656bbee8e2SAlan Cox  * function is called.
11666bbee8e2SAlan Cox  *
1167950f8459SAlan Cox  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
1168950f8459SAlan Cox  * may start out with a NULL object.
1169950f8459SAlan Cox  */
1170126d6082SKonstantin Belousov boolean_t
1171950f8459SAlan Cox vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
1172950f8459SAlan Cox     boolean_t syncio, boolean_t invalidate)
1173950f8459SAlan Cox {
1174950f8459SAlan Cox 	vm_object_t backing_object;
1175950f8459SAlan Cox 	struct vnode *vp;
11763b582b4eSTor Egge 	struct mount *mp;
1177126d6082SKonstantin Belousov 	int error, flags, fsync_after;
1178126d6082SKonstantin Belousov 	boolean_t res;
1179950f8459SAlan Cox 
1180950f8459SAlan Cox 	if (object == NULL)
1181126d6082SKonstantin Belousov 		return (TRUE);
1182126d6082SKonstantin Belousov 	res = TRUE;
1183126d6082SKonstantin Belousov 	error = 0;
118489f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object);
1185950f8459SAlan Cox 	while ((backing_object = object->backing_object) != NULL) {
118689f6b863SAttilio Rao 		VM_OBJECT_WLOCK(backing_object);
118756e0670fSAlan Cox 		offset += object->backing_object_offset;
118889f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
1189950f8459SAlan Cox 		object = backing_object;
1190950f8459SAlan Cox 		if (object->size < OFF_TO_IDX(offset + size))
1191950f8459SAlan Cox 			size = IDX_TO_OFF(object->size) - offset;
1192950f8459SAlan Cox 	}
1193950f8459SAlan Cox 	/*
1194950f8459SAlan Cox 	 * Flush pages if writing is allowed, invalidate them
1195950f8459SAlan Cox 	 * if invalidation requested.  Pages undergoing I/O
1196950f8459SAlan Cox 	 * will be ignored by vm_object_page_remove().
1197950f8459SAlan Cox 	 *
1198950f8459SAlan Cox 	 * We cannot lock the vnode and then wait for paging
1199950f8459SAlan Cox 	 * to complete without deadlocking against vm_fault.
1200950f8459SAlan Cox 	 * Instead we simply call vm_object_page_remove() and
1201950f8459SAlan Cox 	 * allow it to block internally on a page-by-page
1202950f8459SAlan Cox 	 * basis when it encounters pages undergoing async
1203950f8459SAlan Cox 	 * I/O.
1204950f8459SAlan Cox 	 */
1205950f8459SAlan Cox 	if (object->type == OBJT_VNODE &&
120667d0e293SJeff Roberson 	    vm_object_mightbedirty(object) != 0 &&
12075bf94937SKonstantin Belousov 	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
120889f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
12093b582b4eSTor Egge 		(void) vn_start_write(vp, &mp, V_WAIT);
1210cb05b60aSAttilio Rao 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
121175ff604aSKonstantin Belousov 		if (syncio && !invalidate && offset == 0 &&
1212d1780e8dSKonstantin Belousov 		    atop(size) == object->size) {
121375ff604aSKonstantin Belousov 			/*
121475ff604aSKonstantin Belousov 			 * If syncing the whole mapping of the file,
121575ff604aSKonstantin Belousov 			 * it is faster to schedule all the writes in
121675ff604aSKonstantin Belousov 			 * async mode, which also allows clustering,
121775ff604aSKonstantin Belousov 			 * and then wait for the I/O to complete.
121875ff604aSKonstantin Belousov 			 */
121975ff604aSKonstantin Belousov 			flags = 0;
122075ff604aSKonstantin Belousov 			fsync_after = TRUE;
122175ff604aSKonstantin Belousov 		} else {
1222950f8459SAlan Cox 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
122375ff604aSKonstantin Belousov 			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
122475ff604aSKonstantin Belousov 			fsync_after = FALSE;
122575ff604aSKonstantin Belousov 		}
122689f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
1227126d6082SKonstantin Belousov 		res = vm_object_page_clean(object, offset, offset + size,
1228126d6082SKonstantin Belousov 		    flags);
122989f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
123075ff604aSKonstantin Belousov 		if (fsync_after)
1231126d6082SKonstantin Belousov 			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
1232b249ce48SMateusz Guzik 		VOP_UNLOCK(vp);
12333b582b4eSTor Egge 		vn_finished_write(mp);
1234126d6082SKonstantin Belousov 		if (error != 0)
1235126d6082SKonstantin Belousov 			res = FALSE;
123689f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
1237950f8459SAlan Cox 	}
1238950f8459SAlan Cox 	if ((object->type == OBJT_VNODE ||
1239950f8459SAlan Cox 	     object->type == OBJT_DEVICE) && invalidate) {
12406bbee8e2SAlan Cox 		if (object->type == OBJT_DEVICE)
12416bbee8e2SAlan Cox 			/*
12426bbee8e2SAlan Cox 			 * The option OBJPR_NOTMAPPED must be passed here
12436bbee8e2SAlan Cox 			 * because vm_object_page_remove() cannot remove
12446bbee8e2SAlan Cox 			 * unmanaged mappings.
12456bbee8e2SAlan Cox 			 */
12466bbee8e2SAlan Cox 			flags = OBJPR_NOTMAPPED;
12476bbee8e2SAlan Cox 		else if (old_msync)
12486195b24aSKonstantin Belousov 			flags = 0;
12496bbee8e2SAlan Cox 		else
12506195b24aSKonstantin Belousov 			flags = OBJPR_CLEANONLY;
12516bbee8e2SAlan Cox 		vm_object_page_remove(object, OFF_TO_IDX(offset),
12526bbee8e2SAlan Cox 		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
1253950f8459SAlan Cox 	}
125489f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
1255126d6082SKonstantin Belousov 	return (res);
1256950f8459SAlan Cox }
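
/*
 * Editor's note: an illustrative sketch, not part of this file.  An
 * msync(MS_SYNC | MS_INVALIDATE)-style consumer of the function above
 * might do the following; "obj", "off" and "len" are hypothetical and in
 * the real path come from the map entry being synced.
 */
#if 0
	/* Called without the object lock; vm_object_sync() acquires it. */
	if (!vm_object_sync(obj, off, len, TRUE, TRUE))
		error = EIO;	/* The pager could not write some page. */
#endif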
1257950f8459SAlan Cox 
1258950f8459SAlan Cox /*
1259aa3650eaSMark Johnston  * Determine whether the given advice can be applied to the object.  Advice is
1260aa3650eaSMark Johnston  * not applied to unmanaged pages since they never belong to page queues, and
1261aa3650eaSMark Johnston  * since MADV_FREE is destructive, it can apply only to anonymous pages that
1262aa3650eaSMark Johnston  * have been mapped at most once.
1263aa3650eaSMark Johnston  */
1264aa3650eaSMark Johnston static bool
1265aa3650eaSMark Johnston vm_object_advice_applies(vm_object_t object, int advice)
1266aa3650eaSMark Johnston {
1267aa3650eaSMark Johnston 
1268aa3650eaSMark Johnston 	if ((object->flags & OBJ_UNMANAGED) != 0)
1269aa3650eaSMark Johnston 		return (false);
1270aa3650eaSMark Johnston 	if (advice != MADV_FREE)
1271aa3650eaSMark Johnston 		return (true);
127263967687SJeff Roberson 	return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
127363967687SJeff Roberson 	    (OBJ_ONEMAPPING | OBJ_ANON));
1274aa3650eaSMark Johnston }
1275aa3650eaSMark Johnston 
1276aa3650eaSMark Johnston static void
1277aa3650eaSMark Johnston vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
1278aa3650eaSMark Johnston     vm_size_t size)
1279aa3650eaSMark Johnston {
1280aa3650eaSMark Johnston 
1281aa3650eaSMark Johnston 	if (advice == MADV_FREE && object->type == OBJT_SWAP)
1282aa3650eaSMark Johnston 		swap_pager_freespace(object, pindex, size);
1283aa3650eaSMark Johnston }
1284aa3650eaSMark Johnston 
1285aa3650eaSMark Johnston /*
1286867a482dSJohn Dyson  *	vm_object_madvise:
1287867a482dSJohn Dyson  *
1288867a482dSJohn Dyson  *	Implements the madvise function at the object/page level.
12891c7c3c6aSMatthew Dillon  *
1290193b9358SAlan Cox  *	MADV_WILLNEED	(any object)
1291193b9358SAlan Cox  *
1292193b9358SAlan Cox  *	    Activate the specified pages if they are resident.
1293193b9358SAlan Cox  *
1294193b9358SAlan Cox  *	MADV_DONTNEED	(any object)
1295193b9358SAlan Cox  *
1296193b9358SAlan Cox  *	    Deactivate the specified pages if they are resident.
1297193b9358SAlan Cox  *
1298193b9358SAlan Cox  *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
1299193b9358SAlan Cox  *			 OBJ_ONEMAPPING only)
1300193b9358SAlan Cox  *
1301193b9358SAlan Cox  *	    Deactivate and clean the specified pages if they are
1302193b9358SAlan Cox  *	    resident.  This permits the process to reuse the pages
1303193b9358SAlan Cox  *	    without faulting or the kernel to reclaim the pages
1304193b9358SAlan Cox  *	    without I/O.
1305867a482dSJohn Dyson  */
1306867a482dSJohn Dyson void
130792a59946SJohn Baldwin vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1308c2655a40SMark Johnston     int advice)
1309867a482dSJohn Dyson {
131092a59946SJohn Baldwin 	vm_pindex_t tpindex;
131134567de7SAlan Cox 	vm_object_t backing_object, tobject;
1312aa3650eaSMark Johnston 	vm_page_t m, tm;
1313867a482dSJohn Dyson 
1314867a482dSJohn Dyson 	if (object == NULL)
1315867a482dSJohn Dyson 		return;
1316c2655a40SMark Johnston 
13176e20a165SJohn Dyson relookup:
1318aa3650eaSMark Johnston 	VM_OBJECT_WLOCK(object);
1319aa3650eaSMark Johnston 	if (!vm_object_advice_applies(object, advice)) {
1320aa3650eaSMark Johnston 		VM_OBJECT_WUNLOCK(object);
1321aa3650eaSMark Johnston 		return;
13226e20a165SJohn Dyson 	}
1323aa3650eaSMark Johnston 	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
1324aa3650eaSMark Johnston 		tobject = object;
1325c2655a40SMark Johnston 
13261ce137beSMatthew Dillon 		/*
1327aa3650eaSMark Johnston 		 * If the next page isn't resident in the top-level object, we
1328aa3650eaSMark Johnston 		 * need to search the shadow chain.  When applying MADV_FREE, we
1329aa3650eaSMark Johnston 		 * take care to release any swap space used to store
1330aa3650eaSMark Johnston 		 * non-resident pages.
1331aa3650eaSMark Johnston 		 */
1332aa3650eaSMark Johnston 		if (m == NULL || pindex < m->pindex) {
1333aa3650eaSMark Johnston 			/*
1334aa3650eaSMark Johnston 			 * Optimize a common case: if the top-level object has
1335aa3650eaSMark Johnston 			 * no backing object, we can skip over the non-resident
1336aa3650eaSMark Johnston 			 * range in constant time.
13371ce137beSMatthew Dillon 			 */
1338c2655a40SMark Johnston 			if (object->backing_object == NULL) {
1339c2655a40SMark Johnston 				tpindex = (m != NULL && m->pindex < end) ?
1340c2655a40SMark Johnston 				    m->pindex : end;
1341aa3650eaSMark Johnston 				vm_object_madvise_freespace(object, advice,
1342aa3650eaSMark Johnston 				    pindex, tpindex - pindex);
1343c2655a40SMark Johnston 				if ((pindex = tpindex) == end)
1344c2655a40SMark Johnston 					break;
1345aa3650eaSMark Johnston 				goto next_page;
1346aa3650eaSMark Johnston 			}
1347aa3650eaSMark Johnston 
1348aa3650eaSMark Johnston 			tpindex = pindex;
1349aa3650eaSMark Johnston 			do {
1350aa3650eaSMark Johnston 				vm_object_madvise_freespace(tobject, advice,
1351aa3650eaSMark Johnston 				    tpindex, 1);
13521ce137beSMatthew Dillon 				/*
1353aa3650eaSMark Johnston 				 * Prepare to search the next object in the
1354aa3650eaSMark Johnston 				 * chain.
13551ce137beSMatthew Dillon 				 */
135634567de7SAlan Cox 				backing_object = tobject->backing_object;
135734567de7SAlan Cox 				if (backing_object == NULL)
1358aa3650eaSMark Johnston 					goto next_pindex;
135989f6b863SAttilio Rao 				VM_OBJECT_WLOCK(backing_object);
1360aa3650eaSMark Johnston 				tpindex +=
1361aa3650eaSMark Johnston 				    OFF_TO_IDX(tobject->backing_object_offset);
13629b98b796SAlan Cox 				if (tobject != object)
136389f6b863SAttilio Rao 					VM_OBJECT_WUNLOCK(tobject);
136434567de7SAlan Cox 				tobject = backing_object;
1365aa3650eaSMark Johnston 				if (!vm_object_advice_applies(tobject, advice))
1366aa3650eaSMark Johnston 					goto next_pindex;
1367aa3650eaSMark Johnston 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
1368aa3650eaSMark Johnston 			    NULL);
1369aa3650eaSMark Johnston 		} else {
1370aa3650eaSMark Johnston next_page:
1371aa3650eaSMark Johnston 			tm = m;
1372aa3650eaSMark Johnston 			m = TAILQ_NEXT(m, listq);
1373c2655a40SMark Johnston 		}
1374c2655a40SMark Johnston 
1375867a482dSJohn Dyson 		/*
13760012f373SJeff Roberson 		 * If the page is not in a normal state, skip it.  The page
13770012f373SJeff Roberson 		 * cannot be invalidated while the object lock is held.
1378867a482dSJohn Dyson 		 */
13790012f373SJeff Roberson 		if (!vm_page_all_valid(tm) || vm_page_wired(tm))
1380aa3650eaSMark Johnston 			goto next_pindex;
1381aa3650eaSMark Johnston 		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
1382aa3650eaSMark Johnston 		    ("vm_object_madvise: page %p is fictitious", tm));
1383aa3650eaSMark Johnston 		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
1384aa3650eaSMark Johnston 		    ("vm_object_madvise: page %p is not managed", tm));
138563e97555SJeff Roberson 		if (vm_page_tryxbusy(tm) == 0) {
1386aa3650eaSMark Johnston 			if (object != tobject)
1387aa3650eaSMark Johnston 				VM_OBJECT_WUNLOCK(object);
1388c2655a40SMark Johnston 			if (advice == MADV_WILLNEED) {
1389b11b56b5SAlan Cox 				/*
1390b11b56b5SAlan Cox 				 * Reference the page before unlocking and
1391b11b56b5SAlan Cox 				 * sleeping so that the page daemon is less
1392b11b56b5SAlan Cox 				 * likely to reclaim it.
1393b11b56b5SAlan Cox 				 */
1394aa3650eaSMark Johnston 				vm_page_aflag_set(tm, PGA_REFERENCED);
1395567e51e1SAlan Cox 			}
1396aa3650eaSMark Johnston 			vm_page_busy_sleep(tm, "madvpo", false);
13976e20a165SJohn Dyson 			goto relookup;
139834567de7SAlan Cox 		}
1399aa3650eaSMark Johnston 		vm_page_advise(tm, advice);
140063e97555SJeff Roberson 		vm_page_xunbusy(tm);
1401aa3650eaSMark Johnston 		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
1402aa3650eaSMark Johnston next_pindex:
14039b98b796SAlan Cox 		if (tobject != object)
140489f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(tobject);
1405867a482dSJohn Dyson 	}
140689f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
1407867a482dSJohn Dyson }
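
/*
 * Editor's note: an illustrative sketch, not part of this file.  The
 * madvise(2) path applies the function above to the pindex range backing
 * a user address range; "obj", "pstart" and "pend" are hypothetical.
 */
#if 0
	/* Activate any resident pages; the range will be needed soon. */
	vm_object_madvise(obj, pstart, pend, MADV_WILLNEED);

	/* Let the kernel reclaim the range without writing it to swap. */
	vm_object_madvise(obj, pstart, pend, MADV_FREE);
#endif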
1408867a482dSJohn Dyson 
1409867a482dSJohn Dyson /*
1410df8bae1dSRodney W. Grimes  *	vm_object_shadow:
1411df8bae1dSRodney W. Grimes  *
1412df8bae1dSRodney W. Grimes  *	Create a new object which is backed by the
1413df8bae1dSRodney W. Grimes  *	specified existing object range.  The source
1414df8bae1dSRodney W. Grimes  *	object reference is deallocated.
1415df8bae1dSRodney W. Grimes  *
1416df8bae1dSRodney W. Grimes  *	The new object and offset into that object
1417df8bae1dSRodney W. Grimes  *	are returned in the source parameters.
1418df8bae1dSRodney W. Grimes  */
141926f9a767SRodney W. Grimes void
142067388836SKonstantin Belousov vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
142167388836SKonstantin Belousov     struct ucred *cred, bool shared)
1422df8bae1dSRodney W. Grimes {
1423d031cff1SMatthew Dillon 	vm_object_t source;
1424d031cff1SMatthew Dillon 	vm_object_t result;
1425df8bae1dSRodney W. Grimes 
1426df8bae1dSRodney W. Grimes 	source = *object;
1427df8bae1dSRodney W. Grimes 
1428df8bae1dSRodney W. Grimes 	/*
14299a2f6362SAlan Cox 	 * Don't create the new object if the old object isn't shared.
143063967687SJeff Roberson 	 *
143263967687SJeff Roberson 	 * If we hold the only reference, we can guarantee that the count
143363967687SJeff Roberson 	 * won't increase while we have the map locked.  Otherwise the race is
143363967687SJeff Roberson 	 * harmless and we will end up with an extra shadow object that
143463967687SJeff Roberson 	 * will be collapsed later.
14359a2f6362SAlan Cox 	 */
143663967687SJeff Roberson 	if (source != NULL && source->ref_count == 1 &&
143732362449SKonstantin Belousov 	    (source->flags & OBJ_ANON) != 0)
14389a2f6362SAlan Cox 		return;
14399a2f6362SAlan Cox 
14409a2f6362SAlan Cox 	/*
1441570a2f4aSAlan Cox 	 * Allocate a new object with the given length.
1442df8bae1dSRodney W. Grimes 	 */
144367388836SKonstantin Belousov 	result = vm_object_allocate_anon(atop(length), source, cred, length);
1444df8bae1dSRodney W. Grimes 
1445df8bae1dSRodney W. Grimes 	/*
144651b867e5SJeff Roberson 	 * Store the offset into the source object, and fix up the offset into
144751b867e5SJeff Roberson 	 * the new object.
144851b867e5SJeff Roberson 	 */
144951b867e5SJeff Roberson 	result->backing_object_offset = *offset;
145051b867e5SJeff Roberson 
145167388836SKonstantin Belousov 	if (shared || source != NULL) {
145267388836SKonstantin Belousov 		VM_OBJECT_WLOCK(result);
145367388836SKonstantin Belousov 
145451b867e5SJeff Roberson 		/*
145567388836SKonstantin Belousov 		 * The new object shadows the source object, adding a
145667388836SKonstantin Belousov 		 * reference to it.  Our caller changes its reference
145767388836SKonstantin Belousov 		 * to point to the new object, removing a reference to
145867388836SKonstantin Belousov 		 * the source object.  Net result: no change of
145967388836SKonstantin Belousov 		 * reference count, unless the caller needs to add one
146067388836SKonstantin Belousov 		 * more reference due to forking a shared map entry.
146167388836SKonstantin Belousov 		 */
146267388836SKonstantin Belousov 		if (shared) {
146367388836SKonstantin Belousov 			vm_object_reference_locked(result);
146467388836SKonstantin Belousov 			vm_object_clear_flag(result, OBJ_ONEMAPPING);
146567388836SKonstantin Belousov 		}
146667388836SKonstantin Belousov 
146767388836SKonstantin Belousov 		/*
146867388836SKonstantin Belousov 		 * Try to optimize the result object's page color when
146967388836SKonstantin Belousov 		 * shadowing in order to maintain page coloring
147067388836SKonstantin Belousov 		 * consistency in the combined shadowed object.
1471df8bae1dSRodney W. Grimes 		 */
1472570a2f4aSAlan Cox 		if (source != NULL) {
147351b867e5SJeff Roberson 			vm_object_backing_insert(result, source);
14743f289c3fSJeff Roberson 			result->domain = source->domain;
1475f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
14767b54b1a9SAlan Cox 			result->flags |= source->flags & OBJ_COLORED;
147767388836SKonstantin Belousov 			result->pg_color = (source->pg_color +
147867388836SKonstantin Belousov 			    OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER -
147967388836SKonstantin Belousov 			    1)) - 1);
1480f8a47341SAlan Cox #endif
148167388836SKonstantin Belousov 		}
148251b867e5SJeff Roberson 		VM_OBJECT_WUNLOCK(result);
1483de5f6a77SJohn Dyson 	}
1484df8bae1dSRodney W. Grimes 
1485df8bae1dSRodney W. Grimes 	/*
1486df8bae1dSRodney W. Grimes 	 * Return the new object and offset to the caller.
1487df8bae1dSRodney W. Grimes 	 */
1488df8bae1dSRodney W. Grimes 	*offset = 0;
1489df8bae1dSRodney W. Grimes 	*object = result;
1490df8bae1dSRodney W. Grimes }
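
/*
 * Editor's note: an illustrative sketch, not part of this file.  Callers
 * in the fork and copy-on-write paths invoke the function above roughly
 * as below; the map entry fields are the conventional ones, and after the
 * call the entry references the new shadow object with a zeroed offset.
 */
#if 0
	vm_object_shadow(&entry->object.vm_object, &entry->offset,
	    entry->end - entry->start, entry->cred, /* shared */ false);
#endif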
1491df8bae1dSRodney W. Grimes 
1492c5aaa06dSAlan Cox /*
1493c5aaa06dSAlan Cox  *	vm_object_split:
1494c5aaa06dSAlan Cox  *
1495c5aaa06dSAlan Cox  * Split the pages in a map entry into a new object.  This affords
1496c5aaa06dSAlan Cox  * easier removal of unused pages, and keeps object inheritance from
1497c5aaa06dSAlan Cox  * being a negative impact on memory usage.
1498c5aaa06dSAlan Cox  */
1499c5aaa06dSAlan Cox void
1500c5aaa06dSAlan Cox vm_object_split(vm_map_entry_t entry)
1501c5aaa06dSAlan Cox {
150273000556SAlan Cox 	vm_page_t m, m_next;
1503*98087a06SJeff Roberson 	vm_object_t orig_object, new_object, backing_object;
150473000556SAlan Cox 	vm_pindex_t idx, offidxstart;
150573000556SAlan Cox 	vm_size_t size;
1506c5aaa06dSAlan Cox 
1507c5aaa06dSAlan Cox 	orig_object = entry->object.vm_object;
1508*98087a06SJeff Roberson 	KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0,
1509*98087a06SJeff Roberson 	    ("vm_object_split:  Splitting object with multiple mappings."));
151063967687SJeff Roberson 	if ((orig_object->flags & OBJ_ANON) == 0)
1511c5aaa06dSAlan Cox 		return;
1512c5aaa06dSAlan Cox 	if (orig_object->ref_count <= 1)
1513c5aaa06dSAlan Cox 		return;
151489f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(orig_object);
1515c5aaa06dSAlan Cox 
15164da9f125SAlan Cox 	offidxstart = OFF_TO_IDX(entry->offset);
151795442adfSAlan Cox 	size = atop(entry->end - entry->start);
1518c5aaa06dSAlan Cox 
15194da9f125SAlan Cox 	/*
15204da9f125SAlan Cox 	 * If swap_pager_copy() is later called, it will convert new_object
15214da9f125SAlan Cox 	 * into a swap object.
15224da9f125SAlan Cox 	 */
152367388836SKonstantin Belousov 	new_object = vm_object_allocate_anon(size, orig_object,
152467388836SKonstantin Belousov 	    orig_object->cred, ptoa(size));
1525c5aaa06dSAlan Cox 
1526c5474b8fSAlan Cox 	/*
1527*98087a06SJeff Roberson 	 * We must wait for the orig_object to complete any in-progress
1528*98087a06SJeff Roberson 	 * collapse so that the swap blocks are stable below.  The
1529*98087a06SJeff Roberson 	 * additional reference on backing_object by new object will
1530*98087a06SJeff Roberson 	 * prevent further collapse operations until split completes.
1531*98087a06SJeff Roberson 	 */
1532*98087a06SJeff Roberson 	VM_OBJECT_WLOCK(orig_object);
1533*98087a06SJeff Roberson 	vm_object_collapse_wait(orig_object);
1534*98087a06SJeff Roberson 
1535*98087a06SJeff Roberson 	/*
1536c5474b8fSAlan Cox 	 * At this point, the new object is still private, so the order in
1537c5474b8fSAlan Cox 	 * which the original and new objects are locked does not matter.
1538c5474b8fSAlan Cox 	 */
153989f6b863SAttilio Rao 	VM_OBJECT_WLOCK(new_object);
15403f289c3fSJeff Roberson 	new_object->domain = orig_object->domain;
1541*98087a06SJeff Roberson 	backing_object = orig_object->backing_object;
1542*98087a06SJeff Roberson 	if (backing_object != NULL) {
1543*98087a06SJeff Roberson 		vm_object_backing_insert_ref(new_object, backing_object);
1544c5aaa06dSAlan Cox 		new_object->backing_object_offset =
15454da9f125SAlan Cox 		    orig_object->backing_object_offset + entry->offset;
1546c5aaa06dSAlan Cox 	}
1547ef694c1aSEdward Tomasz Napierala 	if (orig_object->cred != NULL) {
1548ef694c1aSEdward Tomasz Napierala 		crhold(orig_object->cred);
15493364c323SKonstantin Belousov 		KASSERT(orig_object->charge >= ptoa(size),
15503364c323SKonstantin Belousov 		    ("orig_object->charge < ptoa(size)"));
15513364c323SKonstantin Belousov 		orig_object->charge -= ptoa(size);
15523364c323SKonstantin Belousov 	}
1553*98087a06SJeff Roberson 
1554*98087a06SJeff Roberson 	/*
1555*98087a06SJeff Roberson 	 * Mark the split operation so that swap_pager_getpages() knows
1556*98087a06SJeff Roberson 	 * that the object is in transition.
1557*98087a06SJeff Roberson 	 */
1558*98087a06SJeff Roberson 	vm_object_set_flag(orig_object, OBJ_SPLIT);
1559c5aaa06dSAlan Cox retry:
1560b382c10aSKonstantin Belousov 	m = vm_page_find_least(orig_object, offidxstart);
156173000556SAlan Cox 	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
156273000556SAlan Cox 	    m = m_next) {
156373000556SAlan Cox 		m_next = TAILQ_NEXT(m, listq);
1564c5aaa06dSAlan Cox 
1565c5aaa06dSAlan Cox 		/*
1566c5aaa06dSAlan Cox 		 * We must wait for pending I/O to complete before we can
1567c5aaa06dSAlan Cox 		 * rename the page.
1568c5aaa06dSAlan Cox 		 *
1569c5aaa06dSAlan Cox 		 * We do not have to VM_PROT_NONE the page as mappings should
1570c5aaa06dSAlan Cox 		 * not be changed by this operation.
1571c5aaa06dSAlan Cox 		 */
157263e97555SJeff Roberson 		if (vm_page_tryxbusy(m) == 0) {
157389f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(new_object);
15744cdea4a8SJeff Roberson 			vm_page_sleep_if_busy(m, "spltwt");
157589f6b863SAttilio Rao 			VM_OBJECT_WLOCK(new_object);
1576c5aaa06dSAlan Cox 			goto retry;
1577de33beddSAlan Cox 		}
1578e946b949SAttilio Rao 
15794bf95d00SJeff Roberson 		/*
15804bf95d00SJeff Roberson 		 * The page was left invalid.  Likely placed there by
15814bf95d00SJeff Roberson 		 * an incomplete fault.  Just remove and ignore.
15824bf95d00SJeff Roberson 		 */
15834bf95d00SJeff Roberson 		if (vm_page_none_valid(m)) {
15844bf95d00SJeff Roberson 			if (vm_page_remove(m))
15854bf95d00SJeff Roberson 				vm_page_free(m);
15864bf95d00SJeff Roberson 			continue;
15874bf95d00SJeff Roberson 		}
15884bf95d00SJeff Roberson 
15893453bca8SAlan Cox 		/* vm_page_rename() will dirty the page. */
1590e946b949SAttilio Rao 		if (vm_page_rename(m, new_object, idx)) {
159163e97555SJeff Roberson 			vm_page_xunbusy(m);
1592e946b949SAttilio Rao 			VM_OBJECT_WUNLOCK(new_object);
1593e946b949SAttilio Rao 			VM_OBJECT_WUNLOCK(orig_object);
15948d6fbbb8SJeff Roberson 			vm_radix_wait();
1595e946b949SAttilio Rao 			VM_OBJECT_WLOCK(orig_object);
1596e946b949SAttilio Rao 			VM_OBJECT_WLOCK(new_object);
1597e946b949SAttilio Rao 			goto retry;
1598e946b949SAttilio Rao 		}
159963e97555SJeff Roberson 
1600b5f359b7SAlan Cox #if VM_NRESERVLEVEL > 0
1601b5f359b7SAlan Cox 		/*
1602b5f359b7SAlan Cox 		 * If some of the reservation's allocated pages remain with
1603b5f359b7SAlan Cox 		 * the original object, then transferring the reservation to
1604b5f359b7SAlan Cox 		 * the new object is neither particularly beneficial nor
1605b5f359b7SAlan Cox 		 * particularly harmful as compared to leaving the reservation
1606b5f359b7SAlan Cox 		 * with the original object.  If, however, all of the
1607b5f359b7SAlan Cox 		 * reservation's allocated pages are transferred to the new
1608b5f359b7SAlan Cox 		 * object, then transferring the reservation is typically
1609b5f359b7SAlan Cox 		 * beneficial.  Determining which of these two cases applies
1610b5f359b7SAlan Cox 		 * would be more costly than unconditionally renaming the
1611b5f359b7SAlan Cox 		 * reservation.
1612b5f359b7SAlan Cox 		 */
1613b5f359b7SAlan Cox 		vm_reserv_rename(m, new_object, orig_object, offidxstart);
1614b5f359b7SAlan Cox #endif
16158da1c098SJeff Roberson 		if (orig_object->type != OBJT_SWAP)
16168da1c098SJeff Roberson 			vm_page_xunbusy(m);
1617c5aaa06dSAlan Cox 	}
1618d7a013c3SAlan Cox 	if (orig_object->type == OBJT_SWAP) {
1619c5aaa06dSAlan Cox 		/*
1620c7c8dd7eSAlan Cox 		 * swap_pager_copy() can sleep, in which case the orig_object's
1621c7c8dd7eSAlan Cox 		 * and new_object's locks are released and reacquired.
1622c5aaa06dSAlan Cox 		 */
1623c5aaa06dSAlan Cox 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
1624dfd55c0cSAttilio Rao 		TAILQ_FOREACH(m, &new_object->memq, listq)
1625c7aebda8SAttilio Rao 			vm_page_xunbusy(m);
1626c5aaa06dSAlan Cox 	}
1627*98087a06SJeff Roberson 	vm_object_clear_flag(orig_object, OBJ_SPLIT);
162889f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(orig_object);
162989f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(new_object);
1630c5aaa06dSAlan Cox 	entry->object.vm_object = new_object;
1631c5aaa06dSAlan Cox 	entry->offset = 0LL;
1632c5aaa06dSAlan Cox 	vm_object_deallocate(orig_object);
163389f6b863SAttilio Rao 	VM_OBJECT_WLOCK(new_object);
1634c5aaa06dSAlan Cox }
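
/*
 * Editor's note: an illustrative sketch, not part of this file.  The
 * vm_map code drives the split when a partially mapped anonymous object
 * should be trimmed down to one entry's range; conceptually:
 */
#if 0
	VM_OBJECT_WLOCK(entry->object.vm_object);	/* Lock the original. */
	vm_object_split(entry);
	/* Returns with entry->object.vm_object replaced and write-locked. */
	VM_OBJECT_WUNLOCK(entry->object.vm_object);
#endif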
1635c5aaa06dSAlan Cox 
163699a1570aSKonstantin Belousov static vm_page_t
1637*98087a06SJeff Roberson vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p)
163899a1570aSKonstantin Belousov {
163999a1570aSKonstantin Belousov 	vm_object_t backing_object;
164099a1570aSKonstantin Belousov 
164199a1570aSKonstantin Belousov 	VM_OBJECT_ASSERT_WLOCKED(object);
164299a1570aSKonstantin Belousov 	backing_object = object->backing_object;
164399a1570aSKonstantin Belousov 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
164499a1570aSKonstantin Belousov 
164599a1570aSKonstantin Belousov 	KASSERT(p == NULL || p->object == object || p->object == backing_object,
164699a1570aSKonstantin Belousov 	    ("invalid ownership %p %p %p", p, object, backing_object));
16478d6fbbb8SJeff Roberson 	/* The page is only NULL when rename fails. */
16484cdea4a8SJeff Roberson 	if (p == NULL) {
16496a14746cSRyan Libby 		VM_OBJECT_WUNLOCK(object);
16506a14746cSRyan Libby 		VM_OBJECT_WUNLOCK(backing_object);
16518d6fbbb8SJeff Roberson 		vm_radix_wait();
16524cdea4a8SJeff Roberson 	} else {
16534cdea4a8SJeff Roberson 		if (p->object == object)
16544cdea4a8SJeff Roberson 			VM_OBJECT_WUNLOCK(backing_object);
165599a1570aSKonstantin Belousov 		else
16564cdea4a8SJeff Roberson 			VM_OBJECT_WUNLOCK(object);
16575975e53dSKonstantin Belousov 		vm_page_busy_sleep(p, "vmocol", false);
16584cdea4a8SJeff Roberson 	}
165999a1570aSKonstantin Belousov 	VM_OBJECT_WLOCK(object);
166099a1570aSKonstantin Belousov 	VM_OBJECT_WLOCK(backing_object);
166199a1570aSKonstantin Belousov 	return (TAILQ_FIRST(&backing_object->memq));
166299a1570aSKonstantin Belousov }
166399a1570aSKonstantin Belousov 
166499a1570aSKonstantin Belousov static bool
16654cc8daf7SConrad Meyer vm_object_scan_all_shadowed(vm_object_t object)
16664cc8daf7SConrad Meyer {
16674cc8daf7SConrad Meyer 	vm_object_t backing_object;
16684cc8daf7SConrad Meyer 	vm_page_t p, pp;
166977d6fd97SKonstantin Belousov 	vm_pindex_t backing_offset_index, new_pindex, pi, ps;
16704cc8daf7SConrad Meyer 
16714cc8daf7SConrad Meyer 	VM_OBJECT_ASSERT_WLOCKED(object);
16724cc8daf7SConrad Meyer 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
16734cc8daf7SConrad Meyer 
16744cc8daf7SConrad Meyer 	backing_object = object->backing_object;
16754cc8daf7SConrad Meyer 
167663967687SJeff Roberson 	if ((backing_object->flags & OBJ_ANON) == 0)
16774cc8daf7SConrad Meyer 		return (false);
16784cc8daf7SConrad Meyer 
167977d6fd97SKonstantin Belousov 	pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
168077d6fd97SKonstantin Belousov 	p = vm_page_find_least(backing_object, pi);
168177d6fd97SKonstantin Belousov 	ps = swap_pager_find_least(backing_object, pi);
16824cc8daf7SConrad Meyer 
16834cc8daf7SConrad Meyer 	/*
168477d6fd97SKonstantin Belousov 	 * Only check pages inside the parent object's range and
168577d6fd97SKonstantin Belousov 	 * inside the parent object's mapping of the backing object.
16864cc8daf7SConrad Meyer 	 */
168777d6fd97SKonstantin Belousov 	for (;; pi++) {
168877d6fd97SKonstantin Belousov 		if (p != NULL && p->pindex < pi)
168977d6fd97SKonstantin Belousov 			p = TAILQ_NEXT(p, listq);
169077d6fd97SKonstantin Belousov 		if (ps < pi)
169177d6fd97SKonstantin Belousov 			ps = swap_pager_find_least(backing_object, pi);
169277d6fd97SKonstantin Belousov 		if (p == NULL && ps >= backing_object->size)
169377d6fd97SKonstantin Belousov 			break;
169477d6fd97SKonstantin Belousov 		else if (p == NULL)
169577d6fd97SKonstantin Belousov 			pi = ps;
169677d6fd97SKonstantin Belousov 		else
169777d6fd97SKonstantin Belousov 			pi = MIN(p->pindex, ps);
169877d6fd97SKonstantin Belousov 
169977d6fd97SKonstantin Belousov 		new_pindex = pi - backing_offset_index;
170077d6fd97SKonstantin Belousov 		if (new_pindex >= object->size)
170177d6fd97SKonstantin Belousov 			break;
17024cc8daf7SConrad Meyer 
17034cc8daf7SConrad Meyer 		/*
170458447749SJeff Roberson 		 * If the backing object page is busy, a grandparent or older
170558447749SJeff Roberson 		 * page may still be undergoing CoW.  It is not safe to
170658447749SJeff Roberson 		 * collapse the backing object until it is quiesced.
170758447749SJeff Roberson 		 */
170858447749SJeff Roberson 		if (p != NULL && vm_page_busied(p))
170958447749SJeff Roberson 			return (false);
171058447749SJeff Roberson 
171158447749SJeff Roberson 		/*
17124cc8daf7SConrad Meyer 		 * See if the parent has the page or if the parent's object
17134cc8daf7SConrad Meyer 		 * pager has the page.  If the parent has the page but the page
17144cc8daf7SConrad Meyer 		 * is not valid, the parent's object pager must have the page.
17154cc8daf7SConrad Meyer 		 *
17164cc8daf7SConrad Meyer 		 * If this fails, the parent does not completely shadow the
17174cc8daf7SConrad Meyer 		 * object and we might as well give up now.
17184cc8daf7SConrad Meyer 		 */
17194cc8daf7SConrad Meyer 		pp = vm_page_lookup(object, new_pindex);
17200012f373SJeff Roberson 		/*
17210012f373SJeff Roberson 		 * The valid check here is stable due to object lock being
17220012f373SJeff Roberson 		 * required to clear valid and initiate paging.
17230012f373SJeff Roberson 		 */
17240012f373SJeff Roberson 		if ((pp == NULL || vm_page_none_valid(pp)) &&
17254cc8daf7SConrad Meyer 		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
17264cc8daf7SConrad Meyer 			return (false);
17274cc8daf7SConrad Meyer 	}
17284cc8daf7SConrad Meyer 	return (true);
17294cc8daf7SConrad Meyer }
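
/*
 * Editor's note: an illustrative sketch of the merged walk above.  Each
 * iteration advances "pi" to the smaller of the next resident pindex and
 * the next swap-block pindex, so pindexes populated by neither are never
 * visited.  For example, with resident pages at 1 and 4 and swap blocks
 * at 0 and 3:
 *
 *	resident:	. P . . P .
 *	swap:		S . . S . .
 *	pi visits:	0 1   3 4	(2 and 5 are skipped)
 */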
17304cc8daf7SConrad Meyer 
1731*98087a06SJeff Roberson static void
1732*98087a06SJeff Roberson vm_object_collapse_scan(vm_object_t object)
17332ad1a3f7SMatthew Dillon {
17342ad1a3f7SMatthew Dillon 	vm_object_t backing_object;
173599a1570aSKonstantin Belousov 	vm_page_t next, p, pp;
173699a1570aSKonstantin Belousov 	vm_pindex_t backing_offset_index, new_pindex;
17372ad1a3f7SMatthew Dillon 
173889f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
173989f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
17402ad1a3f7SMatthew Dillon 
17412ad1a3f7SMatthew Dillon 	backing_object = object->backing_object;
17422ad1a3f7SMatthew Dillon 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
17432ad1a3f7SMatthew Dillon 
17442ad1a3f7SMatthew Dillon 	/*
17452ad1a3f7SMatthew Dillon 	 * Scan the backing object's resident pages, moving or freeing each one.
17462ad1a3f7SMatthew Dillon 	 */
17474cc8daf7SConrad Meyer 	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
174899a1570aSKonstantin Belousov 		next = TAILQ_NEXT(p, listq);
174999a1570aSKonstantin Belousov 		new_pindex = p->pindex - backing_offset_index;
17502ad1a3f7SMatthew Dillon 
17512ad1a3f7SMatthew Dillon 		/*
17522ad1a3f7SMatthew Dillon 		 * Check for busy page
17532ad1a3f7SMatthew Dillon 		 */
175463e97555SJeff Roberson 		if (vm_page_tryxbusy(p) == 0) {
1755*98087a06SJeff Roberson 			next = vm_object_collapse_scan_wait(object, p);
17562ad1a3f7SMatthew Dillon 			continue;
17572ad1a3f7SMatthew Dillon 		}
17582ad1a3f7SMatthew Dillon 
1759*98087a06SJeff Roberson 		KASSERT(object->backing_object == backing_object,
1760*98087a06SJeff Roberson 		    ("vm_object_collapse_scan: backing object mismatch %p != %p",
1761*98087a06SJeff Roberson 		    object->backing_object, backing_object));
176299a1570aSKonstantin Belousov 		KASSERT(p->object == backing_object,
1763*98087a06SJeff Roberson 		    ("vm_object_collapse_scan: object mismatch %p != %p",
1764*98087a06SJeff Roberson 		    p->object, backing_object));
17652ad1a3f7SMatthew Dillon 
176699a1570aSKonstantin Belousov 		if (p->pindex < backing_offset_index ||
176799a1570aSKonstantin Belousov 		    new_pindex >= object->size) {
1768e946b949SAttilio Rao 			if (backing_object->type == OBJT_SWAP)
17694cc8daf7SConrad Meyer 				swap_pager_freespace(backing_object, p->pindex,
17704cc8daf7SConrad Meyer 				    1);
1771e946b949SAttilio Rao 
1772f6d89838SAlan Cox 			KASSERT(!pmap_page_is_mapped(p),
1773f6d89838SAlan Cox 			    ("freeing mapped page %p", p));
17740fd977b3SMark Johnston 			if (vm_page_remove(p))
17752ad1a3f7SMatthew Dillon 				vm_page_free(p);
17762ad1a3f7SMatthew Dillon 			continue;
17772ad1a3f7SMatthew Dillon 		}
17782ad1a3f7SMatthew Dillon 
17792ad1a3f7SMatthew Dillon 		pp = vm_page_lookup(object, new_pindex);
178063e97555SJeff Roberson 		if (pp != NULL && vm_page_tryxbusy(pp) == 0) {
178163e97555SJeff Roberson 			vm_page_xunbusy(p);
1782e18cc7bfSMax Laier 			/*
17834cc8daf7SConrad Meyer 			 * The page in the parent is busy and possibly not
17844cc8daf7SConrad Meyer 			 * (yet) valid.  Until its state is finalized by the
17854cc8daf7SConrad Meyer 			 * busy bit owner, we can't tell whether it shadows the
1786*98087a06SJeff Roberson 			 * original page.
1787e18cc7bfSMax Laier 			 */
1788*98087a06SJeff Roberson 			next = vm_object_collapse_scan_wait(object, pp);
1789e18cc7bfSMax Laier 			continue;
1790e18cc7bfSMax Laier 		}
179199a1570aSKonstantin Belousov 
17924bf95d00SJeff Roberson 		if (pp != NULL && vm_page_none_valid(pp)) {
17934bf95d00SJeff Roberson 			/*
17944bf95d00SJeff Roberson 			 * The page was invalid in the parent.  Likely placed
17954bf95d00SJeff Roberson 			 * there by an incomplete fault.  Just remove and
17964bf95d00SJeff Roberson 			 * ignore.  p can replace it.
17974bf95d00SJeff Roberson 			 */
17984bf95d00SJeff Roberson 			if (vm_page_remove(pp))
17994bf95d00SJeff Roberson 				vm_page_free(pp);
18004bf95d00SJeff Roberson 			pp = NULL;
18014bf95d00SJeff Roberson 		}
180299a1570aSKonstantin Belousov 
18034cc8daf7SConrad Meyer 		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
18044cc8daf7SConrad Meyer 			NULL)) {
180599a1570aSKonstantin Belousov 			/*
18064cc8daf7SConrad Meyer 			 * The page already exists in the parent OR swap exists
18074cc8daf7SConrad Meyer 			 * for this location in the parent.  Leave the parent's
18084cc8daf7SConrad Meyer 			 * page alone.  Destroy the original page from the
18094cc8daf7SConrad Meyer 			 * backing object.
181099a1570aSKonstantin Belousov 			 */
1811e946b949SAttilio Rao 			if (backing_object->type == OBJT_SWAP)
18124cc8daf7SConrad Meyer 				swap_pager_freespace(backing_object, p->pindex,
18134cc8daf7SConrad Meyer 				    1);
1814f6d89838SAlan Cox 			KASSERT(!pmap_page_is_mapped(p),
1815f6d89838SAlan Cox 			    ("freeing mapped page %p", p));
18160fd977b3SMark Johnston 			if (vm_page_remove(p))
18172ad1a3f7SMatthew Dillon 				vm_page_free(p);
181863e97555SJeff Roberson 			if (pp != NULL)
181963e97555SJeff Roberson 				vm_page_xunbusy(pp);
18202ad1a3f7SMatthew Dillon 			continue;
18212ad1a3f7SMatthew Dillon 		}
18222ad1a3f7SMatthew Dillon 
1823e946b949SAttilio Rao 		/*
18244cc8daf7SConrad Meyer 		 * Page does not exist in parent, rename the page from the
18254cc8daf7SConrad Meyer 		 * backing object to the main object.
1826e946b949SAttilio Rao 		 *
18274cc8daf7SConrad Meyer 		 * If the page was mapped to a process, it can remain mapped
18283453bca8SAlan Cox 		 * through the rename.  vm_page_rename() will dirty the page.
1829e946b949SAttilio Rao 		 */
1830e946b949SAttilio Rao 		if (vm_page_rename(p, object, new_pindex)) {
183163e97555SJeff Roberson 			vm_page_xunbusy(p);
1832*98087a06SJeff Roberson 			next = vm_object_collapse_scan_wait(object, NULL);
1833e946b949SAttilio Rao 			continue;
1834e946b949SAttilio Rao 		}
183514a5dc17SAttilio Rao 
183614a5dc17SAttilio Rao 		/* Use the old pindex to free the right page. */
1837e946b949SAttilio Rao 		if (backing_object->type == OBJT_SWAP)
183814a5dc17SAttilio Rao 			swap_pager_freespace(backing_object,
183914a5dc17SAttilio Rao 			    new_pindex + backing_offset_index, 1);
1840e946b949SAttilio Rao 
1841f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
1842f8a47341SAlan Cox 		/*
1843f8a47341SAlan Cox 		 * Rename the reservation.
1844f8a47341SAlan Cox 		 */
1845f8a47341SAlan Cox 		vm_reserv_rename(p, object, backing_object,
1846f8a47341SAlan Cox 		    backing_offset_index);
1847f8a47341SAlan Cox #endif
18488da1c098SJeff Roberson 		vm_page_xunbusy(p);
18492ad1a3f7SMatthew Dillon 	}
18502fe6e4d7SDavid Greenman 	return;
18512fe6e4d7SDavid Greenman }
18522fe6e4d7SDavid Greenman 
1853df8bae1dSRodney W. Grimes /*
1854df8bae1dSRodney W. Grimes  *	vm_object_collapse:
1855df8bae1dSRodney W. Grimes  *
1856df8bae1dSRodney W. Grimes  *	Collapse an object with the object backing it.
1857df8bae1dSRodney W. Grimes  *	Pages in the backing object are moved into the
1858df8bae1dSRodney W. Grimes  *	parent, and the backing object is deallocated.
1859df8bae1dSRodney W. Grimes  */
186026f9a767SRodney W. Grimes void
18611b40f8c0SMatthew Dillon vm_object_collapse(vm_object_t object)
1862df8bae1dSRodney W. Grimes {
186398f139daSKonstantin Belousov 	vm_object_t backing_object, new_backing_object;
186498f139daSKonstantin Belousov 
186589f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
186623955314SAlfred Perlstein 
1867df8bae1dSRodney W. Grimes 	while (TRUE) {
1868*98087a06SJeff Roberson 		KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON,
1869*98087a06SJeff Roberson 		    ("collapsing invalid object"));
1870*98087a06SJeff Roberson 
1871df8bae1dSRodney W. Grimes 		/*
1872*98087a06SJeff Roberson 		 * Wait for the backing_object to finish any pending
1873*98087a06SJeff Roberson 		 * collapse so that the caller sees the shortest possible
1874*98087a06SJeff Roberson 		 * shadow chain.
1875df8bae1dSRodney W. Grimes 		 */
1876*98087a06SJeff Roberson 		backing_object = vm_object_backing_collapse_wait(object);
1877*98087a06SJeff Roberson 		if (backing_object == NULL)
1878*98087a06SJeff Roberson 			return;
1879*98087a06SJeff Roberson 
1880*98087a06SJeff Roberson 		KASSERT(object->ref_count > 0 &&
1881*98087a06SJeff Roberson 		    object->ref_count > object->shadow_count,
1882*98087a06SJeff Roberson 		    ("collapse with invalid ref %d or shadow %d count.",
1883*98087a06SJeff Roberson 		    object->ref_count, object->shadow_count));
1884*98087a06SJeff Roberson 		KASSERT((backing_object->flags &
1885*98087a06SJeff Roberson 		    (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
1886*98087a06SJeff Roberson 		    ("vm_object_collapse: Backing object already collapsing."));
1887*98087a06SJeff Roberson 		KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
1888*98087a06SJeff Roberson 		    ("vm_object_collapse: object is already collapsing."));
1889df8bae1dSRodney W. Grimes 
1890f919ebdeSDavid Greenman 		/*
1891*98087a06SJeff Roberson 		 * We know that we can either collapse the backing object, if
1892*98087a06SJeff Roberson 		 * the parent is the only reference to it, or (perhaps) have
18932ad1a3f7SMatthew Dillon 		 * the parent bypass the backing object if the parent happens
18942ad1a3f7SMatthew Dillon 		 * to shadow all the resident pages in the entire backing object.
1895df8bae1dSRodney W. Grimes 		 */
1896df8bae1dSRodney W. Grimes 		if (backing_object->ref_count == 1) {
1897*98087a06SJeff Roberson 			KASSERT(backing_object->shadow_count == 1,
1898*98087a06SJeff Roberson 			    ("vm_object_collapse: shadow_count: %d",
1899*98087a06SJeff Roberson 			    backing_object->shadow_count));
1900aa9bc3b1SKonstantin Belousov 			vm_object_pip_add(object, 1);
1901*98087a06SJeff Roberson 			vm_object_set_flag(object, OBJ_COLLAPSING);
1902aa9bc3b1SKonstantin Belousov 			vm_object_pip_add(backing_object, 1);
1903*98087a06SJeff Roberson 			vm_object_set_flag(backing_object, OBJ_DEAD);
1904aa9bc3b1SKonstantin Belousov 
1905df8bae1dSRodney W. Grimes 			/*
19062ad1a3f7SMatthew Dillon 			 * If there is exactly one reference to the backing
19072ad1a3f7SMatthew Dillon 			 * object, we can collapse it into the parent.
1908df8bae1dSRodney W. Grimes 			 */
1909*98087a06SJeff Roberson 			vm_object_collapse_scan(object);
1910df8bae1dSRodney W. Grimes 
1911f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0
1912f8a47341SAlan Cox 			/*
1913f8a47341SAlan Cox 			 * Break any reservations from backing_object.
1914f8a47341SAlan Cox 			 */
1915f8a47341SAlan Cox 			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1916f8a47341SAlan Cox 				vm_reserv_break_all(backing_object);
1917f8a47341SAlan Cox #endif
1918f8a47341SAlan Cox 
1919df8bae1dSRodney W. Grimes 			/*
1920df8bae1dSRodney W. Grimes 			 * Move the pager from backing_object to object.
1921df8bae1dSRodney W. Grimes 			 */
19226be36525SAlan Cox 			if (backing_object->type == OBJT_SWAP) {
192324a1cce3SDavid Greenman 				/*
1924c7c8dd7eSAlan Cox 				 * swap_pager_copy() can sleep, in which case
1925c7c8dd7eSAlan Cox 				 * the backing_object's and object's locks are
1926c7c8dd7eSAlan Cox 				 * released and reacquired.
1927571a1e92SAttilio Rao 				 * Since swap_pager_copy() is being asked to
1928*98087a06SJeff Roberson 				 * destroy backing_object, it will change the
1929*98087a06SJeff Roberson 				 * type to OBJT_DEFAULT.
193024a1cce3SDavid Greenman 				 */
19311c7c3c6aSMatthew Dillon 				swap_pager_copy(
19321c7c3c6aSMatthew Dillon 				    backing_object,
19331c7c3c6aSMatthew Dillon 				    object,
19341c7c3c6aSMatthew Dillon 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1935c0503609SDavid Greenman 			}
1936*98087a06SJeff Roberson 
1937df8bae1dSRodney W. Grimes 			/*
1938df8bae1dSRodney W. Grimes 			 * Object now shadows whatever backing_object did.
1939df8bae1dSRodney W. Grimes 			 */
1940*98087a06SJeff Roberson 			vm_object_clear_flag(object, OBJ_COLLAPSING);
1941*98087a06SJeff Roberson 			vm_object_backing_transfer(object, backing_object);
19422ad1a3f7SMatthew Dillon 			object->backing_object_offset +=
19432ad1a3f7SMatthew Dillon 			    backing_object->backing_object_offset;
1944*98087a06SJeff Roberson 			VM_OBJECT_WUNLOCK(object);
1945*98087a06SJeff Roberson 			vm_object_pip_wakeup(object);
19462ad1a3f7SMatthew Dillon 
1947df8bae1dSRodney W. Grimes 			/*
1948df8bae1dSRodney W. Grimes 			 * Discard backing_object.
1949df8bae1dSRodney W. Grimes 			 *
19500d94caffSDavid Greenman 			 * Since the backing object has no pages, no pager left,
19510d94caffSDavid Greenman 			 * and no object references within it, all that is
19520d94caffSDavid Greenman 			 * necessary is to dispose of it.
1953df8bae1dSRodney W. Grimes 			 */
19549b4d473aSKonstantin Belousov 			KASSERT(backing_object->ref_count == 1, (
19559b4d473aSKonstantin Belousov "backing_object %p was somehow re-referenced during collapse!",
19569b4d473aSKonstantin Belousov 			    backing_object));
1957aa9bc3b1SKonstantin Belousov 			vm_object_pip_wakeup(backing_object);
1958*98087a06SJeff Roberson 			(void)refcount_release(&backing_object->ref_count);
1959*98087a06SJeff Roberson 			vm_object_terminate(backing_object);
196011542376SAlan Cox 			counter_u64_add(object_collapses, 1);
1961*98087a06SJeff Roberson 			VM_OBJECT_WLOCK(object);
19620d94caffSDavid Greenman 		} else {
1963df8bae1dSRodney W. Grimes 			/*
19642ad1a3f7SMatthew Dillon 			 * If we do not entirely shadow the backing object,
19652ad1a3f7SMatthew Dillon 			 * there is nothing we can do so we give up.
1966*98087a06SJeff Roberson 			 *
1967*98087a06SJeff Roberson 			 * The object lock and backing_object lock must not
1968*98087a06SJeff Roberson 			 * be dropped during this sequence.
1969df8bae1dSRodney W. Grimes 			 */
197058447749SJeff Roberson 			if (!vm_object_scan_all_shadowed(object)) {
197189f6b863SAttilio Rao 				VM_OBJECT_WUNLOCK(backing_object);
19722ad1a3f7SMatthew Dillon 				break;
197324a1cce3SDavid Greenman 			}
1974df8bae1dSRodney W. Grimes 
1975df8bae1dSRodney W. Grimes 			/*
19760d94caffSDavid Greenman 			 * Make the parent shadow the next object in the
19770d94caffSDavid Greenman 			 * chain.  Deallocating backing_object will not remove
19780d94caffSDavid Greenman 			 * it, since its reference count is at least 2.
1979df8bae1dSRodney W. Grimes 			 */
198051b867e5SJeff Roberson 			vm_object_backing_remove_locked(object);
198195e5e988SJohn Dyson 			new_backing_object = backing_object->backing_object;
198251b867e5SJeff Roberson 			if (new_backing_object != NULL) {
1983*98087a06SJeff Roberson 				vm_object_backing_insert_ref(object,
198451b867e5SJeff Roberson 				    new_backing_object);
198595e5e988SJohn Dyson 				object->backing_object_offset +=
198695e5e988SJohn Dyson 				    backing_object->backing_object_offset;
1987de5f6a77SJohn Dyson 			}
1988df8bae1dSRodney W. Grimes 
1989df8bae1dSRodney W. Grimes 			/*
19900d94caffSDavid Greenman 			 * Drop the reference count on backing_object. Since
199122ec553fSAlan Cox 			 * its ref_count was at least 2, it will not vanish.
1992df8bae1dSRodney W. Grimes 			 */
1993*98087a06SJeff Roberson 			(void)refcount_release(&backing_object->ref_count);
1994*98087a06SJeff Roberson 			KASSERT(backing_object->ref_count >= 1, (
1995*98087a06SJeff Roberson "backing_object %p was somehow dereferenced during collapse!",
1996*98087a06SJeff Roberson 			    backing_object));
199789f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(backing_object);
199811542376SAlan Cox 			counter_u64_add(object_bypasses, 1);
1999df8bae1dSRodney W. Grimes 		}
2000df8bae1dSRodney W. Grimes 
2001df8bae1dSRodney W. Grimes 		/*
2002df8bae1dSRodney W. Grimes 		 * Try again with this object's new backing object.
2003df8bae1dSRodney W. Grimes 		 */
2004df8bae1dSRodney W. Grimes 	}
2005df8bae1dSRodney W. Grimes }
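
/*
 * Editor's note: an illustrative sketch, not part of this file.  The
 * deallocation path calls the function above opportunistically whenever
 * dropping a reference may have left a collapsible anonymous chain:
 */
#if 0
	VM_OBJECT_WLOCK(object);
	if ((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON)
		vm_object_collapse(object);
	VM_OBJECT_WUNLOCK(object);
#endif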
2006df8bae1dSRodney W. Grimes 
2007df8bae1dSRodney W. Grimes /*
2008bff99f0dSAlan Cox  *	vm_object_page_remove:
2009df8bae1dSRodney W. Grimes  *
201068855966SAlan Cox  *	For the given object, either frees or invalidates each of the
20116bbee8e2SAlan Cox  *	specified pages.  In general, a page is freed.  However, if a page is
20126bbee8e2SAlan Cox  *	wired for any reason other than the existence of a managed, wired
20136bbee8e2SAlan Cox  *	mapping, then it may be invalidated but not removed from the object.
20146bbee8e2SAlan Cox  *	Pages are specified by the given range ["start", "end") and the option
20156bbee8e2SAlan Cox  *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
20166bbee8e2SAlan Cox  *	extends from "start" to the end of the object.  If the option
20176bbee8e2SAlan Cox  *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
20186bbee8e2SAlan Cox  *	specified range are affected.  If the option OBJPR_NOTMAPPED is
20196bbee8e2SAlan Cox  *	specified, then the pages within the specified range must have no
20206bbee8e2SAlan Cox  *	mappings.  Otherwise, if this option is not specified, any mappings to
20216bbee8e2SAlan Cox  *	the specified pages are removed before the pages are freed or
20226bbee8e2SAlan Cox  *	invalidated.
202368855966SAlan Cox  *
20246bbee8e2SAlan Cox  *	In general, this operation should only be performed on objects that
20256bbee8e2SAlan Cox  *	contain managed pages.  There are, however, two exceptions.  First, it
20266bbee8e2SAlan Cox  *	is performed on the kernel and kmem objects by vm_map_entry_delete().
20276bbee8e2SAlan Cox  *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
20286bbee8e2SAlan Cox  *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
20296bbee8e2SAlan Cox  *	not be specified and the option OBJPR_NOTMAPPED must be specified.
2030df8bae1dSRodney W. Grimes  *
2031df8bae1dSRodney W. Grimes  *	The object must be locked.
2032df8bae1dSRodney W. Grimes  */
203326f9a767SRodney W. Grimes void
2034ecde4b32SAlan Cox vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
20356bbee8e2SAlan Cox     int options)
2036df8bae1dSRodney W. Grimes {
2037d031cff1SMatthew Dillon 	vm_page_t p, next;
2038df8bae1dSRodney W. Grimes 
203989f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
204028634820SAlan Cox 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
20416bbee8e2SAlan Cox 	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
20426bbee8e2SAlan Cox 	    ("vm_object_page_remove: illegal options for object %p", object));
2043ecde4b32SAlan Cox 	if (object->resident_page_count == 0)
20447667839aSAlan Cox 		return;
2045d474eaaaSDoug Rabson 	vm_object_pip_add(object, 1);
204626f9a767SRodney W. Grimes again:
2047b382c10aSKonstantin Belousov 	p = vm_page_find_least(object, start);
20482965a453SKip Macy 
204975741c04SAlan Cox 	/*
20506bbee8e2SAlan Cox 	 * Here, the variable "p" is either (1) the page with the least pindex
20516bbee8e2SAlan Cox 	 * greater than or equal to the parameter "start" or (2) NULL.
205275741c04SAlan Cox 	 */
20536bbee8e2SAlan Cox 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
2054b18bfc3dSJohn Dyson 		next = TAILQ_NEXT(p, listq);
205575741c04SAlan Cox 
205659677d3cSAlan Cox 		/*
20576bbee8e2SAlan Cox 		 * If the page is wired for any reason besides the existence
20586bbee8e2SAlan Cox 		 * of managed, wired mappings, then it cannot be freed.  For
20596bbee8e2SAlan Cox 		 * example, fictitious pages, which represent device memory,
20606bbee8e2SAlan Cox 		 * are inherently wired and cannot be freed.  They can,
20616bbee8e2SAlan Cox 		 * however, be invalidated if the option OBJPR_CLEANONLY is
20626bbee8e2SAlan Cox 		 * not specified.
206359677d3cSAlan Cox 		 */
206463e97555SJeff Roberson 		if (vm_page_tryxbusy(p) == 0) {
20654cdea4a8SJeff Roberson 			vm_page_sleep_if_busy(p, "vmopar");
2066fee2a2faSMark Johnston 			goto again;
2067fee2a2faSMark Johnston 		}
2068d842aa51SMark Johnston 		if (vm_page_wired(p)) {
2069fee2a2faSMark Johnston wired:
2070cf060942SAlan Cox 			if ((options & OBJPR_NOTMAPPED) == 0 &&
2071cf060942SAlan Cox 			    object->ref_count != 0)
20724fec79beSAlan Cox 				pmap_remove_all(p);
20736bbee8e2SAlan Cox 			if ((options & OBJPR_CLEANONLY) == 0) {
20740012f373SJeff Roberson 				vm_page_invalid(p);
2075a28042d1SAlan Cox 				vm_page_undirty(p);
2076a28042d1SAlan Cox 			}
207763e97555SJeff Roberson 			vm_page_xunbusy(p);
207893c5d3a4SKonstantin Belousov 			continue;
20790d94caffSDavid Greenman 		}
208068855966SAlan Cox 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
208168855966SAlan Cox 		    ("vm_object_page_remove: page %p is fictitious", p));
20820012f373SJeff Roberson 		if ((options & OBJPR_CLEANONLY) != 0 &&
20830012f373SJeff Roberson 		    !vm_page_none_valid(p)) {
2084cf060942SAlan Cox 			if ((options & OBJPR_NOTMAPPED) == 0 &&
2085fee2a2faSMark Johnston 			    object->ref_count != 0 &&
2086fee2a2faSMark Johnston 			    !vm_page_try_remove_write(p))
2087fee2a2faSMark Johnston 				goto wired;
208863e97555SJeff Roberson 			if (p->dirty != 0) {
208963e97555SJeff Roberson 				vm_page_xunbusy(p);
209093c5d3a4SKonstantin Belousov 				continue;
20912965a453SKip Macy 			}
209263e97555SJeff Roberson 		}
2093fee2a2faSMark Johnston 		if ((options & OBJPR_NOTMAPPED) == 0 &&
2094fee2a2faSMark Johnston 		    object->ref_count != 0 && !vm_page_try_remove_all(p))
2095fee2a2faSMark Johnston 			goto wired;
20965cd29d0fSMark Johnston 		vm_page_free(p);
20972965a453SKip Macy 	}
2098f919ebdeSDavid Greenman 	vm_object_pip_wakeup(object);
2099c0503609SDavid Greenman }
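
/*
 * Usage sketch (hypothetical; not a caller in this file): free only the
 * clean pages of a locked object, leaving dirty pages resident.  "obj",
 * "start", and "end" are assumed to be supplied by the caller; because
 * OBJPR_NOTMAPPED is not passed, any mappings of the freed pages are
 * removed first.
 *
 *	VM_OBJECT_WLOCK(obj);
 *	vm_object_page_remove(obj, start, end, OBJPR_CLEANONLY);
 *	VM_OBJECT_WUNLOCK(obj);
 */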
2100df8bae1dSRodney W. Grimes 
2101df8bae1dSRodney W. Grimes /*
21023138cd36SMark Johnston  *	vm_object_page_noreuse:
2103936c09acSJohn Baldwin  *
21043138cd36SMark Johnston  *	For the given object, attempt to move the specified pages to
21053138cd36SMark Johnston  *	the head of the inactive queue.  This bypasses regular LRU
21063138cd36SMark Johnston  *	operation and allows the pages to be reused quickly under memory
21073138cd36SMark Johnston  *	pressure.  If a page is wired for any reason, then it will not
21083138cd36SMark Johnston  *	be queued.  Pages are specified by the range ["start", "end").
21093138cd36SMark Johnston  *	As a special case, if "end" is zero, then the range extends from
21103138cd36SMark Johnston  *	"start" to the end of the object.
2111936c09acSJohn Baldwin  *
2112936c09acSJohn Baldwin  *	This operation should only be performed on objects that
211328634820SAlan Cox  *	contain non-fictitious, managed pages.
2114936c09acSJohn Baldwin  *
2115936c09acSJohn Baldwin  *	The object must be locked.
2116936c09acSJohn Baldwin  */
2117936c09acSJohn Baldwin void
21183138cd36SMark Johnston vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2119936c09acSJohn Baldwin {
2120936c09acSJohn Baldwin 	vm_page_t p, next;
2121936c09acSJohn Baldwin 
212252d1addaSAlan Cox 	VM_OBJECT_ASSERT_LOCKED(object);
212328634820SAlan Cox 	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
21243138cd36SMark Johnston 	    ("vm_object_page_noreuse: illegal object %p", object));
2125936c09acSJohn Baldwin 	if (object->resident_page_count == 0)
2126936c09acSJohn Baldwin 		return;
2127936c09acSJohn Baldwin 	p = vm_page_find_least(object, start);
2128936c09acSJohn Baldwin 
2129936c09acSJohn Baldwin 	/*
2130936c09acSJohn Baldwin 	 * Here, the variable "p" is either (1) the page with the least pindex
2131936c09acSJohn Baldwin 	 * greater than or equal to the parameter "start" or (2) NULL.
2132936c09acSJohn Baldwin 	 */
2133936c09acSJohn Baldwin 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
2134936c09acSJohn Baldwin 		next = TAILQ_NEXT(p, listq);
21353138cd36SMark Johnston 		vm_page_deactivate_noreuse(p);
2136936c09acSJohn Baldwin 	}
2137936c09acSJohn Baldwin }
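
/*
 * Usage sketch (hypothetical): hint that none of an object's resident
 * pages will be reused soon, making them cheap to reclaim under memory
 * pressure.  "obj" is assumed to contain only managed, non-fictitious
 * pages; passing an "end" of zero covers the whole object.
 *
 *	VM_OBJECT_RLOCK(obj);
 *	vm_object_page_noreuse(obj, 0, 0);
 *	VM_OBJECT_RUNLOCK(obj);
 */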
2138936c09acSJohn Baldwin 
2139936c09acSJohn Baldwin /*
2140387aabc5SAlan Cox  *	Populate the specified range of the object with valid pages.  Returns
2141387aabc5SAlan Cox  *	TRUE if the range is successfully populated and FALSE otherwise.
2142387aabc5SAlan Cox  *
2143387aabc5SAlan Cox  *	Note: This function should be optimized to pass a larger array of
2144387aabc5SAlan Cox  *	pages to vm_pager_get_pages() before it is applied to a non-
2145387aabc5SAlan Cox  *	OBJT_DEVICE object.
2146387aabc5SAlan Cox  *
2147387aabc5SAlan Cox  *	The object must be locked.
2148387aabc5SAlan Cox  */
2149387aabc5SAlan Cox boolean_t
2150387aabc5SAlan Cox vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2151387aabc5SAlan Cox {
2152093c7f39SGleb Smirnoff 	vm_page_t m;
2153387aabc5SAlan Cox 	vm_pindex_t pindex;
2154387aabc5SAlan Cox 	int rv;
2155387aabc5SAlan Cox 
215689f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
2157387aabc5SAlan Cox 	for (pindex = start; pindex < end; pindex++) {
2158c7575748SJeff Roberson 		rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
2159c7575748SJeff Roberson 		if (rv != VM_PAGER_OK)
2160387aabc5SAlan Cox 			break;
2161c7575748SJeff Roberson 
2162387aabc5SAlan Cox 		/*
2163387aabc5SAlan Cox 		 * Keep "m" busy because a subsequent iteration may unlock
2164387aabc5SAlan Cox 		 * the object.
2165387aabc5SAlan Cox 		 */
2166387aabc5SAlan Cox 	}
2167387aabc5SAlan Cox 	if (pindex > start) {
2168387aabc5SAlan Cox 		m = vm_page_lookup(object, start);
2169387aabc5SAlan Cox 		while (m != NULL && m->pindex < pindex) {
2170c7aebda8SAttilio Rao 			vm_page_xunbusy(m);
2171387aabc5SAlan Cox 			m = TAILQ_NEXT(m, listq);
2172387aabc5SAlan Cox 		}
2173387aabc5SAlan Cox 	}
2174387aabc5SAlan Cox 	return (pindex == end);
2175387aabc5SAlan Cox }
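
/*
 * Usage sketch (hypothetical): make the first "npages" pages of a
 * write-locked object valid before they are used, treating any pager
 * failure as an error.  "obj", "npages", and "error" are assumptions
 * supplied by the caller.
 *
 *	VM_OBJECT_WLOCK(obj);
 *	if (!vm_object_populate(obj, 0, npages))
 *		error = EIO;
 *	VM_OBJECT_WUNLOCK(obj);
 */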
2176387aabc5SAlan Cox 
2177387aabc5SAlan Cox /*
2178df8bae1dSRodney W. Grimes  *	Routine:	vm_object_coalesce
2179df8bae1dSRodney W. Grimes  *	Function:	Coalesces two objects backing up adjoining
2180df8bae1dSRodney W. Grimes  *			regions of memory into a single object.
2181df8bae1dSRodney W. Grimes  *
2182df8bae1dSRodney W. Grimes  *	returns TRUE if objects were combined.
2183df8bae1dSRodney W. Grimes  *
2184df8bae1dSRodney W. Grimes  *	NOTE:	Only works at the moment if the second object is NULL -
2185df8bae1dSRodney W. Grimes  *		if it's not, which object do we lock first?
2186df8bae1dSRodney W. Grimes  *
2187df8bae1dSRodney W. Grimes  *	Parameters:
2188df8bae1dSRodney W. Grimes  *		prev_object	First object to coalesce
2189df8bae1dSRodney W. Grimes  *		prev_offset	Offset into prev_object
2190df8bae1dSRodney W. Grimes  *		prev_size	Size of reference to prev_object
219157a21abaSAlan Cox  *		next_size	Size of reference to the second object
21923364c323SKonstantin Belousov  *		reserved	Indicator that extension region has
21933364c323SKonstantin Belousov  *				swap accounted for
2194df8bae1dSRodney W. Grimes  *
2195df8bae1dSRodney W. Grimes  *	Conditions:
2196df8bae1dSRodney W. Grimes  *	The object must *not* be locked.
2197df8bae1dSRodney W. Grimes  */
21980d94caffSDavid Greenman boolean_t
219957a21abaSAlan Cox vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
22003364c323SKonstantin Belousov     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2201df8bae1dSRodney W. Grimes {
2202ea41812fSAlan Cox 	vm_pindex_t next_pindex;
2203df8bae1dSRodney W. Grimes 
220400e1854aSAlan Cox 	if (prev_object == NULL)
2205df8bae1dSRodney W. Grimes 		return (TRUE);
220663967687SJeff Roberson 	if ((prev_object->flags & OBJ_ANON) == 0)
220730dcfc09SJohn Dyson 		return (FALSE);
220830dcfc09SJohn Dyson 
220963967687SJeff Roberson 	VM_OBJECT_WLOCK(prev_object);
2210df8bae1dSRodney W. Grimes 	/*
2211*98087a06SJeff Roberson 	 * Try to collapse the object first.
2212df8bae1dSRodney W. Grimes 	 */
2213df8bae1dSRodney W. Grimes 	vm_object_collapse(prev_object);
2214df8bae1dSRodney W. Grimes 
2215df8bae1dSRodney W. Grimes 	/*
22160d94caffSDavid Greenman 	 * Can't coalesce if the object has more than one reference, is paged
22170d94caffSDavid Greenman 	 * out, shadows another object, or has a copy elsewhere (any of which
22180d94caffSDavid Greenman 	 * mean that the pages not mapped to prev_entry may be in use anyway).
2219df8bae1dSRodney W. Grimes 	 */
22208cc7e047SJohn Dyson 	if (prev_object->backing_object != NULL) {
222189f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(prev_object);
2222df8bae1dSRodney W. Grimes 		return (FALSE);
2223df8bae1dSRodney W. Grimes 	}
2224a316d390SJohn Dyson 
2225a316d390SJohn Dyson 	prev_size >>= PAGE_SHIFT;
2226a316d390SJohn Dyson 	next_size >>= PAGE_SHIFT;
222757a21abaSAlan Cox 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
22288cc7e047SJohn Dyson 
22290e48e068SMark Johnston 	if (prev_object->ref_count > 1 &&
22300e48e068SMark Johnston 	    prev_object->size != next_pindex &&
22310e48e068SMark Johnston 	    (prev_object->flags & OBJ_ONEMAPPING) == 0) {
223289f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(prev_object);
22338cc7e047SJohn Dyson 		return (FALSE);
22348cc7e047SJohn Dyson 	}
22358cc7e047SJohn Dyson 
2236df8bae1dSRodney W. Grimes 	/*
22373364c323SKonstantin Belousov 	 * Account for the charge.
22383364c323SKonstantin Belousov 	 */
2239ef694c1aSEdward Tomasz Napierala 	if (prev_object->cred != NULL) {
22413364c323SKonstantin Belousov 		/*
22423364c323SKonstantin Belousov 		 * If prev_object was charged, then this mapping,
2243763df3ecSPedro F. Giffuni 		 * although not charged now, may become writable
2244ef694c1aSEdward Tomasz Napierala 		 * later.  A non-NULL cred in the object would prevent
22453364c323SKonstantin Belousov 		 * swap reservation when write access is enabled, so
22463364c323SKonstantin Belousov 		 * reserve swap now.  A failed reservation causes the
22473364c323SKonstantin Belousov 		 * allocation of a separate object for the map entry,
22483364c323SKonstantin Belousov 		 * and swap reservation for that entry is managed at
22493364c323SKonstantin Belousov 		 * the appropriate time.
22503364c323SKonstantin Belousov 		 */
2251ef694c1aSEdward Tomasz Napierala 		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2252ef694c1aSEdward Tomasz Napierala 		    prev_object->cred)) {
22539f790a17SKonstantin Belousov 			VM_OBJECT_WUNLOCK(prev_object);
22543364c323SKonstantin Belousov 			return (FALSE);
22553364c323SKonstantin Belousov 		}
22563364c323SKonstantin Belousov 		prev_object->charge += ptoa(next_size);
22573364c323SKonstantin Belousov 	}
22583364c323SKonstantin Belousov 
22593364c323SKonstantin Belousov 	/*
22600d94caffSDavid Greenman 	 * Remove any pages that may still be in the object from a previous
22610d94caffSDavid Greenman 	 * deallocation.
2262df8bae1dSRodney W. Grimes 	 */
2263ea41812fSAlan Cox 	if (next_pindex < prev_object->size) {
22646bbee8e2SAlan Cox 		vm_object_page_remove(prev_object, next_pindex, next_pindex +
22656bbee8e2SAlan Cox 		    next_size, 0);
2266ea41812fSAlan Cox 		if (prev_object->type == OBJT_SWAP)
2267ea41812fSAlan Cox 			swap_pager_freespace(prev_object, next_pindex,
2268ea41812fSAlan Cox 			    next_size);
22693364c323SKonstantin Belousov #if 0
2270ef694c1aSEdward Tomasz Napierala 		if (prev_object->cred != NULL) {
22713364c323SKonstantin Belousov 			KASSERT(prev_object->charge >=
22723364c323SKonstantin Belousov 			    ptoa(prev_object->size - next_pindex),
22733364c323SKonstantin Belousov 			    ("object %p overcharged 1 %jx %jx", prev_object,
22743364c323SKonstantin Belousov 				(uintmax_t)next_pindex, (uintmax_t)next_size));
22753364c323SKonstantin Belousov 			prev_object->charge -= ptoa(prev_object->size -
22763364c323SKonstantin Belousov 			    next_pindex);
22773364c323SKonstantin Belousov 		}
22783364c323SKonstantin Belousov #endif
2279ea41812fSAlan Cox 	}
2280df8bae1dSRodney W. Grimes 
2281df8bae1dSRodney W. Grimes 	/*
2282df8bae1dSRodney W. Grimes 	 * Extend the object if necessary.
2283df8bae1dSRodney W. Grimes 	 */
2284ea41812fSAlan Cox 	if (next_pindex + next_size > prev_object->size)
2285ea41812fSAlan Cox 		prev_object->size = next_pindex + next_size;
2286df8bae1dSRodney W. Grimes 
228789f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(prev_object);
2288df8bae1dSRodney W. Grimes 	return (TRUE);
2289df8bae1dSRodney W. Grimes }
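
/*
 * Usage sketch (hypothetical, modeled on map-entry extension): try to
 * grow the region backed by "prev_object" by "grow" bytes immediately
 * after the existing [prev_offset, prev_offset + prev_size) reference,
 * falling back to a new anonymous object if coalescing fails.  All of
 * the variables are assumptions supplied by the caller, and the object
 * is not locked on entry, as required.
 *
 *	if (!vm_object_coalesce(prev_object, prev_offset, prev_size,
 *	    grow, FALSE)) {
 *		(allocate a separate anonymous object instead)
 *	}
 */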
2290df8bae1dSRodney W. Grimes 
22917a5a6352SMatthew Dillon void
22927a5a6352SMatthew Dillon vm_object_set_writeable_dirty(vm_object_t object)
22937a5a6352SMatthew Dillon {
22947a5a6352SMatthew Dillon 
229567d0e293SJeff Roberson 	/* Only set for vnodes & tmpfs */
229667d0e293SJeff Roberson 	if (object->type != OBJT_VNODE &&
229767d0e293SJeff Roberson 	    (object->flags & OBJ_TMPFS_NODE) == 0)
22983280870dSKonstantin Belousov 		return;
229967d0e293SJeff Roberson 	atomic_add_int(&object->generation, 1);
23007a5a6352SMatthew Dillon }
23017a5a6352SMatthew Dillon 
230203462509SAlan Cox /*
230303462509SAlan Cox  *	vm_object_unwire:
230403462509SAlan Cox  *
230503462509SAlan Cox  *	For each page offset within the specified range of the given object,
230603462509SAlan Cox  *	find the highest-level page in the shadow chain and unwire it.  A page
230703462509SAlan Cox  *	must exist at every page offset, and the highest-level page must be
230803462509SAlan Cox  *	wired.
230903462509SAlan Cox  */
231003462509SAlan Cox void
231103462509SAlan Cox vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
231203462509SAlan Cox     uint8_t queue)
231303462509SAlan Cox {
231420e4afbfSKonstantin Belousov 	vm_object_t tobject, t1object;
231503462509SAlan Cox 	vm_page_t m, tm;
231603462509SAlan Cox 	vm_pindex_t end_pindex, pindex, tpindex;
231703462509SAlan Cox 	int depth, locked_depth;
231803462509SAlan Cox 
231903462509SAlan Cox 	KASSERT((offset & PAGE_MASK) == 0,
232003462509SAlan Cox 	    ("vm_object_unwire: offset is not page aligned"));
232103462509SAlan Cox 	KASSERT((length & PAGE_MASK) == 0,
232203462509SAlan Cox 	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
232303462509SAlan Cox 	/* The wired count of a fictitious page never changes. */
232403462509SAlan Cox 	if ((object->flags & OBJ_FICTITIOUS) != 0)
232503462509SAlan Cox 		return;
232603462509SAlan Cox 	pindex = OFF_TO_IDX(offset);
232703462509SAlan Cox 	end_pindex = pindex + atop(length);
232820e4afbfSKonstantin Belousov again:
232903462509SAlan Cox 	locked_depth = 1;
233003462509SAlan Cox 	VM_OBJECT_RLOCK(object);
233103462509SAlan Cox 	m = vm_page_find_least(object, pindex);
233203462509SAlan Cox 	while (pindex < end_pindex) {
233303462509SAlan Cox 		if (m == NULL || pindex < m->pindex) {
233403462509SAlan Cox 			/*
233503462509SAlan Cox 			 * The first object in the shadow chain doesn't
233603462509SAlan Cox 			 * contain a page at the current index.  Therefore,
233703462509SAlan Cox 			 * the page must exist in a backing object.
233803462509SAlan Cox 			 */
233903462509SAlan Cox 			tobject = object;
234003462509SAlan Cox 			tpindex = pindex;
234103462509SAlan Cox 			depth = 0;
234203462509SAlan Cox 			do {
234303462509SAlan Cox 				tpindex +=
234403462509SAlan Cox 				    OFF_TO_IDX(tobject->backing_object_offset);
234503462509SAlan Cox 				tobject = tobject->backing_object;
234603462509SAlan Cox 				KASSERT(tobject != NULL,
234703462509SAlan Cox 				    ("vm_object_unwire: missing page"));
234803462509SAlan Cox 				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
234903462509SAlan Cox 					goto next_page;
235003462509SAlan Cox 				depth++;
235103462509SAlan Cox 				if (depth == locked_depth) {
235203462509SAlan Cox 					locked_depth++;
235303462509SAlan Cox 					VM_OBJECT_RLOCK(tobject);
235403462509SAlan Cox 				}
235503462509SAlan Cox 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
235603462509SAlan Cox 			    NULL);
235703462509SAlan Cox 		} else {
235803462509SAlan Cox 			tm = m;
235903462509SAlan Cox 			m = TAILQ_NEXT(m, listq);
236003462509SAlan Cox 		}
236163e97555SJeff Roberson 		if (vm_page_trysbusy(tm) == 0) {
236287e93ea6SMark Johnston 			for (tobject = object; locked_depth >= 1;
236320e4afbfSKonstantin Belousov 			    locked_depth--) {
236420e4afbfSKonstantin Belousov 				t1object = tobject->backing_object;
236587e93ea6SMark Johnston 				if (tm->object != tobject)
236620e4afbfSKonstantin Belousov 					VM_OBJECT_RUNLOCK(tobject);
236720e4afbfSKonstantin Belousov 				tobject = t1object;
236820e4afbfSKonstantin Belousov 			}
236920e4afbfSKonstantin Belousov 			vm_page_busy_sleep(tm, "unwbo", true);
237020e4afbfSKonstantin Belousov 			goto again;
237120e4afbfSKonstantin Belousov 		}
237203462509SAlan Cox 		vm_page_unwire(tm, queue);
237363e97555SJeff Roberson 		vm_page_sunbusy(tm);
237403462509SAlan Cox next_page:
237503462509SAlan Cox 		pindex++;
237603462509SAlan Cox 	}
237703462509SAlan Cox 	/* Release the accumulated object locks. */
237820e4afbfSKonstantin Belousov 	for (tobject = object; locked_depth >= 1; locked_depth--) {
237920e4afbfSKonstantin Belousov 		t1object = tobject->backing_object;
238020e4afbfSKonstantin Belousov 		VM_OBJECT_RUNLOCK(tobject);
238120e4afbfSKonstantin Belousov 		tobject = t1object;
238203462509SAlan Cox 	}
238303462509SAlan Cox }
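
/*
 * Usage sketch (hypothetical, modeled on wired-entry teardown): drop the
 * wiring on every page backing a formerly wired mapping and let those
 * pages age through the active queue.  "entry" is assumed to be a
 * vm_map_entry whose shadow chain was wired page by page.
 *
 *	vm_object_unwire(entry->object.vm_object, entry->offset,
 *	    entry->end - entry->start, PQ_ACTIVE);
 */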
238403462509SAlan Cox 
23850951bd36SEric van Gyzen /*
23860951bd36SEric van Gyzen  * Return the vnode for the given object, or NULL if none exists.
23870951bd36SEric van Gyzen  * For tmpfs objects, the function may return NULL if there is
23880951bd36SEric van Gyzen  * no vnode allocated at the time of the call.
23890951bd36SEric van Gyzen  */
239063e4c6cdSEric van Gyzen struct vnode *
239163e4c6cdSEric van Gyzen vm_object_vnode(vm_object_t object)
239263e4c6cdSEric van Gyzen {
23930951bd36SEric van Gyzen 	struct vnode *vp;
239463e4c6cdSEric van Gyzen 
239563e4c6cdSEric van Gyzen 	VM_OBJECT_ASSERT_LOCKED(object);
23960951bd36SEric van Gyzen 	if (object->type == OBJT_VNODE) {
23970951bd36SEric van Gyzen 		vp = object->handle;
23980951bd36SEric van Gyzen 		KASSERT(vp != NULL, ("%s: OBJT_VNODE has no vnode", __func__));
23990951bd36SEric van Gyzen 	} else if (object->type == OBJT_SWAP &&
24000951bd36SEric van Gyzen 	    (object->flags & OBJ_TMPFS) != 0) {
24010951bd36SEric van Gyzen 		vp = object->un_pager.swp.swp_tmpfs;
24020951bd36SEric van Gyzen 		KASSERT(vp != NULL, ("%s: OBJT_TMPFS has no vnode", __func__));
24030951bd36SEric van Gyzen 	} else {
24040951bd36SEric van Gyzen 		vp = NULL;
24050951bd36SEric van Gyzen 	}
24060951bd36SEric van Gyzen 	return (vp);
240763e4c6cdSEric van Gyzen }
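
/*
 * Usage sketch (hypothetical): resolve and hold the vnode behind a
 * locked object, remembering that tmpfs-backed swap objects may
 * legitimately have no vnode at the time of the call.
 *
 *	VM_OBJECT_RLOCK(obj);
 *	vp = vm_object_vnode(obj);
 *	if (vp != NULL)
 *		vref(vp);
 *	VM_OBJECT_RUNLOCK(obj);
 */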
240863e4c6cdSEric van Gyzen 
2410205be21dSJeff Roberson /*
2411205be21dSJeff Roberson  * Busy the vm object.  This prevents new pages belonging to the object from
2412205be21dSJeff Roberson  * becoming busy.  Existing pages persist as busy.  Callers are responsible
2413205be21dSJeff Roberson  * for checking page state before proceeding.
2414205be21dSJeff Roberson  */
2415205be21dSJeff Roberson void
2416205be21dSJeff Roberson vm_object_busy(vm_object_t obj)
2417205be21dSJeff Roberson {
2418205be21dSJeff Roberson 
2419205be21dSJeff Roberson 	VM_OBJECT_ASSERT_LOCKED(obj);
2420205be21dSJeff Roberson 
2421205be21dSJeff Roberson 	refcount_acquire(&obj->busy);
2422205be21dSJeff Roberson 	/* The fence is required to order loads of page busy. */
2423205be21dSJeff Roberson 	atomic_thread_fence_acq_rel();
2424205be21dSJeff Roberson }
2425205be21dSJeff Roberson 
2426205be21dSJeff Roberson void
2427205be21dSJeff Roberson vm_object_unbusy(vm_object_t obj)
2428205be21dSJeff Roberson {
2429205be21dSJeff Roberson 
2431205be21dSJeff Roberson 	refcount_release(&obj->busy);
2432205be21dSJeff Roberson }
2433205be21dSJeff Roberson 
2434205be21dSJeff Roberson void
2435205be21dSJeff Roberson vm_object_busy_wait(vm_object_t obj, const char *wmesg)
2436205be21dSJeff Roberson {
2437205be21dSJeff Roberson 
2438205be21dSJeff Roberson 	VM_OBJECT_ASSERT_UNLOCKED(obj);
2439205be21dSJeff Roberson 
2440205be21dSJeff Roberson 	if (obj->busy)
2441205be21dSJeff Roberson 		refcount_sleep(&obj->busy, wmesg, PVM);
2442205be21dSJeff Roberson }
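
/*
 * Usage sketch (hypothetical): a reader pins the object's busy state so
 * that no page can newly become busy, drops the object lock, and then
 * inspects pages without holding the lock.  Pages that were already busy
 * must still be handled by the caller.
 *
 *	VM_OBJECT_RLOCK(obj);
 *	vm_object_busy(obj);
 *	VM_OBJECT_RUNLOCK(obj);
 *	(inspect the pages of "obj")
 *	vm_object_unbusy(obj);
 */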
2443205be21dSJeff Roberson 
24445e38e3f5SEric van Gyzen /*
24455e38e3f5SEric van Gyzen  * Return the kvme type of the given object.
24465e38e3f5SEric van Gyzen  * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
24475e38e3f5SEric van Gyzen  */
24485e38e3f5SEric van Gyzen int
24495e38e3f5SEric van Gyzen vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
24505e38e3f5SEric van Gyzen {
24515e38e3f5SEric van Gyzen 
24525e38e3f5SEric van Gyzen 	VM_OBJECT_ASSERT_LOCKED(object);
24535e38e3f5SEric van Gyzen 	if (vpp != NULL)
24545e38e3f5SEric van Gyzen 		*vpp = vm_object_vnode(object);
24555e38e3f5SEric van Gyzen 	switch (object->type) {
24565e38e3f5SEric van Gyzen 	case OBJT_DEFAULT:
24575e38e3f5SEric van Gyzen 		return (KVME_TYPE_DEFAULT);
24585e38e3f5SEric van Gyzen 	case OBJT_VNODE:
24595e38e3f5SEric van Gyzen 		return (KVME_TYPE_VNODE);
24605e38e3f5SEric van Gyzen 	case OBJT_SWAP:
24615e38e3f5SEric van Gyzen 		if ((object->flags & OBJ_TMPFS_NODE) != 0)
24625e38e3f5SEric van Gyzen 			return (KVME_TYPE_VNODE);
24635e38e3f5SEric van Gyzen 		return (KVME_TYPE_SWAP);
24645e38e3f5SEric van Gyzen 	case OBJT_DEVICE:
24655e38e3f5SEric van Gyzen 		return (KVME_TYPE_DEVICE);
24665e38e3f5SEric van Gyzen 	case OBJT_PHYS:
24675e38e3f5SEric van Gyzen 		return (KVME_TYPE_PHYS);
24685e38e3f5SEric van Gyzen 	case OBJT_DEAD:
24695e38e3f5SEric van Gyzen 		return (KVME_TYPE_DEAD);
24705e38e3f5SEric van Gyzen 	case OBJT_SG:
24715e38e3f5SEric van Gyzen 		return (KVME_TYPE_SG);
24725e38e3f5SEric van Gyzen 	case OBJT_MGTDEVICE:
24735e38e3f5SEric van Gyzen 		return (KVME_TYPE_MGTDEVICE);
24745e38e3f5SEric van Gyzen 	default:
24755e38e3f5SEric van Gyzen 		return (KVME_TYPE_UNKNOWN);
24765e38e3f5SEric van Gyzen 	}
24775e38e3f5SEric van Gyzen }
24785e38e3f5SEric van Gyzen 
2479ff87ae35SJohn Baldwin static int
2480ff87ae35SJohn Baldwin sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
2481ff87ae35SJohn Baldwin {
24820ecee546SKonstantin Belousov 	struct kinfo_vmobject *kvo;
2483ff87ae35SJohn Baldwin 	char *fullpath, *freepath;
2484ff87ae35SJohn Baldwin 	struct vnode *vp;
2485ff87ae35SJohn Baldwin 	struct vattr va;
2486ff87ae35SJohn Baldwin 	vm_object_t obj;
2487ff87ae35SJohn Baldwin 	vm_page_t m;
2488ff87ae35SJohn Baldwin 	int count, error;
2489ff87ae35SJohn Baldwin 
2490ff87ae35SJohn Baldwin 	if (req->oldptr == NULL) {
2491ff87ae35SJohn Baldwin 		/*
2492ff87ae35SJohn Baldwin 		 * If an old buffer has not been provided, generate an
2493ff87ae35SJohn Baldwin 		 * estimate of the space needed for a subsequent call.
2494ff87ae35SJohn Baldwin 		 */
2495ff87ae35SJohn Baldwin 		mtx_lock(&vm_object_list_mtx);
2496ff87ae35SJohn Baldwin 		count = 0;
2497ff87ae35SJohn Baldwin 		TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2498ff87ae35SJohn Baldwin 			if (obj->type == OBJT_DEAD)
2499ff87ae35SJohn Baldwin 				continue;
2500ff87ae35SJohn Baldwin 			count++;
2501ff87ae35SJohn Baldwin 		}
2502ff87ae35SJohn Baldwin 		mtx_unlock(&vm_object_list_mtx);
2503ff87ae35SJohn Baldwin 		return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
2504ff87ae35SJohn Baldwin 		    count * 11 / 10));
2505ff87ae35SJohn Baldwin 	}
2506ff87ae35SJohn Baldwin 
25070ecee546SKonstantin Belousov 	kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);
2508ff87ae35SJohn Baldwin 	error = 0;
2509ff87ae35SJohn Baldwin 
2510ff87ae35SJohn Baldwin 	/*
2511ff87ae35SJohn Baldwin 	 * VM objects are type stable and are never removed from the
2512ff87ae35SJohn Baldwin 	 * list once added.  This allows us to safely read obj->object_list
2513ff87ae35SJohn Baldwin 	 * after reacquiring the VM object lock.
2514ff87ae35SJohn Baldwin 	 */
2515ff87ae35SJohn Baldwin 	mtx_lock(&vm_object_list_mtx);
2516ff87ae35SJohn Baldwin 	TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2517ff87ae35SJohn Baldwin 		if (obj->type == OBJT_DEAD)
2518ff87ae35SJohn Baldwin 			continue;
2519ff87ae35SJohn Baldwin 		VM_OBJECT_RLOCK(obj);
2520ff87ae35SJohn Baldwin 		if (obj->type == OBJT_DEAD) {
2521ff87ae35SJohn Baldwin 			VM_OBJECT_RUNLOCK(obj);
2522ff87ae35SJohn Baldwin 			continue;
2523ff87ae35SJohn Baldwin 		}
2524ff87ae35SJohn Baldwin 		mtx_unlock(&vm_object_list_mtx);
25250ecee546SKonstantin Belousov 		kvo->kvo_size = ptoa(obj->size);
25260ecee546SKonstantin Belousov 		kvo->kvo_resident = obj->resident_page_count;
25270ecee546SKonstantin Belousov 		kvo->kvo_ref_count = obj->ref_count;
25280ecee546SKonstantin Belousov 		kvo->kvo_shadow_count = obj->shadow_count;
25290ecee546SKonstantin Belousov 		kvo->kvo_memattr = obj->memattr;
25300ecee546SKonstantin Belousov 		kvo->kvo_active = 0;
25310ecee546SKonstantin Belousov 		kvo->kvo_inactive = 0;
2532ff87ae35SJohn Baldwin 		TAILQ_FOREACH(m, &obj->memq, listq) {
2533ff87ae35SJohn Baldwin 			/*
2534ff87ae35SJohn Baldwin 			 * A page may belong to the object but be
2535ff87ae35SJohn Baldwin 			 * dequeued and set to PQ_NONE while the
2536ff87ae35SJohn Baldwin 			 * object lock is not held.  This makes the
2537ff87ae35SJohn Baldwin 			 * reads of m->queue below racy, and we do not
2538ff87ae35SJohn Baldwin 			 * count pages set to PQ_NONE.  However, this
2539ff87ae35SJohn Baldwin 			 * sysctl is only meant to give an
2540ff87ae35SJohn Baldwin 			 * approximation of the system anyway.
2541ff87ae35SJohn Baldwin 			 */
25425cff1f4dSMark Johnston 			if (m->a.queue == PQ_ACTIVE)
25430ecee546SKonstantin Belousov 				kvo->kvo_active++;
25445cff1f4dSMark Johnston 			else if (m->a.queue == PQ_INACTIVE)
25450ecee546SKonstantin Belousov 				kvo->kvo_inactive++;
2546ff87ae35SJohn Baldwin 		}
2547ff87ae35SJohn Baldwin 
25480ecee546SKonstantin Belousov 		kvo->kvo_vn_fileid = 0;
25490ecee546SKonstantin Belousov 		kvo->kvo_vn_fsid = 0;
25500ecee546SKonstantin Belousov 		kvo->kvo_vn_fsid_freebsd11 = 0;
2551ff87ae35SJohn Baldwin 		freepath = NULL;
2552ff87ae35SJohn Baldwin 		fullpath = "";
25535e38e3f5SEric van Gyzen 		kvo->kvo_type = vm_object_kvme_type(obj, &vp);
25545e38e3f5SEric van Gyzen 		if (vp != NULL)
2555ff87ae35SJohn Baldwin 			vref(vp);
2556ff87ae35SJohn Baldwin 		VM_OBJECT_RUNLOCK(obj);
2557ff87ae35SJohn Baldwin 		if (vp != NULL) {
2558ff87ae35SJohn Baldwin 			vn_fullpath(curthread, vp, &fullpath, &freepath);
2559ff87ae35SJohn Baldwin 			vn_lock(vp, LK_SHARED | LK_RETRY);
2560ff87ae35SJohn Baldwin 			if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
25610ecee546SKonstantin Belousov 				kvo->kvo_vn_fileid = va.va_fileid;
25620ecee546SKonstantin Belousov 				kvo->kvo_vn_fsid = va.va_fsid;
25630ecee546SKonstantin Belousov 				kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
256469921123SKonstantin Belousov 								/* truncate */
2565ff87ae35SJohn Baldwin 			}
2566ff87ae35SJohn Baldwin 			vput(vp);
2567ff87ae35SJohn Baldwin 		}
2568ff87ae35SJohn Baldwin 
25690ecee546SKonstantin Belousov 		strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
2570ff87ae35SJohn Baldwin 		if (freepath != NULL)
2571ff87ae35SJohn Baldwin 			free(freepath, M_TEMP);
2572ff87ae35SJohn Baldwin 
2573ff87ae35SJohn Baldwin 		/* Pack record size down */
25740ecee546SKonstantin Belousov 		kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
25750ecee546SKonstantin Belousov 		    + strlen(kvo->kvo_path) + 1;
25760ecee546SKonstantin Belousov 		kvo->kvo_structsize = roundup(kvo->kvo_structsize,
2577ff87ae35SJohn Baldwin 		    sizeof(uint64_t));
25780ecee546SKonstantin Belousov 		error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
2579ff87ae35SJohn Baldwin 		mtx_lock(&vm_object_list_mtx);
2580ff87ae35SJohn Baldwin 		if (error)
2581ff87ae35SJohn Baldwin 			break;
2582ff87ae35SJohn Baldwin 	}
2583ff87ae35SJohn Baldwin 	mtx_unlock(&vm_object_list_mtx);
25840ecee546SKonstantin Belousov 	free(kvo, M_TEMP);
2585ff87ae35SJohn Baldwin 	return (error);
2586ff87ae35SJohn Baldwin }
2587ff87ae35SJohn Baldwin SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
2588ff87ae35SJohn Baldwin     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
2589ff87ae35SJohn Baldwin     "List of VM objects");
2590ff87ae35SJohn Baldwin 
2591c7c34a24SBruce Evans #include "opt_ddb.h"
2592c3cb3e12SDavid Greenman #ifdef DDB
2593c7c34a24SBruce Evans #include <sys/kernel.h>
2594c7c34a24SBruce Evans 
2595ce9edcf5SPoul-Henning Kamp #include <sys/cons.h>
2596c7c34a24SBruce Evans 
2597c7c34a24SBruce Evans #include <ddb/ddb.h>
2598c7c34a24SBruce Evans 
2599cac597e4SBruce Evans static int
26001b40f8c0SMatthew Dillon _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2601a1f6d91cSDavid Greenman {
2602a1f6d91cSDavid Greenman 	vm_map_t tmpm;
2603a1f6d91cSDavid Greenman 	vm_map_entry_t tmpe;
2604a1f6d91cSDavid Greenman 	vm_object_t obj;
2605a1f6d91cSDavid Greenman 
2606a1f6d91cSDavid Greenman 	if (map == 0)
2607a1f6d91cSDavid Greenman 		return 0;
2608a1f6d91cSDavid Greenman 
2609a1f6d91cSDavid Greenman 	if (entry == 0) {
26102288078cSDoug Moore 		VM_MAP_ENTRY_FOREACH(tmpe, map) {
2611a1f6d91cSDavid Greenman 			if (_vm_object_in_map(map, object, tmpe)) {
2612a1f6d91cSDavid Greenman 				return 1;
2613a1f6d91cSDavid Greenman 			}
2614a1f6d91cSDavid Greenman 		}
26159fdfe602SMatthew Dillon 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
26169fdfe602SMatthew Dillon 		tmpm = entry->object.sub_map;
26172288078cSDoug Moore 		VM_MAP_ENTRY_FOREACH(tmpe, tmpm) {
2618a1f6d91cSDavid Greenman 			if (_vm_object_in_map(tmpm, object, tmpe)) {
2619a1f6d91cSDavid Greenman 				return 1;
2620a1f6d91cSDavid Greenman 			}
2621a1f6d91cSDavid Greenman 		}
26228aef1712SMatthew Dillon 	} else if ((obj = entry->object.vm_object) != NULL) {
262324a1cce3SDavid Greenman 		for (; obj; obj = obj->backing_object)
2624a1f6d91cSDavid Greenman 			if (obj == object) {
2625a1f6d91cSDavid Greenman 				return 1;
2626a1f6d91cSDavid Greenman 			}
2627a1f6d91cSDavid Greenman 	}
2628a1f6d91cSDavid Greenman 	return 0;
2629a1f6d91cSDavid Greenman }
2630a1f6d91cSDavid Greenman 
2631cac597e4SBruce Evans static int
26321b40f8c0SMatthew Dillon vm_object_in_map(vm_object_t object)
2633a1f6d91cSDavid Greenman {
2634a1f6d91cSDavid Greenman 	struct proc *p;
26351005a129SJohn Baldwin 
263660517fd1SJohn Baldwin 	/* sx_slock(&allproc_lock); */
2637f67af5c9SXin LI 	FOREACH_PROC_IN_SYSTEM(p) {
2638a1f6d91cSDavid Greenman 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2639a1f6d91cSDavid Greenman 			continue;
2640553629ebSJake Burkholder 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
264160517fd1SJohn Baldwin 			/* sx_sunlock(&allproc_lock); */
2642a1f6d91cSDavid Greenman 			return 1;
2643a1f6d91cSDavid Greenman 		}
2644553629ebSJake Burkholder 	}
264560517fd1SJohn Baldwin 	/* sx_sunlock(&allproc_lock); */
2646a1f6d91cSDavid Greenman 	if (_vm_object_in_map(kernel_map, object, 0))
2647a1f6d91cSDavid Greenman 		return 1;
2648a1f6d91cSDavid Greenman 	return 0;
2649a1f6d91cSDavid Greenman }
2650a1f6d91cSDavid Greenman 
2651c7c34a24SBruce Evans DB_SHOW_COMMAND(vmochk, vm_object_check)
2652f708ef1bSPoul-Henning Kamp {
2653a1f6d91cSDavid Greenman 	vm_object_t object;
2654a1f6d91cSDavid Greenman 
2655a1f6d91cSDavid Greenman 	/*
2656a1f6d91cSDavid Greenman 	 * make sure that internal objs are in a map somewhere
2657a1f6d91cSDavid Greenman 	 * and none have zero ref counts.
2658a1f6d91cSDavid Greenman 	 */
2659cc64b484SAlfred Perlstein 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
266032362449SKonstantin Belousov 		if ((object->flags & OBJ_ANON) != 0) {
2661a1f6d91cSDavid Greenman 			if (object->ref_count == 0) {
26623efc015bSPeter Wemm 				db_printf("vmochk: internal obj has zero ref count: %ld\n",
26633efc015bSPeter Wemm 					(long)object->size);
2664a1f6d91cSDavid Greenman 			}
2665a1f6d91cSDavid Greenman 			if (!vm_object_in_map(object)) {
2666fc62ef1fSBruce Evans 				db_printf(
2667fc62ef1fSBruce Evans 			"vmochk: internal obj is not in a map: "
2668fc62ef1fSBruce Evans 			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2669fc62ef1fSBruce Evans 				    object->ref_count, (u_long)object->size,
2670fc62ef1fSBruce Evans 				    (u_long)object->size,
2671fc62ef1fSBruce Evans 				    (void *)object->backing_object);
2672a1f6d91cSDavid Greenman 			}
2673a1f6d91cSDavid Greenman 		}
2674a1f6d91cSDavid Greenman 	}
2675a1f6d91cSDavid Greenman }
2676a1f6d91cSDavid Greenman 
267726f9a767SRodney W. Grimes /*
2678df8bae1dSRodney W. Grimes  *	vm_object_print:	[ debug ]
2679df8bae1dSRodney W. Grimes  */
2680c7c34a24SBruce Evans DB_SHOW_COMMAND(object, vm_object_print_static)
2681df8bae1dSRodney W. Grimes {
2682c7c34a24SBruce Evans 	/* XXX convert args. */
2683c7c34a24SBruce Evans 	vm_object_t object = (vm_object_t)addr;
2684c7c34a24SBruce Evans 	boolean_t full = have_addr;
2685c7c34a24SBruce Evans 
2686d031cff1SMatthew Dillon 	vm_page_t p;
2687df8bae1dSRodney W. Grimes 
2688c7c34a24SBruce Evans 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2689c7c34a24SBruce Evans #define	count	was_count
2690c7c34a24SBruce Evans 
2691d031cff1SMatthew Dillon 	int count;
2692df8bae1dSRodney W. Grimes 
2693df8bae1dSRodney W. Grimes 	if (object == NULL)
2694df8bae1dSRodney W. Grimes 		return;
2695df8bae1dSRodney W. Grimes 
2696eb95adefSBruce Evans 	db_iprintf(
2697ef694c1aSEdward Tomasz Napierala 	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2698e47cd172SMaxime Henrion 	    object, (int)object->type, (uintmax_t)object->size,
26993364c323SKonstantin Belousov 	    object->resident_page_count, object->ref_count, object->flags,
2700ef694c1aSEdward Tomasz Napierala 	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2701e47cd172SMaxime Henrion 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
27021c7c3c6aSMatthew Dillon 	    object->shadow_count,
2703eb95adefSBruce Evans 	    object->backing_object ? object->backing_object->ref_count : 0,
2704e47cd172SMaxime Henrion 	    object->backing_object, (uintmax_t)object->backing_object_offset);
2705df8bae1dSRodney W. Grimes 
2706df8bae1dSRodney W. Grimes 	if (!full)
2707df8bae1dSRodney W. Grimes 		return;
2708df8bae1dSRodney W. Grimes 
2709c7c34a24SBruce Evans 	db_indent += 2;
2710df8bae1dSRodney W. Grimes 	count = 0;
2711fc2ffbe6SPoul-Henning Kamp 	TAILQ_FOREACH(p, &object->memq, listq) {
2712df8bae1dSRodney W. Grimes 		if (count == 0)
2713c7c34a24SBruce Evans 			db_iprintf("memory:=");
2714df8bae1dSRodney W. Grimes 		else if (count == 6) {
2715c7c34a24SBruce Evans 			db_printf("\n");
2716c7c34a24SBruce Evans 			db_iprintf(" ...");
2717df8bae1dSRodney W. Grimes 			count = 0;
2718df8bae1dSRodney W. Grimes 		} else
2719c7c34a24SBruce Evans 			db_printf(",");
2720df8bae1dSRodney W. Grimes 		count++;
2721df8bae1dSRodney W. Grimes 
2722e47cd172SMaxime Henrion 		db_printf("(off=0x%jx,page=0x%jx)",
2723e47cd172SMaxime Henrion 		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2724df8bae1dSRodney W. Grimes 	}
2725df8bae1dSRodney W. Grimes 	if (count != 0)
2726c7c34a24SBruce Evans 		db_printf("\n");
2727c7c34a24SBruce Evans 	db_indent -= 2;
2728df8bae1dSRodney W. Grimes }
27295070c7f8SJohn Dyson 
2730c7c34a24SBruce Evans /* XXX. */
2731c7c34a24SBruce Evans #undef count
2732c7c34a24SBruce Evans 
2733c7c34a24SBruce Evans /* XXX need this non-static entry for calling from vm_map_print. */
27345070c7f8SJohn Dyson void
27351b40f8c0SMatthew Dillon vm_object_print(
27361b40f8c0SMatthew Dillon         /* db_expr_t */ long addr,
27371b40f8c0SMatthew Dillon 	boolean_t have_addr,
27381b40f8c0SMatthew Dillon 	/* db_expr_t */ long count,
27391b40f8c0SMatthew Dillon 	char *modif)
2740c7c34a24SBruce Evans {
2741c7c34a24SBruce Evans 	vm_object_print_static(addr, have_addr, count, modif);
2742c7c34a24SBruce Evans }
2743c7c34a24SBruce Evans 
2744c7c34a24SBruce Evans DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
27455070c7f8SJohn Dyson {
27465070c7f8SJohn Dyson 	vm_object_t object;
2747bb2ac86fSKonstantin Belousov 	vm_pindex_t fidx;
2748bb2ac86fSKonstantin Belousov 	vm_paddr_t pa;
2749bb2ac86fSKonstantin Belousov 	vm_page_t m, prev_m;
2750bb2ac86fSKonstantin Belousov 	int rcount, nl, c;
2751cc64b484SAlfred Perlstein 
2752bb2ac86fSKonstantin Belousov 	nl = 0;
2753cc64b484SAlfred Perlstein 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2754fc62ef1fSBruce Evans 		db_printf("new object: %p\n", (void *)object);
27555070c7f8SJohn Dyson 		if (nl > 18) {
27565070c7f8SJohn Dyson 			c = cngetc();
27575070c7f8SJohn Dyson 			if (c != ' ')
27585070c7f8SJohn Dyson 				return;
27595070c7f8SJohn Dyson 			nl = 0;
27605070c7f8SJohn Dyson 		}
27615070c7f8SJohn Dyson 		nl++;
27625070c7f8SJohn Dyson 		rcount = 0;
27635070c7f8SJohn Dyson 		fidx = 0;
2764bb2ac86fSKonstantin Belousov 		pa = -1;
2765bb2ac86fSKonstantin Belousov 		TAILQ_FOREACH(m, &object->memq, listq) {
2766bb2ac86fSKonstantin Belousov 			if (m->pindex > 128)
2767bb2ac86fSKonstantin Belousov 				break;
2768bb2ac86fSKonstantin Belousov 			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2769bb2ac86fSKonstantin Belousov 			    prev_m->pindex + 1 != m->pindex) {
27705070c7f8SJohn Dyson 				if (rcount) {
27713efc015bSPeter Wemm 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
27723efc015bSPeter Wemm 						(long)fidx, rcount, (long)pa);
27735070c7f8SJohn Dyson 					if (nl > 18) {
27745070c7f8SJohn Dyson 						c = cngetc();
27755070c7f8SJohn Dyson 						if (c != ' ')
27765070c7f8SJohn Dyson 							return;
27775070c7f8SJohn Dyson 						nl = 0;
27785070c7f8SJohn Dyson 					}
27795070c7f8SJohn Dyson 					nl++;
27805070c7f8SJohn Dyson 					rcount = 0;
27815070c7f8SJohn Dyson 				}
27825070c7f8SJohn Dyson 			}
27835070c7f8SJohn Dyson 			if (rcount &&
27845070c7f8SJohn Dyson 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
27855070c7f8SJohn Dyson 				++rcount;
27865070c7f8SJohn Dyson 				continue;
27875070c7f8SJohn Dyson 			}
27885070c7f8SJohn Dyson 			if (rcount) {
27892446e4f0SAlan Cox 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
27903efc015bSPeter Wemm 					(long)fidx, rcount, (long)pa);
27915070c7f8SJohn Dyson 				if (nl > 18) {
27925070c7f8SJohn Dyson 					c = cngetc();
27935070c7f8SJohn Dyson 					if (c != ' ')
27945070c7f8SJohn Dyson 						return;
27955070c7f8SJohn Dyson 					nl = 0;
27965070c7f8SJohn Dyson 				}
27975070c7f8SJohn Dyson 				nl++;
27985070c7f8SJohn Dyson 			}
2799bb2ac86fSKonstantin Belousov 			fidx = m->pindex;
28005070c7f8SJohn Dyson 			pa = VM_PAGE_TO_PHYS(m);
28015070c7f8SJohn Dyson 			rcount = 1;
28025070c7f8SJohn Dyson 		}
28035070c7f8SJohn Dyson 		if (rcount) {
28043efc015bSPeter Wemm 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
28053efc015bSPeter Wemm 				(long)fidx, rcount, (long)pa);
28065070c7f8SJohn Dyson 			if (nl > 18) {
28075070c7f8SJohn Dyson 				c = cngetc();
28085070c7f8SJohn Dyson 				if (c != ' ')
28095070c7f8SJohn Dyson 					return;
28105070c7f8SJohn Dyson 				nl = 0;
28115070c7f8SJohn Dyson 			}
28125070c7f8SJohn Dyson 			nl++;
28135070c7f8SJohn Dyson 		}
28145070c7f8SJohn Dyson 	}
28155070c7f8SJohn Dyson }
2816c3cb3e12SDavid Greenman #endif /* DDB */
2817