xref: /freebsd/sys/vm/vm_object.c (revision 59e50df3cd1493537cfa916daf3c51a01b7ff06e)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62 
63 /*
64  *	Virtual memory object module.
65  */
66 
67 #include <sys/cdefs.h>
68 __FBSDID("$FreeBSD$");
69 
70 #include "opt_vm.h"
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/cpuset.h>
75 #include <sys/lock.h>
76 #include <sys/mman.h>
77 #include <sys/mount.h>
78 #include <sys/kernel.h>
79 #include <sys/pctrie.h>
80 #include <sys/sysctl.h>
81 #include <sys/mutex.h>
82 #include <sys/proc.h>		/* for curproc, pageproc */
83 #include <sys/refcount.h>
84 #include <sys/socket.h>
85 #include <sys/resourcevar.h>
86 #include <sys/rwlock.h>
87 #include <sys/user.h>
88 #include <sys/vnode.h>
89 #include <sys/vmmeter.h>
90 #include <sys/sx.h>
91 
92 #include <vm/vm.h>
93 #include <vm/vm_param.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_pager.h>
100 #include <vm/vm_phys.h>
101 #include <vm/vm_pagequeue.h>
102 #include <vm/swap_pager.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_extern.h>
105 #include <vm/vm_radix.h>
106 #include <vm/vm_reserv.h>
107 #include <vm/uma.h>
108 
109 static int old_msync;
110 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
111     "Use old (insecure) msync behavior");
112 
113 static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
114 		    int pagerflags, int flags, boolean_t *clearobjflags,
115 		    boolean_t *eio);
116 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
117 		    boolean_t *clearobjflags);
118 static void	vm_object_qcollapse(vm_object_t object);
119 static void	vm_object_vndeallocate(vm_object_t object);
120 
121 /*
122  *	Virtual memory objects maintain the actual data
123  *	associated with allocated virtual memory.  A given
124  *	page of memory exists within exactly one object.
125  *
126  *	An object is only deallocated when all "references"
127  *	are given up.  Only one "reference" to a given
128  *	region of an object should be writeable.
129  *
130  *	Associated with each object is a list of all resident
131  *	memory pages belonging to that object; this list is
132  *	maintained by the "vm_page" module, and locked by the object's
133  *	lock.
134  *
135  *	Each object also records a "pager" routine which is
136  *	used to retrieve (and store) pages to the proper backing
137  *	storage.  In addition, objects may be backed by other
138  *	objects from which they were virtual-copied.
139  *
140  *	The only items within the object structure which are
141  *	modified after time of creation are:
142  *		reference count		locked by object's lock
143  *		pager routine		locked by object's lock
144  *
145  */
146 
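/*
 * Editor's illustrative sketch (not part of the original source): the
 * typical lifecycle of an anonymous object using the routines defined
 * below.  The variable and length names are hypothetical.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, atop(len));   ref_count == 1
 *	vm_object_reference(obj);      e.g., a second mapping shares it
 *	...
 *	vm_object_deallocate(obj);     drop one reference
 *	vm_object_deallocate(obj);     last reference; the object is
 *	                               terminated and its pages are freed
 */
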
147 struct object_q vm_object_list;
148 struct mtx vm_object_list_mtx;	/* lock for object list and count */
149 
150 struct vm_object kernel_object_store;
151 
152 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
153     "VM object stats");
154 
155 static counter_u64_t object_collapses = EARLY_COUNTER;
156 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
157     &object_collapses,
158     "VM object collapses");
159 
160 static counter_u64_t object_bypasses = EARLY_COUNTER;
161 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
162     &object_bypasses,
163     "VM object bypasses");
164 
165 static void
166 counter_startup(void)
167 {
168 
169 	object_collapses = counter_u64_alloc(M_WAITOK);
170 	object_bypasses = counter_u64_alloc(M_WAITOK);
171 }
172 SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);
173 
174 static uma_zone_t obj_zone;
175 
176 static int vm_object_zinit(void *mem, int size, int flags);
177 
178 #ifdef INVARIANTS
179 static void vm_object_zdtor(void *mem, int size, void *arg);
180 
181 static void
182 vm_object_zdtor(void *mem, int size, void *arg)
183 {
184 	vm_object_t object;
185 
186 	object = (vm_object_t)mem;
187 	KASSERT(object->ref_count == 0,
188 	    ("object %p ref_count = %d", object, object->ref_count));
189 	KASSERT(TAILQ_EMPTY(&object->memq),
190 	    ("object %p has resident pages in its memq", object));
191 	KASSERT(vm_radix_is_empty(&object->rtree),
192 	    ("object %p has resident pages in its trie", object));
193 #if VM_NRESERVLEVEL > 0
194 	KASSERT(LIST_EMPTY(&object->rvq),
195 	    ("object %p has reservations",
196 	    object));
197 #endif
198 	KASSERT(object->paging_in_progress == 0,
199 	    ("object %p paging_in_progress = %d",
200 	    object, object->paging_in_progress));
201 	KASSERT(object->resident_page_count == 0,
202 	    ("object %p resident_page_count = %d",
203 	    object, object->resident_page_count));
204 	KASSERT(object->shadow_count == 0,
205 	    ("object %p shadow_count = %d",
206 	    object, object->shadow_count));
207 	KASSERT(object->type == OBJT_DEAD,
208 	    ("object %p has non-dead type %d",
209 	    object, object->type));
210 }
211 #endif
212 
213 static int
214 vm_object_zinit(void *mem, int size, int flags)
215 {
216 	vm_object_t object;
217 
218 	object = (vm_object_t)mem;
219 	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);
220 
221 	/* These are true for any object that has been freed */
222 	object->type = OBJT_DEAD;
223 	object->ref_count = 0;
224 	vm_radix_init(&object->rtree);
225 	refcount_init(&object->paging_in_progress, 0);
226 	object->resident_page_count = 0;
227 	object->shadow_count = 0;
228 	object->flags = OBJ_DEAD;
229 
230 	mtx_lock(&vm_object_list_mtx);
231 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
232 	mtx_unlock(&vm_object_list_mtx);
233 	return (0);
234 }
235 
236 static void
237 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
238 {
239 
240 	TAILQ_INIT(&object->memq);
241 	LIST_INIT(&object->shadow_head);
242 
243 	object->type = type;
244 	if (type == OBJT_SWAP)
245 		pctrie_init(&object->un_pager.swp.swp_blks);
246 
247 	/*
248 	 * Ensure that swap_pager_swapoff() iteration over object_list
249 	 * sees an up-to-date type and pctrie head if it observed a
250 	 * non-dead object.
251 	 */
252 	atomic_thread_fence_rel();
253 
254 	switch (type) {
255 	case OBJT_DEAD:
256 		panic("_vm_object_allocate: can't create OBJT_DEAD");
257 	case OBJT_DEFAULT:
258 	case OBJT_SWAP:
259 		object->flags = OBJ_ONEMAPPING;
260 		break;
261 	case OBJT_DEVICE:
262 	case OBJT_SG:
263 		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
264 		break;
265 	case OBJT_MGTDEVICE:
266 		object->flags = OBJ_FICTITIOUS;
267 		break;
268 	case OBJT_PHYS:
269 		object->flags = OBJ_UNMANAGED;
270 		break;
271 	case OBJT_VNODE:
272 		object->flags = 0;
273 		break;
274 	default:
275 		panic("_vm_object_allocate: type %d is undefined", type);
276 	}
277 	object->size = size;
278 	object->domain.dr_policy = NULL;
279 	object->generation = 1;
280 	object->ref_count = 1;
281 	object->memattr = VM_MEMATTR_DEFAULT;
282 	object->cred = NULL;
283 	object->charge = 0;
284 	object->handle = NULL;
285 	object->backing_object = NULL;
286 	object->backing_object_offset = (vm_ooffset_t) 0;
287 #if VM_NRESERVLEVEL > 0
288 	LIST_INIT(&object->rvq);
289 #endif
290 	umtx_shm_object_init(object);
291 }
292 
293 /*
294  *	vm_object_init:
295  *
296  *	Initialize the VM objects module.
297  */
298 void
299 vm_object_init(void)
300 {
301 	TAILQ_INIT(&vm_object_list);
302 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
303 
304 	rw_init(&kernel_object->lock, "kernel vm object");
305 	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
306 	    VM_MIN_KERNEL_ADDRESS), kernel_object);
307 #if VM_NRESERVLEVEL > 0
308 	kernel_object->flags |= OBJ_COLORED;
309 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
310 #endif
311 
312 	/*
313 	 * The lock portion of struct vm_object must be type stable due
314 	 * to vm_pageout_fallback_object_lock locking a vm object
315 	 * without holding any references to it.
316 	 */
317 	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
318 #ifdef INVARIANTS
319 	    vm_object_zdtor,
320 #else
321 	    NULL,
322 #endif
323 	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
324 
325 	vm_radix_zinit();
326 }
327 
328 void
329 vm_object_clear_flag(vm_object_t object, u_short bits)
330 {
331 
332 	VM_OBJECT_ASSERT_WLOCKED(object);
333 	object->flags &= ~bits;
334 }
335 
336 /*
337  *	Sets the default memory attribute for the specified object.  Pages
338  *	that are allocated to this object are by default assigned this memory
339  *	attribute.
340  *
341  *	Presently, this function must be called before any pages are allocated
342  *	to the object.  In the future, this requirement may be relaxed for
343  *	"default" and "swap" objects.
344  */
345 int
346 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
347 {
348 
349 	VM_OBJECT_ASSERT_WLOCKED(object);
350 	switch (object->type) {
351 	case OBJT_DEFAULT:
352 	case OBJT_DEVICE:
353 	case OBJT_MGTDEVICE:
354 	case OBJT_PHYS:
355 	case OBJT_SG:
356 	case OBJT_SWAP:
357 	case OBJT_VNODE:
358 		if (!TAILQ_EMPTY(&object->memq))
359 			return (KERN_FAILURE);
360 		break;
361 	case OBJT_DEAD:
362 		return (KERN_INVALID_ARGUMENT);
363 	default:
364 		panic("vm_object_set_memattr: object %p is of undefined type",
365 		    object);
366 	}
367 	object->memattr = memattr;
368 	return (KERN_SUCCESS);
369 }
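
/*
 * Editor's illustrative sketch (not part of the original source):
 * setting the memory attribute on a newly allocated object, before any
 * pages are allocated to it, as vm_object_set_memattr() above requires.
 * "obj" and the chosen attribute are hypothetical; the attribute values
 * are machine-dependent.
 *
 *	VM_OBJECT_WLOCK(obj);
 *	if (vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE) !=
 *	    KERN_SUCCESS)
 *		... handle KERN_FAILURE: pages are already resident ...
 *	VM_OBJECT_WUNLOCK(obj);
 */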
370 
371 void
372 vm_object_pip_add(vm_object_t object, short i)
373 {
374 
375 	refcount_acquiren(&object->paging_in_progress, i);
376 }
377 
378 void
379 vm_object_pip_wakeup(vm_object_t object)
380 {
381 
382 	refcount_release(&object->paging_in_progress);
383 }
384 
385 void
386 vm_object_pip_wakeupn(vm_object_t object, short i)
387 {
388 
389 	refcount_releasen(&object->paging_in_progress, i);
390 }
391 
392 void
393 vm_object_pip_wait(vm_object_t object, char *waitid)
394 {
395 
396 	VM_OBJECT_ASSERT_WLOCKED(object);
397 
398 	while (object->paging_in_progress) {
399 		VM_OBJECT_WUNLOCK(object);
400 		refcount_wait(&object->paging_in_progress, waitid, PVM);
401 		VM_OBJECT_WLOCK(object);
402 	}
403 }
404 
405 void
406 vm_object_pip_wait_unlocked(vm_object_t object, char *waitid)
407 {
408 
409 	VM_OBJECT_ASSERT_UNLOCKED(object);
410 
411 	while (object->paging_in_progress)
412 		refcount_wait(&object->paging_in_progress, waitid, PVM);
413 }
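
/*
 * Editor's illustrative sketch (not part of the original source): the
 * paging-in-progress ("pip") counter brackets paging activity so that
 * vm_object_terminate() and others can wait for it to drain.  "obj" is
 * hypothetical.
 *
 *	VM_OBJECT_WLOCK(obj);
 *	vm_object_pip_add(obj, 1);
 *	VM_OBJECT_WUNLOCK(obj);
 *	... perform the paging I/O without the object lock held ...
 *	vm_object_pip_wakeup(obj);       release the pip reference
 *
 * A thread that must wait for such activity to finish calls
 * vm_object_pip_wait() with the object write-locked, or
 * vm_object_pip_wait_unlocked() without the lock.
 */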
414 
415 /*
416  *	vm_object_allocate:
417  *
418  *	Returns a new object with the given size.
419  */
420 vm_object_t
421 vm_object_allocate(objtype_t type, vm_pindex_t size)
422 {
423 	vm_object_t object;
424 
425 	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
426 	_vm_object_allocate(type, size, object);
427 	return (object);
428 }
429 
430 
431 /*
432  *	vm_object_reference:
433  *
434  *	Gets another reference to the given object.  Note: OBJ_DEAD
435  *	objects can be referenced during final cleaning.
436  */
437 void
438 vm_object_reference(vm_object_t object)
439 {
440 	if (object == NULL)
441 		return;
442 	VM_OBJECT_WLOCK(object);
443 	vm_object_reference_locked(object);
444 	VM_OBJECT_WUNLOCK(object);
445 }
446 
447 /*
448  *	vm_object_reference_locked:
449  *
450  *	Gets another reference to the given object.
451  *
452  *	The object must be locked.
453  */
454 void
455 vm_object_reference_locked(vm_object_t object)
456 {
457 	struct vnode *vp;
458 
459 	VM_OBJECT_ASSERT_WLOCKED(object);
460 	object->ref_count++;
461 	if (object->type == OBJT_VNODE) {
462 		vp = object->handle;
463 		vref(vp);
464 	}
465 }
466 
467 /*
468  * Handle deallocating an object of type OBJT_VNODE.
469  */
470 static void
471 vm_object_vndeallocate(vm_object_t object)
472 {
473 	struct vnode *vp = (struct vnode *) object->handle;
474 
475 	VM_OBJECT_ASSERT_WLOCKED(object);
476 	KASSERT(object->type == OBJT_VNODE,
477 	    ("vm_object_vndeallocate: not a vnode object"));
478 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
479 #ifdef INVARIANTS
480 	if (object->ref_count == 0) {
481 		vn_printf(vp, "vm_object_vndeallocate ");
482 		panic("vm_object_vndeallocate: bad object reference count");
483 	}
484 #endif
485 
486 	if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
487 		umtx_shm_object_terminated(object);
488 
489 	object->ref_count--;
490 
491 	/* vrele may need the vnode lock. */
492 	VM_OBJECT_WUNLOCK(object);
493 	vrele(vp);
494 }
495 
496 /*
497  *	vm_object_deallocate:
498  *
499  *	Release a reference to the specified object,
500  *	gained either through a vm_object_allocate
501  *	or a vm_object_reference call.  When all references
502  *	are gone, storage associated with this object
503  *	may be relinquished.
504  *
505  *	No object may be locked.
506  */
507 void
508 vm_object_deallocate(vm_object_t object)
509 {
510 	vm_object_t temp;
511 	struct vnode *vp;
512 
513 	while (object != NULL) {
514 		VM_OBJECT_WLOCK(object);
515 		if (object->type == OBJT_VNODE) {
516 			vm_object_vndeallocate(object);
517 			return;
518 		}
519 
520 		KASSERT(object->ref_count != 0,
521 			("vm_object_deallocate: object deallocated too many times: %d", object->type));
522 
523 		/*
524 		 * If the reference count goes to 0 we start calling
525 		 * vm_object_terminate() on the object chain.
526 		 * A ref count of 1 may be a special case depending on the
527 		 * shadow count being 0 or 1.
528 		 */
529 		object->ref_count--;
530 		if (object->ref_count > 1) {
531 			VM_OBJECT_WUNLOCK(object);
532 			return;
533 		} else if (object->ref_count == 1) {
534 			if (object->type == OBJT_SWAP &&
535 			    (object->flags & OBJ_TMPFS) != 0) {
536 				vp = object->un_pager.swp.swp_tmpfs;
537 				vhold(vp);
538 				VM_OBJECT_WUNLOCK(object);
539 				vn_lock(vp, LK_SHARED | LK_RETRY);
540 				VM_OBJECT_WLOCK(object);
541 				if (object->type == OBJT_DEAD ||
542 				    object->ref_count != 1) {
543 					VM_OBJECT_WUNLOCK(object);
544 					VOP_UNLOCK(vp, 0);
545 					vdrop(vp);
546 					return;
547 				}
548 				if ((object->flags & OBJ_TMPFS) != 0)
549 					VOP_UNSET_TEXT(vp);
550 				VOP_UNLOCK(vp, 0);
551 				vdrop(vp);
552 			}
553 			if (object->shadow_count == 0 &&
554 			    object->handle == NULL &&
555 			    (object->type == OBJT_DEFAULT ||
556 			    (object->type == OBJT_SWAP &&
557 			    (object->flags & OBJ_TMPFS_NODE) == 0))) {
558 				vm_object_set_flag(object, OBJ_ONEMAPPING);
559 			} else if ((object->shadow_count == 1) &&
560 			    (object->handle == NULL) &&
561 			    (object->type == OBJT_DEFAULT ||
562 			     object->type == OBJT_SWAP)) {
563 				vm_object_t robject;
564 
565 				robject = LIST_FIRST(&object->shadow_head);
566 				KASSERT(robject != NULL,
567 				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
568 					 object->ref_count,
569 					 object->shadow_count));
570 				KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
571 				    ("shadowed tmpfs v_object %p", object));
572 				if (!VM_OBJECT_TRYWLOCK(robject)) {
573 					/*
574 					 * Avoid a potential deadlock.
575 					 */
576 					object->ref_count++;
577 					VM_OBJECT_WUNLOCK(object);
578 					/*
579 					 * More likely than not the thread
580 					 * holding robject's lock has lower
581 					 * priority than the current thread.
582 					 * Let the lower priority thread run.
583 					 */
584 					pause("vmo_de", 1);
585 					continue;
586 				}
587 				/*
588 				 * Collapse object into its shadow unless its
589 				 * shadow is dead.  In that case, object will
590 				 * be deallocated by the thread that is
591 				 * deallocating its shadow.
592 				 */
593 				if ((robject->flags & OBJ_DEAD) == 0 &&
594 				    (robject->handle == NULL) &&
595 				    (robject->type == OBJT_DEFAULT ||
596 				     robject->type == OBJT_SWAP)) {
597 
598 					robject->ref_count++;
599 retry:
600 					if (robject->paging_in_progress) {
601 						VM_OBJECT_WUNLOCK(object);
602 						vm_object_pip_wait(robject,
603 						    "objde1");
604 						temp = robject->backing_object;
605 						if (object == temp) {
606 							VM_OBJECT_WLOCK(object);
607 							goto retry;
608 						}
609 					} else if (object->paging_in_progress) {
610 						VM_OBJECT_WUNLOCK(robject);
611 						VM_OBJECT_WUNLOCK(object);
612 						refcount_wait(
613 						    &object->paging_in_progress,
614 						    "objde2", PVM);
615 						VM_OBJECT_WLOCK(robject);
616 						temp = robject->backing_object;
617 						if (object == temp) {
618 							VM_OBJECT_WLOCK(object);
619 							goto retry;
620 						}
621 					} else
622 						VM_OBJECT_WUNLOCK(object);
623 
624 					if (robject->ref_count == 1) {
625 						robject->ref_count--;
626 						object = robject;
627 						goto doterm;
628 					}
629 					object = robject;
630 					vm_object_collapse(object);
631 					VM_OBJECT_WUNLOCK(object);
632 					continue;
633 				}
634 				VM_OBJECT_WUNLOCK(robject);
635 			}
636 			VM_OBJECT_WUNLOCK(object);
637 			return;
638 		}
639 doterm:
640 		umtx_shm_object_terminated(object);
641 		temp = object->backing_object;
642 		if (temp != NULL) {
643 			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
644 			    ("shadowed tmpfs v_object 2 %p", object));
645 			VM_OBJECT_WLOCK(temp);
646 			LIST_REMOVE(object, shadow_list);
647 			temp->shadow_count--;
648 			VM_OBJECT_WUNLOCK(temp);
649 			object->backing_object = NULL;
650 		}
651 		/*
652 		 * Don't double-terminate; we could be in a termination
653 		 * recursion, since terminating the object may require
654 		 * syncing data to disk.
655 		 */
656 		if ((object->flags & OBJ_DEAD) == 0) {
657 			vm_object_set_flag(object, OBJ_DEAD);
658 			vm_object_terminate(object);
659 		} else
660 			VM_OBJECT_WUNLOCK(object);
661 		object = temp;
662 	}
663 }
664 
665 /*
666  *	vm_object_destroy releases the object's allocation charge and
667  *	frees the space for the object.
668  */
669 void
670 vm_object_destroy(vm_object_t object)
671 {
672 
673 	/*
674 	 * Release the allocation charge.
675 	 */
676 	if (object->cred != NULL) {
677 		swap_release_by_cred(object->charge, object->cred);
678 		object->charge = 0;
679 		crfree(object->cred);
680 		object->cred = NULL;
681 	}
682 
683 	/*
684 	 * Free the space for the object.
685 	 */
686 	uma_zfree(obj_zone, object);
687 }
688 
689 /*
690  *	vm_object_terminate_pages removes any remaining pageable pages
691  *	from the object and resets the object to an empty state.
692  */
693 static void
694 vm_object_terminate_pages(vm_object_t object)
695 {
696 	vm_page_t p, p_next;
697 	struct mtx *mtx;
698 
699 	VM_OBJECT_ASSERT_WLOCKED(object);
700 
701 	mtx = NULL;
702 
703 	/*
704 	 * Free any remaining pageable pages.  This also removes them from the
705 	 * paging queues.  However, don't free wired pages; just remove them
706 	 * from the object.  Rather than incrementally removing each page from
707 	 * the object, the page and object are reset to an empty state.
708 	 */
709 	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
710 		vm_page_assert_unbusied(p);
711 		if ((object->flags & OBJ_UNMANAGED) == 0)
712 			/*
713 			 * vm_page_free_prep() only needs the page
714 			 * lock for managed pages.
715 			 */
716 			vm_page_change_lock(p, &mtx);
717 		p->object = NULL;
718 		if (vm_page_wired(p))
719 			continue;
720 		VM_CNT_INC(v_pfree);
721 		vm_page_free(p);
722 	}
723 	if (mtx != NULL)
724 		mtx_unlock(mtx);
725 
726 	/*
727 	 * If the object contained any pages, then reset it to an empty state.
728 	 * None of the object's fields, including "resident_page_count", were
729 	 * modified by the preceding loop.
730 	 */
731 	if (object->resident_page_count != 0) {
732 		vm_radix_reclaim_allnodes(&object->rtree);
733 		TAILQ_INIT(&object->memq);
734 		object->resident_page_count = 0;
735 		if (object->type == OBJT_VNODE)
736 			vdrop(object->handle);
737 	}
738 }
739 
740 /*
741  *	vm_object_terminate actually destroys the specified object, freeing
742  *	up all previously used resources.
743  *
744  *	The object must be locked.
745  *	This routine may block.
746  */
747 void
748 vm_object_terminate(vm_object_t object)
749 {
750 	VM_OBJECT_ASSERT_WLOCKED(object);
751 	KASSERT((object->flags & OBJ_DEAD) != 0,
752 	    ("terminating non-dead obj %p", object));
753 
754 	/*
755 	 * wait for the pageout daemon to be done with the object
756 	 */
757 	vm_object_pip_wait(object, "objtrm");
758 
759 	KASSERT(!object->paging_in_progress,
760 		("vm_object_terminate: pageout in progress"));
761 
762 	KASSERT(object->ref_count == 0,
763 		("vm_object_terminate: object with references, ref_count=%d",
764 		object->ref_count));
765 
766 	if ((object->flags & OBJ_PG_DTOR) == 0)
767 		vm_object_terminate_pages(object);
768 
769 #if VM_NRESERVLEVEL > 0
770 	if (__predict_false(!LIST_EMPTY(&object->rvq)))
771 		vm_reserv_break_all(object);
772 #endif
773 
774 	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
775 	    object->type == OBJT_SWAP,
776 	    ("%s: non-swap obj %p has cred", __func__, object));
777 
778 	/*
779 	 * Let the pager know object is dead.
780 	 */
781 	vm_pager_deallocate(object);
782 	VM_OBJECT_WUNLOCK(object);
783 
784 	vm_object_destroy(object);
785 }
786 
787 /*
788  * Make the page read-only so that we can clear the object flags.  However, if
789  * this is a nosync mmap then the object is likely to stay dirty so do not
790  * mess with the page and do not clear the object flags.  Returns TRUE if the
791  * page should be flushed, and FALSE otherwise.
792  */
793 static boolean_t
794 vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
795 {
796 
797 	/*
798 	 * If we have been asked to skip nosync pages and this is a
799 	 * nosync page, skip it.  Note that the object flags were not
800 	 * cleared in this case so we do not have to set them.
801 	 */
802 	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
803 		*clearobjflags = FALSE;
804 		return (FALSE);
805 	} else {
806 		pmap_remove_write(p);
807 		return (p->dirty != 0);
808 	}
809 }
810 
811 /*
812  *	vm_object_page_clean
813  *
814  *	Clean all dirty pages in the specified range of the object.  Leaves
815  *	pages on whatever queue they are currently on.  If NOSYNC is set, do
816  *	not write out pages with VPO_NOSYNC set (originally from MAP_NOSYNC),
817  *	leaving the object dirty.
818  *
819  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
820  *	synchronous clustering mode implementation.
821  *
822  *	Odd semantics: if start == end, we clean everything.
823  *
824  *	The object must be locked.
825  *
826  *	Returns FALSE if some page from the range was not written, as
827  *	reported by the pager, and TRUE otherwise.
828  */
829 boolean_t
830 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
831     int flags)
832 {
833 	vm_page_t np, p;
834 	vm_pindex_t pi, tend, tstart;
835 	int curgeneration, n, pagerflags;
836 	boolean_t clearobjflags, eio, res;
837 
838 	VM_OBJECT_ASSERT_WLOCKED(object);
839 
840 	/*
841 	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
842 	 * objects.  The check below prevents the function from
843 	 * operating on non-vnode objects.
844 	 */
845 	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
846 	    object->resident_page_count == 0)
847 		return (TRUE);
848 
849 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
850 	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
851 	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
852 
853 	tstart = OFF_TO_IDX(start);
854 	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
855 	clearobjflags = tstart == 0 && tend >= object->size;
856 	res = TRUE;
857 
858 rescan:
859 	curgeneration = object->generation;
860 
861 	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
862 		pi = p->pindex;
863 		if (pi >= tend)
864 			break;
865 		np = TAILQ_NEXT(p, listq);
866 		if (p->valid == 0)
867 			continue;
868 		if (vm_page_sleep_if_busy(p, "vpcwai")) {
869 			if (object->generation != curgeneration) {
870 				if ((flags & OBJPC_SYNC) != 0)
871 					goto rescan;
872 				else
873 					clearobjflags = FALSE;
874 			}
875 			np = vm_page_find_least(object, pi);
876 			continue;
877 		}
878 		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
879 			continue;
880 
881 		n = vm_object_page_collect_flush(object, p, pagerflags,
882 		    flags, &clearobjflags, &eio);
883 		if (eio) {
884 			res = FALSE;
885 			clearobjflags = FALSE;
886 		}
887 		if (object->generation != curgeneration) {
888 			if ((flags & OBJPC_SYNC) != 0)
889 				goto rescan;
890 			else
891 				clearobjflags = FALSE;
892 		}
893 
894 		/*
895 		 * If the VOP_PUTPAGES() did a truncated write, such
896 		 * that even the first page of the run is not fully
897 		 * written, vm_pageout_flush() returns 0 as the run
898 		 * length.  Since the condition that caused the truncated
899 		 * write may be permanent, e.g., exhausted free space,
900 		 * accepting n == 0 would cause an infinite loop.
901 		 *
902 		 * Advancing the iterator leaves the unwritten page
903 		 * behind, but there is not much we can do if the
904 		 * filesystem refuses to write it.
905 		 */
906 		if (n == 0) {
907 			n = 1;
908 			clearobjflags = FALSE;
909 		}
910 		np = vm_page_find_least(object, pi + n);
911 	}
912 #if 0
913 	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
914 #endif
915 
916 	if (clearobjflags)
917 		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
918 	return (res);
919 }
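
/*
 * Editor's illustrative sketch (not part of the original source): a
 * minimal synchronous use of vm_object_page_clean(), flushing every
 * dirty page of a vnode-backed object.  Passing start == end == 0
 * selects the whole object; "obj" and "ok" are hypothetical, and the
 * vnode locking performed by vm_object_sync() below is omitted here.
 *
 *	VM_OBJECT_WLOCK(obj);
 *	ok = vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
 *	VM_OBJECT_WUNLOCK(obj);
 */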
920 
921 static int
922 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
923     int flags, boolean_t *clearobjflags, boolean_t *eio)
924 {
925 	vm_page_t ma[vm_pageout_page_count], p_first, tp;
926 	int count, i, mreq, runlen;
927 
928 	vm_page_lock_assert(p, MA_NOTOWNED);
929 	VM_OBJECT_ASSERT_WLOCKED(object);
930 
931 	count = 1;
932 	mreq = 0;
933 
934 	for (tp = p; count < vm_pageout_page_count; count++) {
935 		tp = vm_page_next(tp);
936 		if (tp == NULL || vm_page_busied(tp))
937 			break;
938 		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
939 			break;
940 	}
941 
942 	for (p_first = p; count < vm_pageout_page_count; count++) {
943 		tp = vm_page_prev(p_first);
944 		if (tp == NULL || vm_page_busied(tp))
945 			break;
946 		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
947 			break;
948 		p_first = tp;
949 		mreq++;
950 	}
951 
952 	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
953 		ma[i] = tp;
954 
955 	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
956 	return (runlen);
957 }
958 
959 /*
960  * Note that there is absolutely no sense in writing out
961  * anonymous objects, so we track down the vnode object
962  * to write out.
963  * We invalidate (remove) all pages from the address space
964  * for semantic correctness.
965  *
966  * If the backing object is a device object with unmanaged pages, then any
967  * mappings to the specified range of pages must be removed before this
968  * function is called.
969  *
970  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
971  * may start out with a NULL object.
972  */
973 boolean_t
974 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
975     boolean_t syncio, boolean_t invalidate)
976 {
977 	vm_object_t backing_object;
978 	struct vnode *vp;
979 	struct mount *mp;
980 	int error, flags, fsync_after;
981 	boolean_t res;
982 
983 	if (object == NULL)
984 		return (TRUE);
985 	res = TRUE;
986 	error = 0;
987 	VM_OBJECT_WLOCK(object);
988 	while ((backing_object = object->backing_object) != NULL) {
989 		VM_OBJECT_WLOCK(backing_object);
990 		offset += object->backing_object_offset;
991 		VM_OBJECT_WUNLOCK(object);
992 		object = backing_object;
993 		if (object->size < OFF_TO_IDX(offset + size))
994 			size = IDX_TO_OFF(object->size) - offset;
995 	}
996 	/*
997 	 * Flush pages if writing is allowed, invalidate them
998 	 * if invalidation requested.  Pages undergoing I/O
999 	 * will be ignored by vm_object_page_remove().
1000 	 *
1001 	 * We cannot lock the vnode and then wait for paging
1002 	 * to complete without deadlocking against vm_fault.
1003 	 * Instead we simply call vm_object_page_remove() and
1004 	 * allow it to block internally on a page-by-page
1005 	 * basis when it encounters pages undergoing async
1006 	 * I/O.
1007 	 */
1008 	if (object->type == OBJT_VNODE &&
1009 	    (object->flags & OBJ_MIGHTBEDIRTY) != 0 &&
1010 	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
1011 		VM_OBJECT_WUNLOCK(object);
1012 		(void) vn_start_write(vp, &mp, V_WAIT);
1013 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1014 		if (syncio && !invalidate && offset == 0 &&
1015 		    atop(size) == object->size) {
1016 			/*
1017 			 * If syncing the whole mapping of the file,
1018 			 * it is faster to schedule all the writes in
1019 			 * async mode, which also allows clustering,
1020 			 * and then wait for the I/O to complete.
1021 			 */
1022 			flags = 0;
1023 			fsync_after = TRUE;
1024 		} else {
1025 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1026 			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
1027 			fsync_after = FALSE;
1028 		}
1029 		VM_OBJECT_WLOCK(object);
1030 		res = vm_object_page_clean(object, offset, offset + size,
1031 		    flags);
1032 		VM_OBJECT_WUNLOCK(object);
1033 		if (fsync_after)
1034 			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
1035 		VOP_UNLOCK(vp, 0);
1036 		vn_finished_write(mp);
1037 		if (error != 0)
1038 			res = FALSE;
1039 		VM_OBJECT_WLOCK(object);
1040 	}
1041 	if ((object->type == OBJT_VNODE ||
1042 	     object->type == OBJT_DEVICE) && invalidate) {
1043 		if (object->type == OBJT_DEVICE)
1044 			/*
1045 			 * The option OBJPR_NOTMAPPED must be passed here
1046 			 * because vm_object_page_remove() cannot remove
1047 			 * unmanaged mappings.
1048 			 */
1049 			flags = OBJPR_NOTMAPPED;
1050 		else if (old_msync)
1051 			flags = 0;
1052 		else
1053 			flags = OBJPR_CLEANONLY;
1054 		vm_object_page_remove(object, OFF_TO_IDX(offset),
1055 		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
1056 	}
1057 	VM_OBJECT_WUNLOCK(object);
1058 	return (res);
1059 }
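
/*
 * Editor's illustrative sketch (not part of the original source): how an
 * msync(2)-style caller might invoke vm_object_sync() for a mapped
 * range.  "obj", "offset", "size", and "flags" are hypothetical; the
 * object must not be locked by the caller.
 *
 *	(void)vm_object_sync(obj, offset, size,
 *	    (flags & MS_ASYNC) == 0,            syncio
 *	    (flags & MS_INVALIDATE) != 0);      invalidate
 */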
1060 
1061 /*
1062  * Determine whether the given advice can be applied to the object.  Advice is
1063  * not applied to unmanaged pages since they never belong to page queues, and
1064  * since MADV_FREE is destructive, it can apply only to anonymous pages that
1065  * have been mapped at most once.
1066  */
1067 static bool
1068 vm_object_advice_applies(vm_object_t object, int advice)
1069 {
1070 
1071 	if ((object->flags & OBJ_UNMANAGED) != 0)
1072 		return (false);
1073 	if (advice != MADV_FREE)
1074 		return (true);
1075 	return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) &&
1076 	    (object->flags & OBJ_ONEMAPPING) != 0);
1077 }
1078 
1079 static void
1080 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
1081     vm_size_t size)
1082 {
1083 
1084 	if (advice == MADV_FREE && object->type == OBJT_SWAP)
1085 		swap_pager_freespace(object, pindex, size);
1086 }
1087 
1088 /*
1089  *	vm_object_madvise:
1090  *
1091  *	Implements the madvise function at the object/page level.
1092  *
1093  *	MADV_WILLNEED	(any object)
1094  *
1095  *	    Activate the specified pages if they are resident.
1096  *
1097  *	MADV_DONTNEED	(any object)
1098  *
1099  *	    Deactivate the specified pages if they are resident.
1100  *
1101  *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
1102  *			 OBJ_ONEMAPPING only)
1103  *
1104  *	    Deactivate and clean the specified pages if they are
1105  *	    resident.  This permits the process to reuse the pages
1106  *	    without faulting or the kernel to reclaim the pages
1107  *	    without I/O.
1108  */
1109 void
1110 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1111     int advice)
1112 {
1113 	vm_pindex_t tpindex;
1114 	vm_object_t backing_object, tobject;
1115 	vm_page_t m, tm;
1116 
1117 	if (object == NULL)
1118 		return;
1119 
1120 relookup:
1121 	VM_OBJECT_WLOCK(object);
1122 	if (!vm_object_advice_applies(object, advice)) {
1123 		VM_OBJECT_WUNLOCK(object);
1124 		return;
1125 	}
1126 	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
1127 		tobject = object;
1128 
1129 		/*
1130 		 * If the next page isn't resident in the top-level object, we
1131 		 * need to search the shadow chain.  When applying MADV_FREE, we
1132 		 * take care to release any swap space used to store
1133 		 * non-resident pages.
1134 		 */
1135 		if (m == NULL || pindex < m->pindex) {
1136 			/*
1137 			 * Optimize a common case: if the top-level object has
1138 			 * no backing object, we can skip over the non-resident
1139 			 * range in constant time.
1140 			 */
1141 			if (object->backing_object == NULL) {
1142 				tpindex = (m != NULL && m->pindex < end) ?
1143 				    m->pindex : end;
1144 				vm_object_madvise_freespace(object, advice,
1145 				    pindex, tpindex - pindex);
1146 				if ((pindex = tpindex) == end)
1147 					break;
1148 				goto next_page;
1149 			}
1150 
1151 			tpindex = pindex;
1152 			do {
1153 				vm_object_madvise_freespace(tobject, advice,
1154 				    tpindex, 1);
1155 				/*
1156 				 * Prepare to search the next object in the
1157 				 * chain.
1158 				 */
1159 				backing_object = tobject->backing_object;
1160 				if (backing_object == NULL)
1161 					goto next_pindex;
1162 				VM_OBJECT_WLOCK(backing_object);
1163 				tpindex +=
1164 				    OFF_TO_IDX(tobject->backing_object_offset);
1165 				if (tobject != object)
1166 					VM_OBJECT_WUNLOCK(tobject);
1167 				tobject = backing_object;
1168 				if (!vm_object_advice_applies(tobject, advice))
1169 					goto next_pindex;
1170 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
1171 			    NULL);
1172 		} else {
1173 next_page:
1174 			tm = m;
1175 			m = TAILQ_NEXT(m, listq);
1176 		}
1177 
1178 		/*
1179 		 * If the page is not in a normal state, skip it.
1180 		 */
1181 		if (tm->valid != VM_PAGE_BITS_ALL)
1182 			goto next_pindex;
1183 		vm_page_lock(tm);
1184 		if (vm_page_wired(tm)) {
1185 			vm_page_unlock(tm);
1186 			goto next_pindex;
1187 		}
1188 		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
1189 		    ("vm_object_madvise: page %p is fictitious", tm));
1190 		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
1191 		    ("vm_object_madvise: page %p is not managed", tm));
1192 		if (vm_page_busied(tm)) {
1193 			if (object != tobject)
1194 				VM_OBJECT_WUNLOCK(tobject);
1195 			VM_OBJECT_WUNLOCK(object);
1196 			if (advice == MADV_WILLNEED) {
1197 				/*
1198 				 * Reference the page before unlocking and
1199 				 * sleeping so that the page daemon is less
1200 				 * likely to reclaim it.
1201 				 */
1202 				vm_page_aflag_set(tm, PGA_REFERENCED);
1203 			}
1204 			vm_page_busy_sleep(tm, "madvpo", false);
1205 			goto relookup;
1206 		}
1207 		vm_page_advise(tm, advice);
1208 		vm_page_unlock(tm);
1209 		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
1210 next_pindex:
1211 		if (tobject != object)
1212 			VM_OBJECT_WUNLOCK(tobject);
1213 	}
1214 	VM_OBJECT_WUNLOCK(object);
1215 }
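
/*
 * Editor's illustrative sketch (not part of the original source):
 * applying advice to the pages backing a mapped range, in the style of
 * the madvise(2) path.  "obj", "offset", and "size" are hypothetical;
 * the object is locked and unlocked internally.
 *
 *	vm_object_madvise(obj, OFF_TO_IDX(offset),
 *	    OFF_TO_IDX(offset + size), MADV_DONTNEED);
 */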
1216 
1217 /*
1218  *	vm_object_shadow:
1219  *
1220  *	Create a new object which is backed by the
1221  *	specified existing object range.  The source
1222  *	object reference is deallocated.
1223  *
1224  *	The new object and offset into that object
1225  *	are returned in the source parameters.
1226  */
1227 void
1228 vm_object_shadow(
1229 	vm_object_t *object,	/* IN/OUT */
1230 	vm_ooffset_t *offset,	/* IN/OUT */
1231 	vm_size_t length)
1232 {
1233 	vm_object_t source;
1234 	vm_object_t result;
1235 
1236 	source = *object;
1237 
1238 	/*
1239 	 * Don't create the new object if the old object isn't shared.
1240 	 */
1241 	if (source != NULL) {
1242 		VM_OBJECT_WLOCK(source);
1243 		if (source->ref_count == 1 &&
1244 		    source->handle == NULL &&
1245 		    (source->type == OBJT_DEFAULT ||
1246 		     source->type == OBJT_SWAP)) {
1247 			VM_OBJECT_WUNLOCK(source);
1248 			return;
1249 		}
1250 		VM_OBJECT_WUNLOCK(source);
1251 	}
1252 
1253 	/*
1254 	 * Allocate a new object with the given length.
1255 	 */
1256 	result = vm_object_allocate(OBJT_DEFAULT, atop(length));
1257 
1258 	/*
1259 	 * The new object shadows the source object, adding a reference to it.
1260 	 * Our caller changes its reference to point to the new object,
1261 	 * removing a reference to the source object.  Net result: no change
1262 	 * of reference count.
1263 	 *
1264 	 * Try to optimize the result object's page color when shadowing
1265 	 * in order to maintain page coloring consistency in the combined
1266 	 * shadowed object.
1267 	 */
1268 	result->backing_object = source;
1269 	/*
1270 	 * Store the offset into the source object, and fix up the offset into
1271 	 * the new object.
1272 	 */
1273 	result->backing_object_offset = *offset;
1274 	if (source != NULL) {
1275 		VM_OBJECT_WLOCK(source);
1276 		result->domain = source->domain;
1277 		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1278 		source->shadow_count++;
1279 #if VM_NRESERVLEVEL > 0
1280 		result->flags |= source->flags & OBJ_COLORED;
1281 		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
1282 		    ((1 << (VM_NFREEORDER - 1)) - 1);
1283 #endif
1284 		VM_OBJECT_WUNLOCK(source);
1285 	}
1286 
1287 
1288 	/*
1289 	 * Return the new object and offset through the IN/OUT parameters.
1290 	 */
1291 	*offset = 0;
1292 	*object = result;
1293 }
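
/*
 * Editor's illustrative sketch (not part of the original source): how a
 * copy-on-write mapping is typically set up with vm_object_shadow().
 * "entry_object", "entry_offset", and "len" are hypothetical stand-ins
 * for a map entry's object and offset; on return they refer to the new
 * shadow object (or to the original object if it was not shared).
 *
 *	vm_object_t obj = entry_object;
 *	vm_ooffset_t off = entry_offset;
 *
 *	vm_object_shadow(&obj, &off, len);
 *	entry_object = obj;
 *	entry_offset = off;
 */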
1294 
1295 /*
1296  *	vm_object_split:
1297  *
1298  * Split the pages in a map entry into a new object.  This affords
1299  * easier removal of unused pages, and keeps object inheritance from
1300  * having a negative impact on memory usage.
1301  */
1302 void
1303 vm_object_split(vm_map_entry_t entry)
1304 {
1305 	vm_page_t m, m_next;
1306 	vm_object_t orig_object, new_object, source;
1307 	vm_pindex_t idx, offidxstart;
1308 	vm_size_t size;
1309 
1310 	orig_object = entry->object.vm_object;
1311 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1312 		return;
1313 	if (orig_object->ref_count <= 1)
1314 		return;
1315 	VM_OBJECT_WUNLOCK(orig_object);
1316 
1317 	offidxstart = OFF_TO_IDX(entry->offset);
1318 	size = atop(entry->end - entry->start);
1319 
1320 	/*
1321 	 * If swap_pager_copy() is later called, it will convert new_object
1322 	 * into a swap object.
1323 	 */
1324 	new_object = vm_object_allocate(OBJT_DEFAULT, size);
1325 
1326 	/*
1327 	 * At this point, the new object is still private, so the order in
1328 	 * which the original and new objects are locked does not matter.
1329 	 */
1330 	VM_OBJECT_WLOCK(new_object);
1331 	VM_OBJECT_WLOCK(orig_object);
1332 	new_object->domain = orig_object->domain;
1333 	source = orig_object->backing_object;
1334 	if (source != NULL) {
1335 		VM_OBJECT_WLOCK(source);
1336 		if ((source->flags & OBJ_DEAD) != 0) {
1337 			VM_OBJECT_WUNLOCK(source);
1338 			VM_OBJECT_WUNLOCK(orig_object);
1339 			VM_OBJECT_WUNLOCK(new_object);
1340 			vm_object_deallocate(new_object);
1341 			VM_OBJECT_WLOCK(orig_object);
1342 			return;
1343 		}
1344 		LIST_INSERT_HEAD(&source->shadow_head,
1345 				  new_object, shadow_list);
1346 		source->shadow_count++;
1347 		vm_object_reference_locked(source);	/* for new_object */
1348 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
1349 		VM_OBJECT_WUNLOCK(source);
1350 		new_object->backing_object_offset =
1351 			orig_object->backing_object_offset + entry->offset;
1352 		new_object->backing_object = source;
1353 	}
1354 	if (orig_object->cred != NULL) {
1355 		new_object->cred = orig_object->cred;
1356 		crhold(orig_object->cred);
1357 		new_object->charge = ptoa(size);
1358 		KASSERT(orig_object->charge >= ptoa(size),
1359 		    ("orig_object->charge < ptoa(size)"));
1360 		orig_object->charge -= ptoa(size);
1361 	}
1362 retry:
1363 	m = vm_page_find_least(orig_object, offidxstart);
1364 	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
1365 	    m = m_next) {
1366 		m_next = TAILQ_NEXT(m, listq);
1367 
1368 		/*
1369 		 * We must wait for pending I/O to complete before we can
1370 		 * rename the page.
1371 		 *
1372 		 * We do not have to VM_PROT_NONE the page as mappings should
1373 		 * not be changed by this operation.
1374 		 */
1375 		if (vm_page_busied(m)) {
1376 			VM_OBJECT_WUNLOCK(new_object);
1377 			vm_page_lock(m);
1378 			VM_OBJECT_WUNLOCK(orig_object);
1379 			vm_page_busy_sleep(m, "spltwt", false);
1380 			VM_OBJECT_WLOCK(orig_object);
1381 			VM_OBJECT_WLOCK(new_object);
1382 			goto retry;
1383 		}
1384 
1385 		/* vm_page_rename() will dirty the page. */
1386 		if (vm_page_rename(m, new_object, idx)) {
1387 			VM_OBJECT_WUNLOCK(new_object);
1388 			VM_OBJECT_WUNLOCK(orig_object);
1389 			vm_radix_wait();
1390 			VM_OBJECT_WLOCK(orig_object);
1391 			VM_OBJECT_WLOCK(new_object);
1392 			goto retry;
1393 		}
1394 #if VM_NRESERVLEVEL > 0
1395 		/*
1396 		 * If some of the reservation's allocated pages remain with
1397 		 * the original object, then transferring the reservation to
1398 		 * the new object is neither particularly beneficial nor
1399 		 * particularly harmful as compared to leaving the reservation
1400 		 * with the original object.  If, however, all of the
1401 		 * reservation's allocated pages are transferred to the new
1402 		 * object, then transferring the reservation is typically
1403 		 * beneficial.  Determining which of these two cases applies
1404 		 * would be more costly than unconditionally renaming the
1405 		 * reservation.
1406 		 */
1407 		vm_reserv_rename(m, new_object, orig_object, offidxstart);
1408 #endif
1409 		if (orig_object->type == OBJT_SWAP)
1410 			vm_page_xbusy(m);
1411 	}
1412 	if (orig_object->type == OBJT_SWAP) {
1413 		/*
1414 		 * swap_pager_copy() can sleep, in which case the orig_object's
1415 		 * and new_object's locks are released and reacquired.
1416 		 */
1417 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
1418 		TAILQ_FOREACH(m, &new_object->memq, listq)
1419 			vm_page_xunbusy(m);
1420 	}
1421 	VM_OBJECT_WUNLOCK(orig_object);
1422 	VM_OBJECT_WUNLOCK(new_object);
1423 	entry->object.vm_object = new_object;
1424 	entry->offset = 0LL;
1425 	vm_object_deallocate(orig_object);
1426 	VM_OBJECT_WLOCK(new_object);
1427 }
1428 
1429 #define	OBSC_COLLAPSE_NOWAIT	0x0002
1430 #define	OBSC_COLLAPSE_WAIT	0x0004
1431 
1432 static vm_page_t
1433 vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
1434     int op)
1435 {
1436 	vm_object_t backing_object;
1437 
1438 	VM_OBJECT_ASSERT_WLOCKED(object);
1439 	backing_object = object->backing_object;
1440 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
1441 
1442 	KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
1443 	KASSERT(p == NULL || p->object == object || p->object == backing_object,
1444 	    ("invalid ownership %p %p %p", p, object, backing_object));
1445 	if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
1446 		return (next);
1447 	if (p != NULL)
1448 		vm_page_lock(p);
1449 	VM_OBJECT_WUNLOCK(object);
1450 	VM_OBJECT_WUNLOCK(backing_object);
1451 	/* The page is only NULL when rename fails. */
1452 	if (p == NULL)
1453 		vm_radix_wait();
1454 	else
1455 		vm_page_busy_sleep(p, "vmocol", false);
1456 	VM_OBJECT_WLOCK(object);
1457 	VM_OBJECT_WLOCK(backing_object);
1458 	return (TAILQ_FIRST(&backing_object->memq));
1459 }
1460 
1461 static bool
1462 vm_object_scan_all_shadowed(vm_object_t object)
1463 {
1464 	vm_object_t backing_object;
1465 	vm_page_t p, pp;
1466 	vm_pindex_t backing_offset_index, new_pindex, pi, ps;
1467 
1468 	VM_OBJECT_ASSERT_WLOCKED(object);
1469 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
1470 
1471 	backing_object = object->backing_object;
1472 
1473 	if (backing_object->type != OBJT_DEFAULT &&
1474 	    backing_object->type != OBJT_SWAP)
1475 		return (false);
1476 
1477 	pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1478 	p = vm_page_find_least(backing_object, pi);
1479 	ps = swap_pager_find_least(backing_object, pi);
1480 
1481 	/*
1482 	 * Only check pages inside the parent object's range and
1483 	 * inside the parent object's mapping of the backing object.
1484 	 */
1485 	for (;; pi++) {
1486 		if (p != NULL && p->pindex < pi)
1487 			p = TAILQ_NEXT(p, listq);
1488 		if (ps < pi)
1489 			ps = swap_pager_find_least(backing_object, pi);
1490 		if (p == NULL && ps >= backing_object->size)
1491 			break;
1492 		else if (p == NULL)
1493 			pi = ps;
1494 		else
1495 			pi = MIN(p->pindex, ps);
1496 
1497 		new_pindex = pi - backing_offset_index;
1498 		if (new_pindex >= object->size)
1499 			break;
1500 
1501 		/*
1502 		 * See if the parent has the page or if the parent's object
1503 		 * pager has the page.  If the parent has the page but the page
1504 		 * is not valid, the parent's object pager must have the page.
1505 		 *
1506 		 * If this fails, the parent does not completely shadow the
1507 		 * object and we might as well give up now.
1508 		 */
1509 		pp = vm_page_lookup(object, new_pindex);
1510 		if ((pp == NULL || pp->valid == 0) &&
1511 		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
1512 			return (false);
1513 	}
1514 	return (true);
1515 }
1516 
1517 static bool
1518 vm_object_collapse_scan(vm_object_t object, int op)
1519 {
1520 	vm_object_t backing_object;
1521 	vm_page_t next, p, pp;
1522 	vm_pindex_t backing_offset_index, new_pindex;
1523 
1524 	VM_OBJECT_ASSERT_WLOCKED(object);
1525 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
1526 
1527 	backing_object = object->backing_object;
1528 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1529 
1530 	/*
1531 	 * Initial conditions
1532 	 */
1533 	if ((op & OBSC_COLLAPSE_WAIT) != 0)
1534 		vm_object_set_flag(backing_object, OBJ_DEAD);
1535 
1536 	/*
1537 	 * Our scan
1538 	 */
1539 	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
1540 		next = TAILQ_NEXT(p, listq);
1541 		new_pindex = p->pindex - backing_offset_index;
1542 
1543 		/*
1544 		 * Check for busy page
1545 		 */
1546 		if (vm_page_busied(p)) {
1547 			next = vm_object_collapse_scan_wait(object, p, next, op);
1548 			continue;
1549 		}
1550 
1551 		KASSERT(p->object == backing_object,
1552 		    ("vm_object_collapse_scan: object mismatch"));
1553 
1554 		if (p->pindex < backing_offset_index ||
1555 		    new_pindex >= object->size) {
1556 			if (backing_object->type == OBJT_SWAP)
1557 				swap_pager_freespace(backing_object, p->pindex,
1558 				    1);
1559 
1560 			/*
1561 			 * Page is out of the parent object's range, we can
1562 			 * simply destroy it.
1563 			 */
1564 			vm_page_lock(p);
1565 			KASSERT(!pmap_page_is_mapped(p),
1566 			    ("freeing mapped page %p", p));
1567 			if (vm_page_remove(p))
1568 				vm_page_free(p);
1569 			vm_page_unlock(p);
1570 			continue;
1571 		}
1572 
1573 		pp = vm_page_lookup(object, new_pindex);
1574 		if (pp != NULL && vm_page_busied(pp)) {
1575 			/*
1576 			 * The page in the parent is busy and possibly not
1577 			 * (yet) valid.  Until its state is finalized by the
1578 			 * busy bit owner, we can't tell whether it shadows the
1579 			 * original page.  Therefore, we must either skip it
1580 			 * and the original (backing_object) page or wait for
1581 			 * its state to be finalized.
1582 			 *
1583 			 * This is due to a race with vm_fault() where we must
1584 			 * unbusy the original (backing_obj) page before we can
1585 			 * (re)lock the parent.  Hence we can get here.
1586 			 */
1587 			next = vm_object_collapse_scan_wait(object, pp, next,
1588 			    op);
1589 			continue;
1590 		}
1591 
1592 		KASSERT(pp == NULL || pp->valid != 0,
1593 		    ("unbusy invalid page %p", pp));
1594 
1595 		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
1596 			NULL)) {
1597 			/*
1598 			 * The page already exists in the parent OR swap exists
1599 			 * for this location in the parent.  Leave the parent's
1600 			 * page alone.  Destroy the original page from the
1601 			 * backing object.
1602 			 */
1603 			if (backing_object->type == OBJT_SWAP)
1604 				swap_pager_freespace(backing_object, p->pindex,
1605 				    1);
1606 			vm_page_lock(p);
1607 			KASSERT(!pmap_page_is_mapped(p),
1608 			    ("freeing mapped page %p", p));
1609 			if (vm_page_remove(p))
1610 				vm_page_free(p);
1611 			vm_page_unlock(p);
1612 			continue;
1613 		}
1614 
1615 		/*
1616 		 * Page does not exist in parent, rename the page from the
1617 		 * backing object to the main object.
1618 		 *
1619 		 * If the page was mapped to a process, it can remain mapped
1620 		 * through the rename.  vm_page_rename() will dirty the page.
1621 		 */
1622 		if (vm_page_rename(p, object, new_pindex)) {
1623 			next = vm_object_collapse_scan_wait(object, NULL, next,
1624 			    op);
1625 			continue;
1626 		}
1627 
1628 		/* Use the old pindex to free the right page. */
1629 		if (backing_object->type == OBJT_SWAP)
1630 			swap_pager_freespace(backing_object,
1631 			    new_pindex + backing_offset_index, 1);
1632 
1633 #if VM_NRESERVLEVEL > 0
1634 		/*
1635 		 * Rename the reservation.
1636 		 */
1637 		vm_reserv_rename(p, object, backing_object,
1638 		    backing_offset_index);
1639 #endif
1640 	}
1641 	return (true);
1642 }
1643 
1644 
1645 /*
1646  * This version of collapse allows the operation to occur earlier,
1647  * even when paging_in_progress is true for an object.  This is not a
1648  * complete operation, but it should plug 99.9% of the rest of the leaks.
1649  */
1650 static void
1651 vm_object_qcollapse(vm_object_t object)
1652 {
1653 	vm_object_t backing_object = object->backing_object;
1654 
1655 	VM_OBJECT_ASSERT_WLOCKED(object);
1656 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
1657 
1658 	if (backing_object->ref_count != 1)
1659 		return;
1660 
1661 	vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT);
1662 }
1663 
1664 /*
1665  *	vm_object_collapse:
1666  *
1667  *	Collapse an object with the object backing it.
1668  *	Pages in the backing object are moved into the
1669  *	parent, and the backing object is deallocated.
1670  */
1671 void
1672 vm_object_collapse(vm_object_t object)
1673 {
1674 	vm_object_t backing_object, new_backing_object;
1675 
1676 	VM_OBJECT_ASSERT_WLOCKED(object);
1677 
1678 	while (TRUE) {
1679 		/*
1680 		 * Verify that the conditions are right for collapse:
1681 		 *
1682 		 * The object exists and the backing object exists.
1683 		 */
1684 		if ((backing_object = object->backing_object) == NULL)
1685 			break;
1686 
1687 		/*
1688 		 * We check the backing object first because it is most likely
1689 		 * not collapsible.
1690 		 */
1691 		VM_OBJECT_WLOCK(backing_object);
1692 		if (backing_object->handle != NULL ||
1693 		    (backing_object->type != OBJT_DEFAULT &&
1694 		    backing_object->type != OBJT_SWAP) ||
1695 		    (backing_object->flags & (OBJ_DEAD | OBJ_NOSPLIT)) != 0 ||
1696 		    object->handle != NULL ||
1697 		    (object->type != OBJT_DEFAULT &&
1698 		     object->type != OBJT_SWAP) ||
1699 		    (object->flags & OBJ_DEAD)) {
1700 			VM_OBJECT_WUNLOCK(backing_object);
1701 			break;
1702 		}
1703 
1704 		if (object->paging_in_progress != 0 ||
1705 		    backing_object->paging_in_progress != 0) {
1706 			vm_object_qcollapse(object);
1707 			VM_OBJECT_WUNLOCK(backing_object);
1708 			break;
1709 		}
1710 
1711 		/*
1712 		 * We know that we can either collapse the backing object (if
1713 		 * the parent is the only reference to it) or (perhaps) have
1714 		 * the parent bypass the object if the parent happens to shadow
1715 		 * all the resident pages in the entire backing object.
1716 		 *
1717 		 * This is ignoring pager-backed pages such as swap pages.
1718 		 * vm_object_collapse_scan fails the shadowing test in this
1719 		 * case.
1720 		 */
1721 		if (backing_object->ref_count == 1) {
1722 			vm_object_pip_add(object, 1);
1723 			vm_object_pip_add(backing_object, 1);
1724 
1725 			/*
1726 			 * If there is exactly one reference to the backing
1727 			 * object, we can collapse it into the parent.
1728 			 */
1729 			vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT);
1730 
1731 #if VM_NRESERVLEVEL > 0
1732 			/*
1733 			 * Break any reservations from backing_object.
1734 			 */
1735 			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1736 				vm_reserv_break_all(backing_object);
1737 #endif
1738 
1739 			/*
1740 			 * Move the pager from backing_object to object.
1741 			 */
1742 			if (backing_object->type == OBJT_SWAP) {
1743 				/*
1744 				 * swap_pager_copy() can sleep, in which case
1745 				 * the backing_object's and object's locks are
1746 				 * released and reacquired.
1747 				 * Since swap_pager_copy() is being asked to
1748 				 * destroy the source, it will change the
1749 				 * backing_object's type to OBJT_DEFAULT.
1750 				 */
1751 				swap_pager_copy(
1752 				    backing_object,
1753 				    object,
1754 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1755 			}
1756 			/*
1757 			 * Object now shadows whatever backing_object did.
1758 			 * Note that the reference to
1759 			 * backing_object->backing_object moves from within
1760 			 * backing_object to within object.
1761 			 */
1762 			LIST_REMOVE(object, shadow_list);
1763 			backing_object->shadow_count--;
1764 			if (backing_object->backing_object) {
1765 				VM_OBJECT_WLOCK(backing_object->backing_object);
1766 				LIST_REMOVE(backing_object, shadow_list);
1767 				LIST_INSERT_HEAD(
1768 				    &backing_object->backing_object->shadow_head,
1769 				    object, shadow_list);
1770 				/*
1771 				 * The shadow_count has not changed.
1772 				 */
1773 				VM_OBJECT_WUNLOCK(backing_object->backing_object);
1774 			}
1775 			object->backing_object = backing_object->backing_object;
1776 			object->backing_object_offset +=
1777 			    backing_object->backing_object_offset;
1778 
1779 			/*
1780 			 * Discard backing_object.
1781 			 *
1782 			 * Since the backing object has no pages, no pager left,
1783 			 * and no object references within it, all that is
1784 			 * necessary is to dispose of it.
1785 			 */
1786 			KASSERT(backing_object->ref_count == 1, (
1787 "backing_object %p was somehow re-referenced during collapse!",
1788 			    backing_object));
1789 			vm_object_pip_wakeup(backing_object);
1790 			backing_object->type = OBJT_DEAD;
1791 			backing_object->ref_count = 0;
1792 			VM_OBJECT_WUNLOCK(backing_object);
1793 			vm_object_destroy(backing_object);
1794 
1795 			vm_object_pip_wakeup(object);
1796 			counter_u64_add(object_collapses, 1);
1797 		} else {
1798 			/*
1799 			 * If we do not entirely shadow the backing object,
1800 			 * there is nothing we can do, so we give up.
1801 			 */
1802 			if (object->resident_page_count != object->size &&
1803 			    !vm_object_scan_all_shadowed(object)) {
1804 				VM_OBJECT_WUNLOCK(backing_object);
1805 				break;
1806 			}
1807 
1808 			/*
1809 			 * Make the parent shadow the next object in the
1810 			 * chain.  Deallocating backing_object will not remove
1811 			 * it, since its reference count is at least 2.
1812 			 */
1813 			LIST_REMOVE(object, shadow_list);
1814 			backing_object->shadow_count--;
1815 
1816 			new_backing_object = backing_object->backing_object;
1817 			if ((object->backing_object = new_backing_object) != NULL) {
1818 				VM_OBJECT_WLOCK(new_backing_object);
1819 				LIST_INSERT_HEAD(
1820 				    &new_backing_object->shadow_head,
1821 				    object,
1822 				    shadow_list
1823 				);
1824 				new_backing_object->shadow_count++;
1825 				vm_object_reference_locked(new_backing_object);
1826 				VM_OBJECT_WUNLOCK(new_backing_object);
1827 				object->backing_object_offset +=
1828 					backing_object->backing_object_offset;
1829 			}
1830 
1831 			/*
1832 			 * Drop the reference count on backing_object. Since
1833 			 * its ref_count was at least 2, it will not vanish.
1834 			 */
1835 			backing_object->ref_count--;
1836 			VM_OBJECT_WUNLOCK(backing_object);
1837 			counter_u64_add(object_bypasses, 1);
1838 		}
1839 
1840 		/*
1841 		 * Try again with this object's new backing object.
1842 		 */
1843 	}
1844 }
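
/*
 * Usage sketch: vm_object_coalesce() below, for example, runs this
 * collapse pass on a write-locked object:
 *
 *	VM_OBJECT_WLOCK(prev_object);
 *	...
 *	vm_object_collapse(prev_object);
 *	...
 *	VM_OBJECT_WUNLOCK(prev_object);
 *
 * The loop above keeps walking the shadow chain until neither a
 * collapse nor a bypass is possible.
 */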
1845 
1846 /*
1847  *	vm_object_page_remove:
1848  *
1849  *	For the given object, either frees or invalidates each of the
1850  *	specified pages.  In general, a page is freed.  However, if a page is
1851  *	wired for any reason other than the existence of a managed, wired
1852  *	mapping, then it may be invalidated but not removed from the object.
1853  *	Pages are specified by the given range ["start", "end") and the option
1854  *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
1855  *	extends from "start" to the end of the object.  If the option
1856  *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1857  *	specified range are affected.  If the option OBJPR_NOTMAPPED is
1858  *	specified, then the pages within the specified range must have no
1859  *	mappings.  Otherwise, if this option is not specified, any mappings to
1860  *	the specified pages are removed before the pages are freed or
1861  *	invalidated.
1862  *
1863  *	In general, this operation should only be performed on objects that
1864  *	contain managed pages.  There are, however, two exceptions.  First, it
1865  *	is performed on the kernel and kmem objects by vm_map_entry_delete().
1866  *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
1867  *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
1868  *	not be specified and the option OBJPR_NOTMAPPED must be specified.
1869  *
1870  *	The object must be locked.
1871  */
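/*
 * Usage sketch (hypothetical caller and variable names): truncating an
 * object to "newsize" bytes might free the now-unused tail pages with:
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_page_remove(object, OFF_TO_IDX(newsize), 0, 0);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * Here "end" == 0 extends the range to the end of the object, and the
 * absence of OBJPR_CLEANONLY means that dirty pages are freed as well.
 */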
1872 void
1873 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1874     int options)
1875 {
1876 	vm_page_t p, next;
1877 	struct mtx *mtx;
1878 
1879 	VM_OBJECT_ASSERT_WLOCKED(object);
1880 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
1881 	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
1882 	    ("vm_object_page_remove: illegal options for object %p", object));
1883 	if (object->resident_page_count == 0)
1884 		return;
1885 	vm_object_pip_add(object, 1);
1886 again:
1887 	p = vm_page_find_least(object, start);
1888 	mtx = NULL;
1889 
1890 	/*
1891 	 * Here, the variable "p" is either (1) the page with the least pindex
1892 	 * greater than or equal to the parameter "start" or (2) NULL.
1893 	 */
1894 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
1895 		next = TAILQ_NEXT(p, listq);
1896 
1897 		/*
1898 		 * If the page is wired for any reason besides the existence
1899 		 * of managed, wired mappings, then it cannot be freed.  For
1900 		 * example, fictitious pages, which represent device memory,
1901 		 * are inherently wired and cannot be freed.  They can,
1902 		 * however, be invalidated if the option OBJPR_CLEANONLY is
1903 		 * not specified.
1904 		 */
1905 		vm_page_change_lock(p, &mtx);
1906 		if (vm_page_xbusied(p)) {
1907 			VM_OBJECT_WUNLOCK(object);
1908 			vm_page_busy_sleep(p, "vmopax", true);
1909 			VM_OBJECT_WLOCK(object);
1910 			goto again;
1911 		}
1912 		if (vm_page_wired(p)) {
1913 			if ((options & OBJPR_NOTMAPPED) == 0 &&
1914 			    object->ref_count != 0)
1915 				pmap_remove_all(p);
1916 			if ((options & OBJPR_CLEANONLY) == 0) {
1917 				p->valid = 0;
1918 				vm_page_undirty(p);
1919 			}
1920 			continue;
1921 		}
1922 		if (vm_page_busied(p)) {
1923 			VM_OBJECT_WUNLOCK(object);
1924 			vm_page_busy_sleep(p, "vmopar", false);
1925 			VM_OBJECT_WLOCK(object);
1926 			goto again;
1927 		}
1928 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
1929 		    ("vm_object_page_remove: page %p is fictitious", p));
1930 		if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
1931 			if ((options & OBJPR_NOTMAPPED) == 0 &&
1932 			    object->ref_count != 0)
1933 				pmap_remove_write(p);
1934 			if (p->dirty != 0)
1935 				continue;
1936 		}
1937 		if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0)
1938 			pmap_remove_all(p);
1939 		vm_page_free(p);
1940 	}
1941 	if (mtx != NULL)
1942 		mtx_unlock(mtx);
1943 	vm_object_pip_wakeup(object);
1944 }
1945 
1946 /*
1947  *	vm_object_page_noreuse:
1948  *
1949  *	For the given object, attempt to move the specified pages to
1950  *	the head of the inactive queue.  This bypasses regular LRU
1951  *	operation and allows the pages to be reused quickly under memory
1952  *	pressure.  If a page is wired for any reason, then it will not
1953  *	be queued.  Pages are specified by the range ["start", "end").
1954  *	As a special case, if "end" is zero, then the range extends from
1955  *	"start" to the end of the object.
1956  *
1957  *	This operation should only be performed on objects that
1958  *	contain non-fictitious, managed pages.
1959  *
1960  *	The object must be locked.
1961  */
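/*
 * Usage sketch (hypothetical caller and variable names): a caller that
 * is done with the byte range [off, off + len) might mark those pages
 * for early reuse with:
 *
 *	VM_OBJECT_RLOCK(object);
 *	vm_object_page_noreuse(object, OFF_TO_IDX(off),
 *	    OFF_TO_IDX(off + len));
 *	VM_OBJECT_RUNLOCK(object);
 *
 * A shared lock suffices, since the routine only asserts that the
 * object is locked.
 */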
1962 void
1963 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1964 {
1965 	struct mtx *mtx;
1966 	vm_page_t p, next;
1967 
1968 	VM_OBJECT_ASSERT_LOCKED(object);
1969 	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
1970 	    ("vm_object_page_noreuse: illegal object %p", object));
1971 	if (object->resident_page_count == 0)
1972 		return;
1973 	p = vm_page_find_least(object, start);
1974 
1975 	/*
1976 	 * Here, the variable "p" is either (1) the page with the least pindex
1977 	 * greater than or equal to the parameter "start" or (2) NULL.
1978 	 */
1979 	mtx = NULL;
1980 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
1981 		next = TAILQ_NEXT(p, listq);
1982 		vm_page_change_lock(p, &mtx);
1983 		vm_page_deactivate_noreuse(p);
1984 	}
1985 	if (mtx != NULL)
1986 		mtx_unlock(mtx);
1987 }
1988 
1989 /*
1990  *	Populate the specified range of the object with valid pages.  Returns
1991  *	TRUE if the range is successfully populated and FALSE otherwise.
1992  *
1993  *	Note: This function should be optimized to pass a larger array of
1994  *	pages to vm_pager_get_pages() before it is applied to a non-
1995  *	OBJT_DEVICE object.
1996  *
1997  *	The object must be locked.
1998  */
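/*
 * Usage sketch (hypothetical caller and variable names): making the
 * first "npages" pages of an object resident and valid before they are
 * used might look like:
 *
 *	VM_OBJECT_WLOCK(object);
 *	if (!vm_object_populate(object, 0, npages))
 *		error = EIO;
 *	VM_OBJECT_WUNLOCK(object);
 *
 * The pages are grabbed exclusive-busy one at a time and are unbusied
 * again before the function returns.
 */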
1999 boolean_t
2000 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2001 {
2002 	vm_page_t m;
2003 	vm_pindex_t pindex;
2004 	int rv;
2005 
2006 	VM_OBJECT_ASSERT_WLOCKED(object);
2007 	for (pindex = start; pindex < end; pindex++) {
2008 		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
2009 		if (m->valid != VM_PAGE_BITS_ALL) {
2010 			rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
2011 			if (rv != VM_PAGER_OK) {
2012 				vm_page_lock(m);
2013 				vm_page_free(m);
2014 				vm_page_unlock(m);
2015 				break;
2016 			}
2017 		}
2018 		/*
2019 		 * Keep "m" busy because a subsequent iteration may unlock
2020 		 * the object.
2021 		 */
2022 	}
2023 	if (pindex > start) {
2024 		m = vm_page_lookup(object, start);
2025 		while (m != NULL && m->pindex < pindex) {
2026 			vm_page_xunbusy(m);
2027 			m = TAILQ_NEXT(m, listq);
2028 		}
2029 	}
2030 	return (pindex == end);
2031 }
2032 
2033 /*
2034  *	Routine:	vm_object_coalesce
2035  *	Function:	Coalesces two objects backing up adjoining
2036  *			regions of memory into a single object.
2037  *
2038  *	Returns TRUE if the objects were combined.
2039  *
2040  *	NOTE:	At the moment this only works if the second object is NULL;
2041  *		if it is not, which object do we lock first?
2042  *
2043  *	Parameters:
2044  *		prev_object	First object to coalesce
2045  *		prev_offset	Offset into prev_object
2046  *		prev_size	Size of reference to prev_object
2047  *		next_size	Size of reference to the second object
2048  *		reserved	Indicator that extension region has
2049  *				swap accounted for
2050  *
2051  *	Conditions:
2052  *	The object must *not* be locked.
2053  */
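/*
 * Usage sketch (hypothetical caller): when a map entry backed by
 * prev_object at prev_offset and covering prev_size bytes is being
 * grown by next_size bytes, the caller might try
 *
 *	if (vm_object_coalesce(prev_object, prev_offset, prev_size,
 *	    next_size, FALSE))
 *		reuse prev_object for the extended range;
 *	else
 *		allocate a new object for the extension;
 *
 * Note that prev_object is passed in unlocked; the routine acquires and
 * releases its write lock internally.
 */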
2054 boolean_t
2055 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2056     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2057 {
2058 	vm_pindex_t next_pindex;
2059 
2060 	if (prev_object == NULL)
2061 		return (TRUE);
2062 	VM_OBJECT_WLOCK(prev_object);
2063 	if ((prev_object->type != OBJT_DEFAULT &&
2064 	    prev_object->type != OBJT_SWAP) ||
2065 	    (prev_object->flags & OBJ_TMPFS_NODE) != 0) {
2066 		VM_OBJECT_WUNLOCK(prev_object);
2067 		return (FALSE);
2068 	}
2069 
2070 	/*
2071 	 * Try to collapse the object first
2072 	 */
2073 	vm_object_collapse(prev_object);
2074 
2075 	/*
2076 	 * Can't coalesce if: more than one reference, paged out, shadows
2077 	 * another object, or has a copy elsewhere (any of which mean that
2078 	 * the pages not mapped to prev_entry may be in use anyway).
2079 	 */
2080 	if (prev_object->backing_object != NULL) {
2081 		VM_OBJECT_WUNLOCK(prev_object);
2082 		return (FALSE);
2083 	}
2084 
2085 	prev_size >>= PAGE_SHIFT;
2086 	next_size >>= PAGE_SHIFT;
2087 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2088 
2089 	if (prev_object->ref_count > 1 &&
2090 	    prev_object->size != next_pindex &&
2091 	    (prev_object->flags & OBJ_ONEMAPPING) == 0) {
2092 		VM_OBJECT_WUNLOCK(prev_object);
2093 		return (FALSE);
2094 	}
2095 
2096 	/*
2097 	 * Account for the charge.
2098 	 */
2099 	if (prev_object->cred != NULL) {
2100 
2101 		/*
2102 		 * If prev_object was charged, then this mapping,
2103 		 * although not charged now, may become writable
2104 		 * later.  A non-NULL cred in the object would prevent
2105 		 * swap reservation when write access is later
2106 		 * enabled, so reserve swap now.  A failed reservation
2107 		 * causes allocation of a separate object for the map
2108 		 * entry, and swap reservation for that entry is
2109 		 * managed at the appropriate time.
2110 		 */
2111 		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2112 		    prev_object->cred)) {
2113 			VM_OBJECT_WUNLOCK(prev_object);
2114 			return (FALSE);
2115 		}
2116 		prev_object->charge += ptoa(next_size);
2117 	}
2118 
2119 	/*
2120 	 * Remove any pages that may still be in the object from a previous
2121 	 * deallocation.
2122 	 */
2123 	if (next_pindex < prev_object->size) {
2124 		vm_object_page_remove(prev_object, next_pindex, next_pindex +
2125 		    next_size, 0);
2126 		if (prev_object->type == OBJT_SWAP)
2127 			swap_pager_freespace(prev_object,
2128 					     next_pindex, next_size);
2129 #if 0
2130 		if (prev_object->cred != NULL) {
2131 			KASSERT(prev_object->charge >=
2132 			    ptoa(prev_object->size - next_pindex),
2133 			    ("object %p overcharged 1 %jx %jx", prev_object,
2134 				(uintmax_t)next_pindex, (uintmax_t)next_size));
2135 			prev_object->charge -= ptoa(prev_object->size -
2136 			    next_pindex);
2137 		}
2138 #endif
2139 	}
2140 
2141 	/*
2142 	 * Extend the object if necessary.
2143 	 */
2144 	if (next_pindex + next_size > prev_object->size)
2145 		prev_object->size = next_pindex + next_size;
2146 
2147 	VM_OBJECT_WUNLOCK(prev_object);
2148 	return (TRUE);
2149 }
2150 
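/*
 *	vm_object_set_writeable_dirty:
 *
 *	Mark the object as possibly containing dirty pages.  For a
 *	vnode-backed object, the generation count is bumped and
 *	OBJ_MIGHTBEDIRTY is set; for a tmpfs node (a swap object with
 *	OBJ_TMPFS_NODE set), OBJ_TMPFS_DIRTY is set instead.  Other
 *	object types are left untouched.
 *
 *	The object must be locked.
 */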
2151 void
2152 vm_object_set_writeable_dirty(vm_object_t object)
2153 {
2154 
2155 	VM_OBJECT_ASSERT_WLOCKED(object);
2156 	if (object->type != OBJT_VNODE) {
2157 		if ((object->flags & OBJ_TMPFS_NODE) != 0) {
2158 			KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs"));
2159 			vm_object_set_flag(object, OBJ_TMPFS_DIRTY);
2160 		}
2161 		return;
2162 	}
2163 	object->generation++;
2164 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2165 		return;
2166 	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
2167 }
2168 
2169 /*
2170  *	vm_object_unwire:
2171  *
2172  *	For each page offset within the specified range of the given object,
2173  *	find the highest-level page in the shadow chain and unwire it.  A page
2174  *	must exist at every page offset, and the highest-level page must be
2175  *	wired.
2176  */
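/*
 * Usage sketch (hypothetical caller and variable names): after unwiring
 * a formerly wired map entry backed by "object", its pages can be
 * returned to the active queue with
 *
 *	vm_object_unwire(object, entry_offset, entry_size, PQ_ACTIVE);
 *
 * Both the offset and the length must be page aligned, as the KASSERTs
 * below enforce.
 */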
2177 void
2178 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
2179     uint8_t queue)
2180 {
2181 	vm_object_t tobject, t1object;
2182 	vm_page_t m, tm;
2183 	vm_pindex_t end_pindex, pindex, tpindex;
2184 	int depth, locked_depth;
2185 
2186 	KASSERT((offset & PAGE_MASK) == 0,
2187 	    ("vm_object_unwire: offset is not page aligned"));
2188 	KASSERT((length & PAGE_MASK) == 0,
2189 	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
2190 	/* The wired count of a fictitious page never changes. */
2191 	if ((object->flags & OBJ_FICTITIOUS) != 0)
2192 		return;
2193 	pindex = OFF_TO_IDX(offset);
2194 	end_pindex = pindex + atop(length);
2195 again:
2196 	locked_depth = 1;
2197 	VM_OBJECT_RLOCK(object);
2198 	m = vm_page_find_least(object, pindex);
2199 	while (pindex < end_pindex) {
2200 		if (m == NULL || pindex < m->pindex) {
2201 			/*
2202 			 * The first object in the shadow chain doesn't
2203 			 * contain a page at the current index.  Therefore,
2204 			 * the page must exist in a backing object.
2205 			 */
2206 			tobject = object;
2207 			tpindex = pindex;
2208 			depth = 0;
2209 			do {
2210 				tpindex +=
2211 				    OFF_TO_IDX(tobject->backing_object_offset);
2212 				tobject = tobject->backing_object;
2213 				KASSERT(tobject != NULL,
2214 				    ("vm_object_unwire: missing page"));
2215 				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
2216 					goto next_page;
2217 				depth++;
2218 				if (depth == locked_depth) {
2219 					locked_depth++;
2220 					VM_OBJECT_RLOCK(tobject);
2221 				}
2222 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
2223 			    NULL);
2224 		} else {
2225 			tm = m;
2226 			m = TAILQ_NEXT(m, listq);
2227 		}
2228 		vm_page_lock(tm);
2229 		if (vm_page_xbusied(tm)) {
2230 			for (tobject = object; locked_depth >= 1;
2231 			    locked_depth--) {
2232 				t1object = tobject->backing_object;
2233 				VM_OBJECT_RUNLOCK(tobject);
2234 				tobject = t1object;
2235 			}
2236 			vm_page_busy_sleep(tm, "unwbo", true);
2237 			goto again;
2238 		}
2239 		vm_page_unwire(tm, queue);
2240 		vm_page_unlock(tm);
2241 next_page:
2242 		pindex++;
2243 	}
2244 	/* Release the accumulated object locks. */
2245 	for (tobject = object; locked_depth >= 1; locked_depth--) {
2246 		t1object = tobject->backing_object;
2247 		VM_OBJECT_RUNLOCK(tobject);
2248 		tobject = t1object;
2249 	}
2250 }
2251 
2252 /*
2253  * Return the vnode for the given object, or NULL if none exists.
2254  * For tmpfs objects, the function may return NULL if there is
2255  * no vnode allocated at the time of the call.
2256  */
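/*
 * Usage sketch: sysctl_vm_object_list() below follows this pattern
 * (through vm_object_kvme_type()), taking a reference on the vnode
 * before the object lock is dropped:
 *
 *	VM_OBJECT_RLOCK(object);
 *	vp = vm_object_vnode(object);
 *	if (vp != NULL)
 *		vref(vp);
 *	VM_OBJECT_RUNLOCK(object);
 */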
2257 struct vnode *
2258 vm_object_vnode(vm_object_t object)
2259 {
2260 	struct vnode *vp;
2261 
2262 	VM_OBJECT_ASSERT_LOCKED(object);
2263 	if (object->type == OBJT_VNODE) {
2264 		vp = object->handle;
2265 		KASSERT(vp != NULL, ("%s: OBJT_VNODE has no vnode", __func__));
2266 	} else if (object->type == OBJT_SWAP &&
2267 	    (object->flags & OBJ_TMPFS) != 0) {
2268 		vp = object->un_pager.swp.swp_tmpfs;
2269 		KASSERT(vp != NULL, ("%s: OBJT_TMPFS has no vnode", __func__));
2270 	} else {
2271 		vp = NULL;
2272 	}
2273 	return (vp);
2274 }
2275 
2276 /*
2277  * Return the kvme type of the given object.
2278  * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
2279  */
2280 int
2281 vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
2282 {
2283 
2284 	VM_OBJECT_ASSERT_LOCKED(object);
2285 	if (vpp != NULL)
2286 		*vpp = vm_object_vnode(object);
2287 	switch (object->type) {
2288 	case OBJT_DEFAULT:
2289 		return (KVME_TYPE_DEFAULT);
2290 	case OBJT_VNODE:
2291 		return (KVME_TYPE_VNODE);
2292 	case OBJT_SWAP:
2293 		if ((object->flags & OBJ_TMPFS_NODE) != 0)
2294 			return (KVME_TYPE_VNODE);
2295 		return (KVME_TYPE_SWAP);
2296 	case OBJT_DEVICE:
2297 		return (KVME_TYPE_DEVICE);
2298 	case OBJT_PHYS:
2299 		return (KVME_TYPE_PHYS);
2300 	case OBJT_DEAD:
2301 		return (KVME_TYPE_DEAD);
2302 	case OBJT_SG:
2303 		return (KVME_TYPE_SG);
2304 	case OBJT_MGTDEVICE:
2305 		return (KVME_TYPE_MGTDEVICE);
2306 	default:
2307 		return (KVME_TYPE_UNKNOWN);
2308 	}
2309 }
2310 
2311 static int
2312 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
2313 {
2314 	struct kinfo_vmobject *kvo;
2315 	char *fullpath, *freepath;
2316 	struct vnode *vp;
2317 	struct vattr va;
2318 	vm_object_t obj;
2319 	vm_page_t m;
2320 	int count, error;
2321 
2322 	if (req->oldptr == NULL) {
2323 		/*
2324 		 * If an old buffer has not been provided, generate an
2325 		 * estimate of the space needed for a subsequent call.
2326 		 */
2327 		mtx_lock(&vm_object_list_mtx);
2328 		count = 0;
2329 		TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2330 			if (obj->type == OBJT_DEAD)
2331 				continue;
2332 			count++;
2333 		}
2334 		mtx_unlock(&vm_object_list_mtx);
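		/*
		 * Add ~10% slack to the estimate to allow for objects
		 * created between this call and the follow-up call that
		 * actually copies the data out.
		 */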
2335 		return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
2336 		    count * 11 / 10));
2337 	}
2338 
2339 	kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);
2340 	error = 0;
2341 
2342 	/*
2343 	 * VM objects are type stable and are never removed from the
2344 	 * list once added.  This allows us to safely read obj->object_list
2345 	 * after reacquiring the vm_object_list lock.
2346 	 */
2347 	mtx_lock(&vm_object_list_mtx);
2348 	TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2349 		if (obj->type == OBJT_DEAD)
2350 			continue;
2351 		VM_OBJECT_RLOCK(obj);
2352 		if (obj->type == OBJT_DEAD) {
2353 			VM_OBJECT_RUNLOCK(obj);
2354 			continue;
2355 		}
2356 		mtx_unlock(&vm_object_list_mtx);
2357 		kvo->kvo_size = ptoa(obj->size);
2358 		kvo->kvo_resident = obj->resident_page_count;
2359 		kvo->kvo_ref_count = obj->ref_count;
2360 		kvo->kvo_shadow_count = obj->shadow_count;
2361 		kvo->kvo_memattr = obj->memattr;
2362 		kvo->kvo_active = 0;
2363 		kvo->kvo_inactive = 0;
2364 		TAILQ_FOREACH(m, &obj->memq, listq) {
2365 			/*
2366 			 * A page may belong to the object but be
2367 			 * dequeued and set to PQ_NONE while the
2368 			 * object lock is not held.  This makes the
2369 			 * reads of m->queue below racy, and we do not
2370 			 * count pages set to PQ_NONE.  However, this
2371 			 * sysctl is only meant to give an
2372 			 * approximation of the system anyway.
2373 			 * approximation of the system's state anyway.
2374 			if (m->queue == PQ_ACTIVE)
2375 				kvo->kvo_active++;
2376 			else if (m->queue == PQ_INACTIVE)
2377 				kvo->kvo_inactive++;
2378 		}
2379 
2380 		kvo->kvo_vn_fileid = 0;
2381 		kvo->kvo_vn_fsid = 0;
2382 		kvo->kvo_vn_fsid_freebsd11 = 0;
2383 		freepath = NULL;
2384 		fullpath = "";
2385 		kvo->kvo_type = vm_object_kvme_type(obj, &vp);
2386 		if (vp != NULL)
2387 			vref(vp);
2388 		VM_OBJECT_RUNLOCK(obj);
2389 		if (vp != NULL) {
2390 			vn_fullpath(curthread, vp, &fullpath, &freepath);
2391 			vn_lock(vp, LK_SHARED | LK_RETRY);
2392 			if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
2393 				kvo->kvo_vn_fileid = va.va_fileid;
2394 				kvo->kvo_vn_fsid = va.va_fsid;
2395 				kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
2396 								/* truncate */
2397 			}
2398 			vput(vp);
2399 		}
2400 
2401 		strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
2402 		if (freepath != NULL)
2403 			free(freepath, M_TEMP);
2404 
2405 		/* Pack record size down */
2406 		kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
2407 		    + strlen(kvo->kvo_path) + 1;
2408 		kvo->kvo_structsize = roundup(kvo->kvo_structsize,
2409 		    sizeof(uint64_t));
2410 		error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
2411 		mtx_lock(&vm_object_list_mtx);
2412 		if (error)
2413 			break;
2414 	}
2415 	mtx_unlock(&vm_object_list_mtx);
2416 	free(kvo, M_TEMP);
2417 	return (error);
2418 }
2419 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
2420     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
2421     "List of VM objects");
2422 
2423 #include "opt_ddb.h"
2424 #ifdef DDB
2425 #include <sys/kernel.h>
2426 
2427 #include <sys/cons.h>
2428 
2429 #include <ddb/ddb.h>
2430 
2431 static int
2432 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2433 {
2434 	vm_map_t tmpm;
2435 	vm_map_entry_t tmpe;
2436 	vm_object_t obj;
2437 	int entcount;
2438 
2439 	if (map == 0)
2440 		return 0;
2441 
2442 	if (entry == 0) {
2443 		tmpe = map->header.next;
2444 		entcount = map->nentries;
2445 		while (entcount-- && (tmpe != &map->header)) {
2446 			if (_vm_object_in_map(map, object, tmpe)) {
2447 				return 1;
2448 			}
2449 			tmpe = tmpe->next;
2450 		}
2451 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2452 		tmpm = entry->object.sub_map;
2453 		tmpe = tmpm->header.next;
2454 		entcount = tmpm->nentries;
2455 		while (entcount-- && tmpe != &tmpm->header) {
2456 			if (_vm_object_in_map(tmpm, object, tmpe)) {
2457 				return 1;
2458 			}
2459 			tmpe = tmpe->next;
2460 		}
2461 	} else if ((obj = entry->object.vm_object) != NULL) {
2462 		for (; obj; obj = obj->backing_object)
2463 			if (obj == object) {
2464 				return 1;
2465 			}
2466 	}
2467 	return 0;
2468 }
2469 
2470 static int
2471 vm_object_in_map(vm_object_t object)
2472 {
2473 	struct proc *p;
2474 
2475 	/* sx_slock(&allproc_lock); */
2476 	FOREACH_PROC_IN_SYSTEM(p) {
2477 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2478 			continue;
2479 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2480 			/* sx_sunlock(&allproc_lock); */
2481 			return 1;
2482 		}
2483 	}
2484 	/* sx_sunlock(&allproc_lock); */
2485 	if (_vm_object_in_map(kernel_map, object, 0))
2486 		return 1;
2487 	return 0;
2488 }
2489 
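/*
 * Usage note: DB_SHOW_COMMAND(vmochk, ...) makes this available from
 * the DDB prompt as "show vmochk"; it takes no arguments and checks
 * that internal objects are reachable from some map.
 */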
2490 DB_SHOW_COMMAND(vmochk, vm_object_check)
2491 {
2492 	vm_object_t object;
2493 
2494 	/*
2495 	 * Make sure that internal objects are in a map somewhere
2496 	 * and that none have zero reference counts.
2497 	 */
2498 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2499 		if (object->handle == NULL &&
2500 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2501 			if (object->ref_count == 0) {
2502 				db_printf("vmochk: internal obj has zero ref count: %ld\n",
2503 					(long)object->size);
2504 			}
2505 			if (!vm_object_in_map(object)) {
2506 				db_printf(
2507 			"vmochk: internal obj is not in a map: "
2508 			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2509 				    object->ref_count, (u_long)object->size,
2510 				    (u_long)object->size,
2511 				    (void *)object->backing_object);
2512 			}
2513 		}
2514 	}
2515 }
2516 
2517 /*
2518  *	vm_object_print:	[ debug ]
2519  */
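/*
 * Usage note: reachable from the DDB prompt as "show object <addr>";
 * when an address is supplied ("have_addr"), the object's resident
 * pages are dumped as well.
 */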
2520 DB_SHOW_COMMAND(object, vm_object_print_static)
2521 {
2522 	/* XXX convert args. */
2523 	vm_object_t object = (vm_object_t)addr;
2524 	boolean_t full = have_addr;
2525 
2526 	vm_page_t p;
2527 
2528 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2529 #define	count	was_count
2530 
2531 	int count;
2532 
2533 	if (object == NULL)
2534 		return;
2535 
2536 	db_iprintf(
2537 	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2538 	    object, (int)object->type, (uintmax_t)object->size,
2539 	    object->resident_page_count, object->ref_count, object->flags,
2540 	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2541 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2542 	    object->shadow_count,
2543 	    object->backing_object ? object->backing_object->ref_count : 0,
2544 	    object->backing_object, (uintmax_t)object->backing_object_offset);
2545 
2546 	if (!full)
2547 		return;
2548 
2549 	db_indent += 2;
2550 	count = 0;
2551 	TAILQ_FOREACH(p, &object->memq, listq) {
2552 		if (count == 0)
2553 			db_iprintf("memory:=");
2554 		else if (count == 6) {
2555 			db_printf("\n");
2556 			db_iprintf(" ...");
2557 			count = 0;
2558 		} else
2559 			db_printf(",");
2560 		count++;
2561 
2562 		db_printf("(off=0x%jx,page=0x%jx)",
2563 		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2564 	}
2565 	if (count != 0)
2566 		db_printf("\n");
2567 	db_indent -= 2;
2568 }
2569 
2570 /* XXX. */
2571 #undef count
2572 
2573 /* XXX need this non-static entry for calling from vm_map_print. */
2574 void
2575 vm_object_print(
2576         /* db_expr_t */ long addr,
2577 	boolean_t have_addr,
2578 	/* db_expr_t */ long count,
2579 	char *modif)
2580 {
2581 	vm_object_print_static(addr, have_addr, count, modif);
2582 }
2583 
2584 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2585 {
2586 	vm_object_t object;
2587 	vm_pindex_t fidx;
2588 	vm_paddr_t pa;
2589 	vm_page_t m, prev_m;
2590 	int rcount, nl, c;
2591 
2592 	nl = 0;
2593 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2594 		db_printf("new object: %p\n", (void *)object);
2595 		if (nl > 18) {
2596 			c = cngetc();
2597 			if (c != ' ')
2598 				return;
2599 			nl = 0;
2600 		}
2601 		nl++;
2602 		rcount = 0;
2603 		fidx = 0;
2604 		pa = -1;
2605 		TAILQ_FOREACH(m, &object->memq, listq) {
2606 			if (m->pindex > 128)
2607 				break;
2608 			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2609 			    prev_m->pindex + 1 != m->pindex) {
2610 				if (rcount) {
2611 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2612 						(long)fidx, rcount, (long)pa);
2613 					if (nl > 18) {
2614 						c = cngetc();
2615 						if (c != ' ')
2616 							return;
2617 						nl = 0;
2618 					}
2619 					nl++;
2620 					rcount = 0;
2621 				}
2622 			}
2623 			if (rcount &&
2624 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2625 				++rcount;
2626 				continue;
2627 			}
2628 			if (rcount) {
2629 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2630 					(long)fidx, rcount, (long)pa);
2631 				if (nl > 18) {
2632 					c = cngetc();
2633 					if (c != ' ')
2634 						return;
2635 					nl = 0;
2636 				}
2637 				nl++;
2638 			}
2639 			fidx = m->pindex;
2640 			pa = VM_PAGE_TO_PHYS(m);
2641 			rcount = 1;
2642 		}
2643 		if (rcount) {
2644 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2645 				(long)fidx, rcount, (long)pa);
2646 			if (nl > 18) {
2647 				c = cngetc();
2648 				if (c != ' ')
2649 					return;
2650 				nl = 0;
2651 			}
2652 			nl++;
2653 		}
2654 	}
2655 }
2656 #endif /* DDB */
2657