1 /*-
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60 
61 /*
62  *	Virtual memory object module.
63  */
64 
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67 
68 #include "opt_vm.h"
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/lock.h>
73 #include <sys/mman.h>
74 #include <sys/mount.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/mutex.h>
78 #include <sys/proc.h>		/* for curproc, pageproc */
79 #include <sys/socket.h>
80 #include <sys/resourcevar.h>
81 #include <sys/vnode.h>
82 #include <sys/vmmeter.h>
83 #include <sys/sx.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_pager.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_reserv.h>
97 #include <vm/uma.h>
98 
99 static int old_msync;
100 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
101     "Use old (insecure) msync behavior");
102 
103 static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
104 		    int pagerflags, int flags, int *clearobjflags);
105 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
106 		    int *clearobjflags);
107 static void	vm_object_qcollapse(vm_object_t object);
108 static void	vm_object_vndeallocate(vm_object_t object);
109 
110 /*
111  *	Virtual memory objects maintain the actual data
112  *	associated with allocated virtual memory.  A given
113  *	page of memory exists within exactly one object.
114  *
115  *	An object is only deallocated when all "references"
116  *	are given up.  Only one "reference" to a given
117  *	region of an object should be writeable.
118  *
119  *	Associated with each object is a list of all resident
120  *	memory pages belonging to that object; this list is
121  *	maintained by the "vm_page" module, and locked by the object's
122  *	lock.
123  *
124  *	Each object also records a "pager" routine which is
125  *	used to retrieve (and store) pages to the proper backing
126  *	storage.  In addition, objects may be backed by other
127  *	objects from which they were virtual-copied.
128  *
129  *	The only items within the object structure that are
130  *	modified after creation are:
131  *		reference count		locked by object's lock
132  *		pager routine		locked by object's lock
133  *
134  */
135 
136 struct object_q vm_object_list;
137 struct mtx vm_object_list_mtx;	/* lock for object list and count */
138 
139 struct vm_object kernel_object_store;
140 struct vm_object kmem_object_store;
141 
142 SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");
143 
144 static long object_collapses;
145 SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
146     &object_collapses, 0, "VM object collapses");
147 
148 static long object_bypasses;
149 SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
150     &object_bypasses, 0, "VM object bypasses");
151 
152 static uma_zone_t obj_zone;
153 
154 static int vm_object_zinit(void *mem, int size, int flags);
155 
156 #ifdef INVARIANTS
157 static void vm_object_zdtor(void *mem, int size, void *arg);
158 
159 static void
160 vm_object_zdtor(void *mem, int size, void *arg)
161 {
162 	vm_object_t object;
163 
164 	object = (vm_object_t)mem;
165 	KASSERT(TAILQ_EMPTY(&object->memq),
166 	    ("object %p has resident pages",
167 	    object));
168 #if VM_NRESERVLEVEL > 0
169 	KASSERT(LIST_EMPTY(&object->rvq),
170 	    ("object %p has reservations",
171 	    object));
172 #endif
173 	KASSERT(object->cache == NULL,
174 	    ("object %p has cached pages",
175 	    object));
176 	KASSERT(object->paging_in_progress == 0,
177 	    ("object %p paging_in_progress = %d",
178 	    object, object->paging_in_progress));
179 	KASSERT(object->resident_page_count == 0,
180 	    ("object %p resident_page_count = %d",
181 	    object, object->resident_page_count));
182 	KASSERT(object->shadow_count == 0,
183 	    ("object %p shadow_count = %d",
184 	    object, object->shadow_count));
185 }
186 #endif
187 
188 static int
189 vm_object_zinit(void *mem, int size, int flags)
190 {
191 	vm_object_t object;
192 
193 	object = (vm_object_t)mem;
194 	bzero(&object->mtx, sizeof(object->mtx));
195 	VM_OBJECT_LOCK_INIT(object, "standard object");
196 
197 	/* These are true for any object that has been freed */
198 	object->paging_in_progress = 0;
199 	object->resident_page_count = 0;
200 	object->shadow_count = 0;
201 	return (0);
202 }
203 
204 void
205 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
206 {
207 
208 	TAILQ_INIT(&object->memq);
209 	LIST_INIT(&object->shadow_head);
210 
211 	object->root = NULL;
212 	object->type = type;
213 	object->size = size;
214 	object->generation = 1;
215 	object->ref_count = 1;
216 	object->memattr = VM_MEMATTR_DEFAULT;
217 	object->flags = 0;
218 	object->cred = NULL;
219 	object->charge = 0;
220 	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
221 		object->flags = OBJ_ONEMAPPING;
222 	object->pg_color = 0;
223 	object->handle = NULL;
224 	object->backing_object = NULL;
225 	object->backing_object_offset = (vm_ooffset_t) 0;
226 #if VM_NRESERVLEVEL > 0
227 	LIST_INIT(&object->rvq);
228 #endif
229 	object->cache = NULL;
230 
231 	mtx_lock(&vm_object_list_mtx);
232 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
233 	mtx_unlock(&vm_object_list_mtx);
234 }
235 
236 /*
237  *	vm_object_init:
238  *
239  *	Initialize the VM objects module.
240  */
241 void
242 vm_object_init(void)
243 {
244 	TAILQ_INIT(&vm_object_list);
245 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
246 
247 	VM_OBJECT_LOCK_INIT(kernel_object, "kernel object");
248 	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
249 	    kernel_object);
250 #if VM_NRESERVLEVEL > 0
251 	kernel_object->flags |= OBJ_COLORED;
252 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
253 #endif
254 
255 	VM_OBJECT_LOCK_INIT(kmem_object, "kmem object");
256 	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
257 	    kmem_object);
258 #if VM_NRESERVLEVEL > 0
259 	kmem_object->flags |= OBJ_COLORED;
260 	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
261 #endif
262 
263 	/*
264 	 * The lock portion of struct vm_object must be type stable due
265 	 * to vm_pageout_fallback_object_lock locking a vm object
266 	 * without holding any references to it.
267 	 */
268 	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
269 #ifdef INVARIANTS
270 	    vm_object_zdtor,
271 #else
272 	    NULL,
273 #endif
274 	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
275 }
276 
277 void
278 vm_object_clear_flag(vm_object_t object, u_short bits)
279 {
280 
281 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
282 	object->flags &= ~bits;
283 }
284 
285 /*
286  *	Sets the default memory attribute for the specified object.  Pages
287  *	that are allocated to this object are by default assigned this memory
288  *	attribute.
289  *
290  *	Presently, this function must be called before any pages are allocated
291  *	to the object.  In the future, this requirement may be relaxed for
292  *	"default" and "swap" objects.
293  */
294 int
295 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
296 {
297 
298 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
299 	switch (object->type) {
300 	case OBJT_DEFAULT:
301 	case OBJT_DEVICE:
302 	case OBJT_PHYS:
303 	case OBJT_SG:
304 	case OBJT_SWAP:
305 	case OBJT_VNODE:
306 		if (!TAILQ_EMPTY(&object->memq))
307 			return (KERN_FAILURE);
308 		break;
309 	case OBJT_DEAD:
310 		return (KERN_INVALID_ARGUMENT);
311 	}
312 	object->memattr = memattr;
313 	return (KERN_SUCCESS);
314 }
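
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller would pick the attribute right after creating the object and
 * before any page is allocated; the attribute values themselves are
 * machine-dependent.
 *
 *	VM_OBJECT_LOCK(obj);
 *	if (vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE) !=
 *	    KERN_SUCCESS)
 *		... obj is dead or already has resident pages ...
 *	VM_OBJECT_UNLOCK(obj);
 */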
315 
316 void
317 vm_object_pip_add(vm_object_t object, short i)
318 {
319 
320 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
321 	object->paging_in_progress += i;
322 }
323 
324 void
325 vm_object_pip_subtract(vm_object_t object, short i)
326 {
327 
328 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
329 	object->paging_in_progress -= i;
330 }
331 
332 void
333 vm_object_pip_wakeup(vm_object_t object)
334 {
335 
336 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
337 	object->paging_in_progress--;
338 	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
339 		vm_object_clear_flag(object, OBJ_PIPWNT);
340 		wakeup(object);
341 	}
342 }
343 
344 void
345 vm_object_pip_wakeupn(vm_object_t object, short i)
346 {
347 
348 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
349 	if (i)
350 		object->paging_in_progress -= i;
351 	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
352 		vm_object_clear_flag(object, OBJ_PIPWNT);
353 		wakeup(object);
354 	}
355 }
356 
357 void
358 vm_object_pip_wait(vm_object_t object, char *waitid)
359 {
360 
361 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
362 	while (object->paging_in_progress) {
363 		object->flags |= OBJ_PIPWNT;
364 		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
365 	}
366 }
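
/*
 * Illustrative sketch (hypothetical caller): the usual paging-in-
 * progress bracket.  A pager or the pageout daemon pins the object
 * against termination for the duration of an I/O:
 *
 *	VM_OBJECT_LOCK(object);
 *	vm_object_pip_add(object, 1);
 *	... drop the lock, start and complete the I/O, relock ...
 *	vm_object_pip_wakeup(object);
 *	VM_OBJECT_UNLOCK(object);
 *
 * vm_object_terminate() uses vm_object_pip_wait() to block until all
 * such brackets have closed.
 */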
367 
368 /*
369  *	vm_object_allocate:
370  *
371  *	Returns a new object with the given size.
372  */
373 vm_object_t
374 vm_object_allocate(objtype_t type, vm_pindex_t size)
375 {
376 	vm_object_t object;
377 
378 	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
379 	_vm_object_allocate(type, size, object);
380 	return (object);
381 }
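
/*
 * Illustrative sketch (hypothetical caller, the size is an example
 * only): creating and releasing a transient anonymous object.  Note
 * that the size argument is in pages, not bytes.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, atop(16 * PAGE_SIZE));
 *	... map the object or grab pages from it ...
 *	vm_object_deallocate(obj);
 */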
382 
384 /*
385  *	vm_object_reference:
386  *
387  *	Gets another reference to the given object.  Note: OBJ_DEAD
388  *	objects can be referenced during final cleaning.
389  */
390 void
391 vm_object_reference(vm_object_t object)
392 {
393 	if (object == NULL)
394 		return;
395 	VM_OBJECT_LOCK(object);
396 	vm_object_reference_locked(object);
397 	VM_OBJECT_UNLOCK(object);
398 }
399 
400 /*
401  *	vm_object_reference_locked:
402  *
403  *	Gets another reference to the given object.
404  *
405  *	The object must be locked.
406  */
407 void
408 vm_object_reference_locked(vm_object_t object)
409 {
410 	struct vnode *vp;
411 
412 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
413 	object->ref_count++;
414 	if (object->type == OBJT_VNODE) {
415 		vp = object->handle;
416 		vref(vp);
417 	}
418 }
419 
420 /*
421  * Handle deallocating an object of type OBJT_VNODE.
422  */
423 static void
424 vm_object_vndeallocate(vm_object_t object)
425 {
426 	struct vnode *vp = (struct vnode *) object->handle;
427 
428 	VFS_ASSERT_GIANT(vp->v_mount);
429 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
430 	KASSERT(object->type == OBJT_VNODE,
431 	    ("vm_object_vndeallocate: not a vnode object"));
432 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
433 #ifdef INVARIANTS
434 	if (object->ref_count == 0) {
435 		vprint("vm_object_vndeallocate", vp);
436 		panic("vm_object_vndeallocate: bad object reference count");
437 	}
438 #endif
439 
440 	if (object->ref_count > 1) {
441 		object->ref_count--;
442 		VM_OBJECT_UNLOCK(object);
443 		/* vrele may need the vnode lock. */
444 		vrele(vp);
445 	} else {
446 		vhold(vp);
447 		VM_OBJECT_UNLOCK(object);
448 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
449 		vdrop(vp);
450 		VM_OBJECT_LOCK(object);
451 		object->ref_count--;
452 		if (object->type == OBJT_DEAD) {
453 			VM_OBJECT_UNLOCK(object);
454 			VOP_UNLOCK(vp, 0);
455 		} else {
456 			if (object->ref_count == 0)
457 				vp->v_vflag &= ~VV_TEXT;
458 			VM_OBJECT_UNLOCK(object);
459 			vput(vp);
460 		}
461 	}
462 }
463 
464 /*
465  *	vm_object_deallocate:
466  *
467  *	Release a reference to the specified object,
468  *	gained either through a vm_object_allocate
469  *	or a vm_object_reference call.  When all references
470  *	are gone, storage associated with this object
471  *	may be relinquished.
472  *
473  *	No object may be locked.
474  */
475 void
476 vm_object_deallocate(vm_object_t object)
477 {
478 	vm_object_t temp;
479 
480 	while (object != NULL) {
481 		int vfslocked;
482 
483 		vfslocked = 0;
484 	restart:
485 		VM_OBJECT_LOCK(object);
486 		if (object->type == OBJT_VNODE) {
487 			struct vnode *vp = (struct vnode *) object->handle;
488 
489 			/*
490 			 * Conditionally acquire Giant for a vnode-backed
491 			 * object.  We have to be careful since the type of
492 			 * a vnode object can change while the object is
493 			 * unlocked.
494 			 */
495 			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
496 				vfslocked = 1;
497 				if (!mtx_trylock(&Giant)) {
498 					VM_OBJECT_UNLOCK(object);
499 					mtx_lock(&Giant);
500 					goto restart;
501 				}
502 			}
503 			vm_object_vndeallocate(object);
504 			VFS_UNLOCK_GIANT(vfslocked);
505 			return;
506 		} else
507 			/*
508 			 * This is to handle the case that the object
509 			 * changed type while we dropped its lock to
510 			 * obtain Giant.
511 			 */
512 			VFS_UNLOCK_GIANT(vfslocked);
513 
514 		KASSERT(object->ref_count != 0,
515 			("vm_object_deallocate: object deallocated too many times (type %d)", object->type));
516 
517 		/*
518 		 * If the reference count goes to 0 we start calling
519 		 * vm_object_terminate() on the object chain.
520 		 * A ref count of 1 may be a special case depending on the
521 		 * shadow count being 0 or 1.
522 		 */
523 		object->ref_count--;
524 		if (object->ref_count > 1) {
525 			VM_OBJECT_UNLOCK(object);
526 			return;
527 		} else if (object->ref_count == 1) {
528 			if (object->shadow_count == 0 &&
529 			    object->handle == NULL &&
530 			    (object->type == OBJT_DEFAULT ||
531 			     object->type == OBJT_SWAP)) {
532 				vm_object_set_flag(object, OBJ_ONEMAPPING);
533 			} else if ((object->shadow_count == 1) &&
534 			    (object->handle == NULL) &&
535 			    (object->type == OBJT_DEFAULT ||
536 			     object->type == OBJT_SWAP)) {
537 				vm_object_t robject;
538 
539 				robject = LIST_FIRST(&object->shadow_head);
540 				KASSERT(robject != NULL,
541 				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
542 					 object->ref_count,
543 					 object->shadow_count));
544 				if (!VM_OBJECT_TRYLOCK(robject)) {
545 					/*
546 					 * Avoid a potential deadlock.
547 					 */
548 					object->ref_count++;
549 					VM_OBJECT_UNLOCK(object);
550 					/*
551 					 * More likely than not the thread
552 					 * holding robject's lock has lower
553 					 * priority than the current thread.
554 					 * Let the lower priority thread run.
555 					 */
556 					pause("vmo_de", 1);
557 					continue;
558 				}
559 				/*
560 				 * Collapse object into its shadow unless its
561 				 * shadow is dead.  In that case, object will
562 				 * be deallocated by the thread that is
563 				 * deallocating its shadow.
564 				 */
565 				if ((robject->flags & OBJ_DEAD) == 0 &&
566 				    (robject->handle == NULL) &&
567 				    (robject->type == OBJT_DEFAULT ||
568 				     robject->type == OBJT_SWAP)) {
569 
570 					robject->ref_count++;
571 retry:
572 					if (robject->paging_in_progress) {
573 						VM_OBJECT_UNLOCK(object);
574 						vm_object_pip_wait(robject,
575 						    "objde1");
576 						temp = robject->backing_object;
577 						if (object == temp) {
578 							VM_OBJECT_LOCK(object);
579 							goto retry;
580 						}
581 					} else if (object->paging_in_progress) {
582 						VM_OBJECT_UNLOCK(robject);
583 						object->flags |= OBJ_PIPWNT;
584 						msleep(object,
585 						    VM_OBJECT_MTX(object),
586 						    PDROP | PVM, "objde2", 0);
587 						VM_OBJECT_LOCK(robject);
588 						temp = robject->backing_object;
589 						if (object == temp) {
590 							VM_OBJECT_LOCK(object);
591 							goto retry;
592 						}
593 					} else
594 						VM_OBJECT_UNLOCK(object);
595 
596 					if (robject->ref_count == 1) {
597 						robject->ref_count--;
598 						object = robject;
599 						goto doterm;
600 					}
601 					object = robject;
602 					vm_object_collapse(object);
603 					VM_OBJECT_UNLOCK(object);
604 					continue;
605 				}
606 				VM_OBJECT_UNLOCK(robject);
607 			}
608 			VM_OBJECT_UNLOCK(object);
609 			return;
610 		}
611 doterm:
612 		temp = object->backing_object;
613 		if (temp != NULL) {
614 			VM_OBJECT_LOCK(temp);
615 			LIST_REMOVE(object, shadow_list);
616 			temp->shadow_count--;
617 			VM_OBJECT_UNLOCK(temp);
618 			object->backing_object = NULL;
619 		}
620 		/*
621 		 * Don't double-terminate, we could be in a termination
622 		 * recursion due to the terminate having to sync data
623 		 * to disk.
624 		 */
625 		if ((object->flags & OBJ_DEAD) == 0)
626 			vm_object_terminate(object);
627 		else
628 			VM_OBJECT_UNLOCK(object);
629 		object = temp;
630 	}
631 }
632 
633 /*
634  *	vm_object_destroy removes the object from the global object list
635  *      and frees the space for the object.
636  */
637 void
638 vm_object_destroy(vm_object_t object)
639 {
640 
641 	/*
642 	 * Remove the object from the global object list.
643 	 */
644 	mtx_lock(&vm_object_list_mtx);
645 	TAILQ_REMOVE(&vm_object_list, object, object_list);
646 	mtx_unlock(&vm_object_list_mtx);
647 
648 	/*
649 	 * Release the allocation charge.
650 	 */
651 	if (object->cred != NULL) {
652 		KASSERT(object->type == OBJT_DEFAULT ||
653 		    object->type == OBJT_SWAP,
654 		    ("vm_object_terminate: non-swap obj %p has cred",
655 		     object));
656 		swap_release_by_cred(object->charge, object->cred);
657 		object->charge = 0;
658 		crfree(object->cred);
659 		object->cred = NULL;
660 	}
661 
662 	/*
663 	 * Free the space for the object.
664 	 */
665 	uma_zfree(obj_zone, object);
666 }
667 
668 /*
669  *	vm_object_terminate actually destroys the specified object, freeing
670  *	up all previously used resources.
671  *
672  *	The object must be locked.
673  *	This routine may block.
674  */
675 void
676 vm_object_terminate(vm_object_t object)
677 {
678 	vm_page_t p, p_next;
679 
680 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
681 
682 	/*
683 	 * Make sure no one uses us.
684 	 */
685 	vm_object_set_flag(object, OBJ_DEAD);
686 
687 	/*
688 	 * wait for the pageout daemon to be done with the object
689 	 */
690 	vm_object_pip_wait(object, "objtrm");
691 
692 	KASSERT(!object->paging_in_progress,
693 		("vm_object_terminate: pageout in progress"));
694 
695 	/*
696 	 * Clean and free the pages, as appropriate.  All references to the
697 	 * object are gone, so no new references can appear.
698 	 */
699 	if (object->type == OBJT_VNODE) {
700 		struct vnode *vp = (struct vnode *)object->handle;
701 
702 		/*
703 		 * Clean pages and flush buffers.
704 		 */
705 		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
706 		VM_OBJECT_UNLOCK(object);
707 
708 		vinvalbuf(vp, V_SAVE, 0, 0);
709 
710 		VM_OBJECT_LOCK(object);
711 	}
712 
713 	KASSERT(object->ref_count == 0,
714 		("vm_object_terminate: object with references, ref_count=%d",
715 		object->ref_count));
716 
717 	/*
718 	 * Free any remaining pageable pages.  This also removes them from the
719 	 * paging queues.  However, don't free wired pages, just remove them
720 	 * from the object.  Rather than incrementally removing each page from
721 	 * the object, the page and object are reset to any empty state.
722 	 * the object, the page and object are reset to an empty state.
723 	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
724 		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
725 		    ("vm_object_terminate: freeing busy page %p", p));
726 		vm_page_lock(p);
727 		/*
728 		 * Optimize the page's removal from the object by resetting
729 		 * its "object" field.  Specifically, if the page is not
730 		 * wired, then the effect of this assignment is that
731 		 * vm_page_free()'s call to vm_page_remove() will return
732 		 * immediately without modifying the page or the object.
733 		 */
734 		p->object = NULL;
735 		if (p->wire_count == 0) {
736 			vm_page_free(p);
737 			PCPU_INC(cnt.v_pfree);
738 		}
739 		vm_page_unlock(p);
740 	}
741 	/*
742 	 * If the object contained any pages, then reset it to an empty state.
743 	 * None of the object's fields, including "resident_page_count", were
744 	 * modified by the preceding loop.
745 	 */
746 	if (object->resident_page_count != 0) {
747 		object->root = NULL;
748 		TAILQ_INIT(&object->memq);
749 		object->resident_page_count = 0;
750 		if (object->type == OBJT_VNODE)
751 			vdrop(object->handle);
752 	}
753 
754 #if VM_NRESERVLEVEL > 0
755 	if (__predict_false(!LIST_EMPTY(&object->rvq)))
756 		vm_reserv_break_all(object);
757 #endif
758 	if (__predict_false(object->cache != NULL))
759 		vm_page_cache_free(object, 0, 0);
760 
761 	/*
762 	 * Let the pager know object is dead.
763 	 */
764 	vm_pager_deallocate(object);
765 	VM_OBJECT_UNLOCK(object);
766 
767 	vm_object_destroy(object);
768 }
769 
770 /*
771  * Make the page read-only so that we can clear the object flags.  However, if
772  * this is a nosync mmap then the object is likely to stay dirty so do not
773  * mess with the page and do not clear the object flags.  Returns TRUE if the
774  * page should be flushed, and FALSE otherwise.
775  */
776 static boolean_t
777 vm_object_page_remove_write(vm_page_t p, int flags, int *clearobjflags)
778 {
779 
780 	/*
781 	 * If we have been asked to skip nosync pages and this is a
782 	 * nosync page, skip it.  Note that the object flags were not
783 	 * cleared in this case so we do not have to set them.
784 	 */
785 	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
786 		*clearobjflags = 0;
787 		return (FALSE);
788 	} else {
789 		pmap_remove_write(p);
790 		return (p->dirty != 0);
791 	}
792 }
793 
794 /*
795  *	vm_object_page_clean
796  *
797  *	Clean all dirty pages in the specified range of the object.  Leaves
798  *	the page on whatever queue it is currently on.  If NOSYNC is set, do not
799  *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
800  *	leaving the object dirty.
801  *
802  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
803  *	synchronous clustering mode implementation.
804  *
805  *	Odd semantics: if "end" is zero, we clean from "start" to the end of the object.
806  *
807  *	The object must be locked.
808  */
809 void
810 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
811     int flags)
812 {
813 	vm_page_t np, p;
814 	vm_pindex_t pi, tend, tstart;
815 	int clearobjflags, curgeneration, n, pagerflags;
816 
817 	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
818 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
819 	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
820 	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
821 	    object->resident_page_count == 0)
822 		return;
823 
824 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
825 	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
826 	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
827 
828 	tstart = OFF_TO_IDX(start);
829 	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
830 	clearobjflags = tstart == 0 && tend >= object->size;
831 
832 rescan:
833 	curgeneration = object->generation;
834 
835 	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
836 		pi = p->pindex;
837 		if (pi >= tend)
838 			break;
839 		np = TAILQ_NEXT(p, listq);
840 		if (p->valid == 0)
841 			continue;
842 		if (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
843 			if (object->generation != curgeneration)
844 				goto rescan;
845 			np = vm_page_find_least(object, pi);
846 			continue;
847 		}
848 		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
849 			continue;
850 
851 		n = vm_object_page_collect_flush(object, p, pagerflags,
852 		    flags, &clearobjflags);
853 		if (object->generation != curgeneration)
854 			goto rescan;
855 
856 		/*
857 		 * If the VOP_PUTPAGES() did a truncated write, such
858 		 * that even the first page of the run is not fully
859 		 * written, vm_pageout_flush() returns 0 as the run
860 		 * length.  Since the condition that caused truncated
861 		 * write may be permanent, e.g. exhausted free space,
862 		 * accepting n == 0 would cause an infinite loop.
863 		 *
864 		 * Forwarding the iterator leaves the unwritten page
865 		 * behind, but there is not much we can do if the
866 		 * filesystem refuses to write it.
867 		 */
868 		if (n == 0)
869 			n = 1;
870 		np = vm_page_find_least(object, pi + n);
871 	}
872 #if 0
873 	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
874 #endif
875 
876 	if (clearobjflags)
877 		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
878 }
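
/*
 * Illustrative sketch (hypothetical caller): a synchronous flush of an
 * entire vnode-backed object, as vm_object_terminate() requests it.
 * The vnode itself must already be locked; see vm_object_sync() below
 * for the full locking sequence.
 *
 *	VM_OBJECT_LOCK(object);
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *	VM_OBJECT_UNLOCK(object);
 */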
879 
880 static int
881 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
882     int flags, int *clearobjflags)
883 {
884 	vm_page_t ma[vm_pageout_page_count], p_first, tp;
885 	int count, i, mreq, runlen;
886 
887 	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
888 	vm_page_lock_assert(p, MA_NOTOWNED);
889 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
890 
891 	count = 1;
892 	mreq = 0;
893 
894 	for (tp = p; count < vm_pageout_page_count; count++) {
895 		tp = vm_page_next(tp);
896 		if (tp == NULL || tp->busy != 0 || (tp->oflags & VPO_BUSY) != 0)
897 			break;
898 		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
899 			break;
900 	}
901 
902 	for (p_first = p; count < vm_pageout_page_count; count++) {
903 		tp = vm_page_prev(p_first);
904 		if (tp == NULL || tp->busy != 0 || (tp->oflags & VPO_BUSY) != 0)
905 			break;
906 		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
907 			break;
908 		p_first = tp;
909 		mreq++;
910 	}
911 
912 	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
913 		ma[i] = tp;
914 
915 	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen);
916 	return (runlen);
917 }
918 
919 /*
920  * Note that there is absolutely no sense in writing out
921  * anonymous objects, so we track down the vnode object
922  * to write out.
923  * We invalidate (remove) all pages from the address space
924  * for semantic correctness.
925  *
926  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
927  * may start out with a NULL object.
928  */
929 void
930 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
931     boolean_t syncio, boolean_t invalidate)
932 {
933 	vm_object_t backing_object;
934 	struct vnode *vp;
935 	struct mount *mp;
936 	int flags;
937 
938 	if (object == NULL)
939 		return;
940 	VM_OBJECT_LOCK(object);
941 	while ((backing_object = object->backing_object) != NULL) {
942 		VM_OBJECT_LOCK(backing_object);
943 		offset += object->backing_object_offset;
944 		VM_OBJECT_UNLOCK(object);
945 		object = backing_object;
946 		if (object->size < OFF_TO_IDX(offset + size))
947 			size = IDX_TO_OFF(object->size) - offset;
948 	}
949 	/*
950 	 * Flush pages if writing is allowed, invalidate them
951 	 * if invalidation requested.  Pages undergoing I/O
952 	 * will be ignored by vm_object_page_remove().
953 	 *
954 	 * We cannot lock the vnode and then wait for paging
955 	 * to complete without deadlocking against vm_fault.
956 	 * Instead we simply call vm_object_page_remove() and
957 	 * allow it to block internally on a page-by-page
958 	 * basis when it encounters pages undergoing async
959 	 * I/O.
960 	 */
961 	if (object->type == OBJT_VNODE &&
962 	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
963 		int vfslocked;
964 		vp = object->handle;
965 		VM_OBJECT_UNLOCK(object);
966 		(void) vn_start_write(vp, &mp, V_WAIT);
967 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
968 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
969 		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
970 		flags |= invalidate ? OBJPC_INVAL : 0;
971 		VM_OBJECT_LOCK(object);
972 		vm_object_page_clean(object, offset, offset + size, flags);
973 		VM_OBJECT_UNLOCK(object);
974 		VOP_UNLOCK(vp, 0);
975 		VFS_UNLOCK_GIANT(vfslocked);
976 		vn_finished_write(mp);
977 		VM_OBJECT_LOCK(object);
978 	}
979 	if ((object->type == OBJT_VNODE ||
980 	     object->type == OBJT_DEVICE) && invalidate) {
981 		boolean_t purge;
982 		purge = old_msync || (object->type == OBJT_DEVICE);
983 		vm_object_page_remove(object,
984 		    OFF_TO_IDX(offset),
985 		    OFF_TO_IDX(offset + size + PAGE_MASK),
986 		    purge ? FALSE : TRUE);
987 	}
988 	VM_OBJECT_UNLOCK(object);
989 }
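
/*
 * Illustrative sketch: msync(2) on a shared file mapping reduces to a
 * call of roughly this form from the vm_map layer (the argument
 * expressions here are schematic, not the exact upstream code):
 *
 *	vm_object_sync(entry->object.vm_object,
 *	    entry->offset + (addr - entry->start), size,
 *	    (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0);
 */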
990 
991 /*
992  *	vm_object_madvise:
993  *
994  *	Implements the madvise function at the object/page level.
995  *
996  *	MADV_WILLNEED	(any object)
997  *
998  *	    Activate the specified pages if they are resident.
999  *
1000  *	MADV_DONTNEED	(any object)
1001  *
1002  *	    Deactivate the specified pages if they are resident.
1003  *
1004  *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
1005  *			 OBJ_ONEMAPPING only)
1006  *
1007  *	    Deactivate and clean the specified pages if they are
1008  *	    resident.  This permits the process to reuse the pages
1009  *	    without faulting or the kernel to reclaim the pages
1010  *	    without I/O.
1011  */
1012 void
1013 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
1014 {
1015 	vm_pindex_t end, tpindex;
1016 	vm_object_t backing_object, tobject;
1017 	vm_page_t m;
1018 
1019 	if (object == NULL)
1020 		return;
1021 	VM_OBJECT_LOCK(object);
1022 	end = pindex + count;
1023 	/*
1024 	 * Locate and adjust resident pages
1025 	 */
1026 	for (; pindex < end; pindex += 1) {
1027 relookup:
1028 		tobject = object;
1029 		tpindex = pindex;
1030 shadowlookup:
1031 		/*
1032 		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1033 		 * and those pages must be OBJ_ONEMAPPING.
1034 		 */
1035 		if (advise == MADV_FREE) {
1036 			if ((tobject->type != OBJT_DEFAULT &&
1037 			     tobject->type != OBJT_SWAP) ||
1038 			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
1039 				goto unlock_tobject;
1040 			}
1041 		} else if (tobject->type == OBJT_PHYS)
1042 			goto unlock_tobject;
1043 		m = vm_page_lookup(tobject, tpindex);
1044 		if (m == NULL && advise == MADV_WILLNEED) {
1045 			/*
1046 			 * If the page is cached, reactivate it.
1047 			 */
1048 			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
1049 			    VM_ALLOC_NOBUSY);
1050 		}
1051 		if (m == NULL) {
1052 			/*
1053 			 * There may be swap even if there is no backing page
1054 			 */
1055 			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1056 				swap_pager_freespace(tobject, tpindex, 1);
1057 			/*
1058 			 * next object
1059 			 */
1060 			backing_object = tobject->backing_object;
1061 			if (backing_object == NULL)
1062 				goto unlock_tobject;
1063 			VM_OBJECT_LOCK(backing_object);
1064 			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1065 			if (tobject != object)
1066 				VM_OBJECT_UNLOCK(tobject);
1067 			tobject = backing_object;
1068 			goto shadowlookup;
1069 		} else if (m->valid != VM_PAGE_BITS_ALL)
1070 			goto unlock_tobject;
1071 		/*
1072 		 * If the page is not in a normal state, skip it.
1073 		 */
1074 		vm_page_lock(m);
1075 		if (m->hold_count != 0 || m->wire_count != 0) {
1076 			vm_page_unlock(m);
1077 			goto unlock_tobject;
1078 		}
1079 		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1080 		    ("vm_object_madvise: page %p is not managed", m));
1081 		if ((m->oflags & VPO_BUSY) || m->busy) {
1082 			if (advise == MADV_WILLNEED) {
1083 				/*
1084 				 * Reference the page before unlocking and
1085 				 * sleeping so that the page daemon is less
1086 				 * likely to reclaim it.
1087 				 */
1088 				vm_page_lock_queues();
1089 				vm_page_flag_set(m, PG_REFERENCED);
1090 				vm_page_unlock_queues();
1091 			}
1092 			vm_page_unlock(m);
1093 			if (object != tobject)
1094 				VM_OBJECT_UNLOCK(object);
1095 			m->oflags |= VPO_WANTED;
1096 			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo",
1097 			    0);
1098 			VM_OBJECT_LOCK(object);
1099   			goto relookup;
1100 		}
1101 		if (advise == MADV_WILLNEED) {
1102 			vm_page_activate(m);
1103 		} else if (advise == MADV_DONTNEED) {
1104 			vm_page_dontneed(m);
1105 		} else if (advise == MADV_FREE) {
1106 			/*
1107 			 * Mark the page clean.  This will allow the page
1108 			 * to be freed up by the system.  However, such pages
1109 			 * are often reused quickly by malloc()/free()
1110 			 * so we do not do anything that would cause
1111 			 * a page fault if we can help it.
1112 			 *
1113 			 * Specifically, we do not try to actually free
1114 			 * the page now nor do we try to put it in the
1115 			 * cache (which would cause a page fault on reuse).
1116 			 *
1117 			 * But we do make the page as freeable as we
1118 			 * can without actually taking the step of unmapping
1119 			 * it.
1120 			 */
1121 			pmap_clear_modify(m);
1122 			m->dirty = 0;
1123 			m->act_count = 0;
1124 			vm_page_dontneed(m);
1125 		}
1126 		vm_page_unlock(m);
1127 		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1128 			swap_pager_freespace(tobject, tpindex, 1);
1129 unlock_tobject:
1130 		if (tobject != object)
1131 			VM_OBJECT_UNLOCK(tobject);
1132 	}
1133 	VM_OBJECT_UNLOCK(object);
1134 }
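
/*
 * Illustrative sketch: madvise(2) arrives here via the vm_map layer;
 * e.g. MADV_FREE over a single anonymous page at byte offset "offset"
 * (a hypothetical variable) is simply:
 *
 *	vm_object_madvise(obj, OFF_TO_IDX(offset), 1, MADV_FREE);
 *
 * The page keeps its mappings but is marked clean, so the pageout
 * daemon may reclaim it without any write-back.
 */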
1135 
1136 /*
1137  *	vm_object_shadow:
1138  *
1139  *	Create a new object which is backed by the
1140  *	specified existing object range.  The source
1141  *	object reference is deallocated.
1142  *
1143  *	The new object and offset into that object
1144  *	are returned in the source parameters.
1145  */
1146 void
1147 vm_object_shadow(
1148 	vm_object_t *object,	/* IN/OUT */
1149 	vm_ooffset_t *offset,	/* IN/OUT */
1150 	vm_size_t length)
1151 {
1152 	vm_object_t source;
1153 	vm_object_t result;
1154 
1155 	source = *object;
1156 
1157 	/*
1158 	 * Don't create the new object if the old object isn't shared.
1159 	 */
1160 	if (source != NULL) {
1161 		VM_OBJECT_LOCK(source);
1162 		if (source->ref_count == 1 &&
1163 		    source->handle == NULL &&
1164 		    (source->type == OBJT_DEFAULT ||
1165 		     source->type == OBJT_SWAP)) {
1166 			VM_OBJECT_UNLOCK(source);
1167 			return;
1168 		}
1169 		VM_OBJECT_UNLOCK(source);
1170 	}
1171 
1172 	/*
1173 	 * Allocate a new object with the given length.
1174 	 */
1175 	result = vm_object_allocate(OBJT_DEFAULT, atop(length));
1176 
1177 	/*
1178 	 * The new object shadows the source object, adding a reference to it.
1179 	 * Our caller changes his reference to point to the new object,
1180 	 * removing a reference to the source object.  Net result: no change
1181 	 * of reference count.
1182 	 *
1183 	 * Try to optimize the result object's page color when shadowing
1184 	 * in order to maintain page coloring consistency in the combined
1185 	 * shadowed object.
1186 	 */
1187 	result->backing_object = source;
1188 	/*
1189 	 * Store the offset into the source object, and fix up the offset into
1190 	 * the new object.
1191 	 */
1192 	result->backing_object_offset = *offset;
1193 	if (source != NULL) {
1194 		VM_OBJECT_LOCK(source);
1195 		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1196 		source->shadow_count++;
1197 #if VM_NRESERVLEVEL > 0
1198 		result->flags |= source->flags & OBJ_COLORED;
1199 		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
1200 		    ((1 << (VM_NFREEORDER - 1)) - 1);
1201 #endif
1202 		VM_OBJECT_UNLOCK(source);
1203 	}
1204 
1206 	/*
1207 	 * Return the new object and offset.
1208 	 */
1209 	*offset = 0;
1210 	*object = result;
1211 }
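
/*
 * Illustrative sketch (schematic, not the exact vm_map code): a
 * copy-on-write setup pushes a shadow object in front of a map entry.
 *
 *	vm_object_t obj = entry->object.vm_object;
 *	vm_ooffset_t off = entry->offset;
 *
 *	vm_object_shadow(&obj, &off, entry->end - entry->start);
 *	entry->object.vm_object = obj;
 *	entry->offset = off;
 *
 * On return "obj" is either a fresh OBJT_DEFAULT shadow of the source
 * or, if the source was not shared, the source object itself.
 */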
1212 
1213 /*
1214  *	vm_object_split:
1215  *
1216  * Split the pages in a map entry into a new object.  This affords
1217  * easier removal of unused pages, and keeps object inheritance from
1218  * being a negative impact on memory usage.
1219  */
1220 void
1221 vm_object_split(vm_map_entry_t entry)
1222 {
1223 	vm_page_t m, m_next;
1224 	vm_object_t orig_object, new_object, source;
1225 	vm_pindex_t idx, offidxstart;
1226 	vm_size_t size;
1227 
1228 	orig_object = entry->object.vm_object;
1229 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1230 		return;
1231 	if (orig_object->ref_count <= 1)
1232 		return;
1233 	VM_OBJECT_UNLOCK(orig_object);
1234 
1235 	offidxstart = OFF_TO_IDX(entry->offset);
1236 	size = atop(entry->end - entry->start);
1237 
1238 	/*
1239 	 * If swap_pager_copy() is later called, it will convert new_object
1240 	 * into a swap object.
1241 	 */
1242 	new_object = vm_object_allocate(OBJT_DEFAULT, size);
1243 
1244 	/*
1245 	 * At this point, the new object is still private, so the order in
1246 	 * which the original and new objects are locked does not matter.
1247 	 */
1248 	VM_OBJECT_LOCK(new_object);
1249 	VM_OBJECT_LOCK(orig_object);
1250 	source = orig_object->backing_object;
1251 	if (source != NULL) {
1252 		VM_OBJECT_LOCK(source);
1253 		if ((source->flags & OBJ_DEAD) != 0) {
1254 			VM_OBJECT_UNLOCK(source);
1255 			VM_OBJECT_UNLOCK(orig_object);
1256 			VM_OBJECT_UNLOCK(new_object);
1257 			vm_object_deallocate(new_object);
1258 			VM_OBJECT_LOCK(orig_object);
1259 			return;
1260 		}
1261 		LIST_INSERT_HEAD(&source->shadow_head,
1262 				  new_object, shadow_list);
1263 		source->shadow_count++;
1264 		vm_object_reference_locked(source);	/* for new_object */
1265 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
1266 		VM_OBJECT_UNLOCK(source);
1267 		new_object->backing_object_offset =
1268 			orig_object->backing_object_offset + entry->offset;
1269 		new_object->backing_object = source;
1270 	}
1271 	if (orig_object->cred != NULL) {
1272 		new_object->cred = orig_object->cred;
1273 		crhold(orig_object->cred);
1274 		new_object->charge = ptoa(size);
1275 		KASSERT(orig_object->charge >= ptoa(size),
1276 		    ("orig_object->charge < 0"));
1277 		orig_object->charge -= ptoa(size);
1278 	}
1279 retry:
1280 	m = vm_page_find_least(orig_object, offidxstart);
1281 	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
1282 	    m = m_next) {
1283 		m_next = TAILQ_NEXT(m, listq);
1284 
1285 		/*
1286 		 * We must wait for pending I/O to complete before we can
1287 		 * rename the page.
1288 		 *
1289 		 * We do not have to VM_PROT_NONE the page as mappings should
1290 		 * not be changed by this operation.
1291 		 */
1292 		if ((m->oflags & VPO_BUSY) || m->busy) {
1293 			VM_OBJECT_UNLOCK(new_object);
1294 			m->oflags |= VPO_WANTED;
1295 			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
1296 			VM_OBJECT_LOCK(new_object);
1297 			goto retry;
1298 		}
1299 		vm_page_lock(m);
1300 		vm_page_rename(m, new_object, idx);
1301 		vm_page_unlock(m);
1302 		/* page automatically made dirty by rename and cache handled */
1303 		vm_page_busy(m);
1304 	}
1305 	if (orig_object->type == OBJT_SWAP) {
1306 		/*
1307 		 * swap_pager_copy() can sleep, in which case the orig_object's
1308 		 * and new_object's locks are released and reacquired.
1309 		 */
1310 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
1311 
1312 		/*
1313 		 * Transfer any cached pages from orig_object to new_object.
1314 		 */
1315 		if (__predict_false(orig_object->cache != NULL))
1316 			vm_page_cache_transfer(orig_object, offidxstart,
1317 			    new_object);
1318 	}
1319 	VM_OBJECT_UNLOCK(orig_object);
1320 	TAILQ_FOREACH(m, &new_object->memq, listq)
1321 		vm_page_wakeup(m);
1322 	VM_OBJECT_UNLOCK(new_object);
1323 	entry->object.vm_object = new_object;
1324 	entry->offset = 0LL;
1325 	vm_object_deallocate(orig_object);
1326 	VM_OBJECT_LOCK(new_object);
1327 }
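
/*
 * Illustrative sketch: the fork path (vm_map_copy_entry()) applies
 * this to an entry whose OBJ_ONEMAPPING anonymous object is about to
 * become shared, so each address range keeps a private object:
 *
 *	VM_OBJECT_LOCK(entry->object.vm_object);
 *	vm_object_split(entry);
 *	VM_OBJECT_UNLOCK(entry->object.vm_object);
 *
 * Because vm_object_split() replaces entry->object.vm_object and
 * returns with the new object locked, the unlock above operates on
 * the new object.
 */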
1328 
1329 #define	OBSC_TEST_ALL_SHADOWED	0x0001
1330 #define	OBSC_COLLAPSE_NOWAIT	0x0002
1331 #define	OBSC_COLLAPSE_WAIT	0x0004
1332 
1333 static int
1334 vm_object_backing_scan(vm_object_t object, int op)
1335 {
1336 	int r = 1;
1337 	vm_page_t p;
1338 	vm_object_t backing_object;
1339 	vm_pindex_t backing_offset_index;
1340 
1341 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1342 	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
1343 
1344 	backing_object = object->backing_object;
1345 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1346 
1347 	/*
1348 	 * Initial conditions
1349 	 */
1350 	if (op & OBSC_TEST_ALL_SHADOWED) {
1351 		/*
1352 		 * We do not want to have to test for the existence of cache
1353 		 * or swap pages in the backing object.  XXX but with the
1354 		 * new swapper this would be pretty easy to do.
1355 		 *
1356 		 * XXX what about anonymous MAP_SHARED memory that hasn't
1357 		 * been ZFOD faulted yet?  If we do not test for this, the
1358 		 * shadow test may succeed! XXX
1359 		 */
1360 		if (backing_object->type != OBJT_DEFAULT) {
1361 			return (0);
1362 		}
1363 	}
1364 	if (op & OBSC_COLLAPSE_WAIT) {
1365 		vm_object_set_flag(backing_object, OBJ_DEAD);
1366 	}
1367 
1368 	/*
1369 	 * Our scan
1370 	 */
1371 	p = TAILQ_FIRST(&backing_object->memq);
1372 	while (p) {
1373 		vm_page_t next = TAILQ_NEXT(p, listq);
1374 		vm_pindex_t new_pindex = p->pindex - backing_offset_index;
1375 
1376 		if (op & OBSC_TEST_ALL_SHADOWED) {
1377 			vm_page_t pp;
1378 
1379 			/*
1380 			 * Ignore pages outside the parent object's range
1381 			 * and outside the parent object's mapping of the
1382 			 * backing object.
1383 			 *
1384 			 * note that we do not busy the backing object's
1385 			 * page.
1386 			 */
1387 			if (
1388 			    p->pindex < backing_offset_index ||
1389 			    new_pindex >= object->size
1390 			) {
1391 				p = next;
1392 				continue;
1393 			}
1394 
1395 			/*
1396 			 * See if the parent has the page or if the parent's
1397 			 * object pager has the page.  If the parent has the
1398 			 * page but the page is not valid, the parent's
1399 			 * object pager must have the page.
1400 			 *
1401 			 * If this fails, the parent does not completely shadow
1402 			 * the object and we might as well give up now.
1403 			 */
1404 
1405 			pp = vm_page_lookup(object, new_pindex);
1406 			if (
1407 			    (pp == NULL || pp->valid == 0) &&
1408 			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
1409 			) {
1410 				r = 0;
1411 				break;
1412 			}
1413 		}
1414 
1415 		/*
1416 		 * Check for busy page
1417 		 */
1418 		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1419 			vm_page_t pp;
1420 
1421 			if (op & OBSC_COLLAPSE_NOWAIT) {
1422 				if ((p->oflags & VPO_BUSY) ||
1423 				    !p->valid ||
1424 				    p->busy) {
1425 					p = next;
1426 					continue;
1427 				}
1428 			} else if (op & OBSC_COLLAPSE_WAIT) {
1429 				if ((p->oflags & VPO_BUSY) || p->busy) {
1430 					VM_OBJECT_UNLOCK(object);
1431 					p->oflags |= VPO_WANTED;
1432 					msleep(p, VM_OBJECT_MTX(backing_object),
1433 					    PDROP | PVM, "vmocol", 0);
1434 					VM_OBJECT_LOCK(object);
1435 					VM_OBJECT_LOCK(backing_object);
1436 					/*
1437 					 * If we slept, anything could have
1438 					 * happened.  Since the object is
1439 					 * marked dead, the backing offset
1440 					 * should not have changed so we
1441 					 * just restart our scan.
1442 					 */
1443 					p = TAILQ_FIRST(&backing_object->memq);
1444 					continue;
1445 				}
1446 			}
1447 
1448 			KASSERT(
1449 			    p->object == backing_object,
1450 			    ("vm_object_backing_scan: object mismatch")
1451 			);
1452 
1453 			/*
1454 			 * Destroy any associated swap
1455 			 */
1456 			if (backing_object->type == OBJT_SWAP) {
1457 				swap_pager_freespace(
1458 				    backing_object,
1459 				    p->pindex,
1460 				    1
1461 				);
1462 			}
1463 
1464 			if (
1465 			    p->pindex < backing_offset_index ||
1466 			    new_pindex >= object->size
1467 			) {
1468 				/*
1469 				 * Page is out of the parent object's range, we
1470 				 * can simply destroy it.
1471 				 */
1472 				vm_page_lock(p);
1473 				KASSERT(!pmap_page_is_mapped(p),
1474 				    ("freeing mapped page %p", p));
1475 				if (p->wire_count == 0)
1476 					vm_page_free(p);
1477 				else
1478 					vm_page_remove(p);
1479 				vm_page_unlock(p);
1480 				p = next;
1481 				continue;
1482 			}
1483 
1484 			pp = vm_page_lookup(object, new_pindex);
1485 			if (
1486 			    (op & OBSC_COLLAPSE_NOWAIT) != 0 &&
1487 			    (pp != NULL && pp->valid == 0)
1488 			) {
1489 				/*
1490 				 * The page in the parent is not (yet) valid.
1491 				 * We don't know anything about the state of
1492 				 * the original page.  It might be mapped,
1493 				 * so we must avoid the next if here.
1494 				 *
1495 				 * This is due to a race in vm_fault() where
1496 				 * we must unbusy the original (backing_obj)
1497 				 * page before we can (re)lock the parent.
1498 				 * Hence we can get here.
1499 				 */
1500 				p = next;
1501 				continue;
1502 			}
1503 			if (
1504 			    pp != NULL ||
1505 			    vm_pager_has_page(object, new_pindex, NULL, NULL)
1506 			) {
1507 				/*
1508 				 * page already exists in parent OR swap exists
1509 				 * for this location in the parent.  Destroy
1510 				 * the original page from the backing object.
1511 				 *
1512 				 * Leave the parent's page alone
1513 				 */
1514 				vm_page_lock(p);
1515 				KASSERT(!pmap_page_is_mapped(p),
1516 				    ("freeing mapped page %p", p));
1517 				if (p->wire_count == 0)
1518 					vm_page_free(p);
1519 				else
1520 					vm_page_remove(p);
1521 				vm_page_unlock(p);
1522 				p = next;
1523 				continue;
1524 			}
1525 
1526 #if VM_NRESERVLEVEL > 0
1527 			/*
1528 			 * Rename the reservation.
1529 			 */
1530 			vm_reserv_rename(p, object, backing_object,
1531 			    backing_offset_index);
1532 #endif
1533 
1534 			/*
1535 			 * Page does not exist in parent, rename the
1536 			 * page from the backing object to the main object.
1537 			 *
1538 			 * If the page was mapped to a process, it can remain
1539 			 * mapped through the rename.
1540 			 */
1541 			vm_page_lock(p);
1542 			vm_page_rename(p, object, new_pindex);
1543 			vm_page_unlock(p);
1544 			/* page automatically made dirty by rename */
1545 		}
1546 		p = next;
1547 	}
1548 	return (r);
1549 }
1550 
1551 
1552 /*
1553  * This version of collapse allows the operation to occur earlier and
1554  * when paging_in_progress is true for an object...  This is not a complete
1555  * operation, but should plug 99.9% of the rest of the leaks.
1556  */
1557 static void
1558 vm_object_qcollapse(vm_object_t object)
1559 {
1560 	vm_object_t backing_object = object->backing_object;
1561 
1562 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1563 	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
1564 
1565 	if (backing_object->ref_count != 1)
1566 		return;
1567 
1568 	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1569 }
1570 
1571 /*
1572  *	vm_object_collapse:
1573  *
1574  *	Collapse an object with the object backing it.
1575  *	Pages in the backing object are moved into the
1576  *	parent, and the backing object is deallocated.
1577  */
1578 void
1579 vm_object_collapse(vm_object_t object)
1580 {
1581 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1582 
1583 	while (TRUE) {
1584 		vm_object_t backing_object;
1585 
1586 		/*
1587 		 * Verify that the conditions are right for collapse:
1588 		 *
1589 		 * The object exists and the backing object exists.
1590 		 */
1591 		if ((backing_object = object->backing_object) == NULL)
1592 			break;
1593 
1594 		/*
1595 		 * We check the backing object first because it is most likely
1596 		 * not collapsible.
1597 		 */
1598 		VM_OBJECT_LOCK(backing_object);
1599 		if (backing_object->handle != NULL ||
1600 		    (backing_object->type != OBJT_DEFAULT &&
1601 		     backing_object->type != OBJT_SWAP) ||
1602 		    (backing_object->flags & OBJ_DEAD) ||
1603 		    object->handle != NULL ||
1604 		    (object->type != OBJT_DEFAULT &&
1605 		     object->type != OBJT_SWAP) ||
1606 		    (object->flags & OBJ_DEAD)) {
1607 			VM_OBJECT_UNLOCK(backing_object);
1608 			break;
1609 		}
1610 
1611 		if (
1612 		    object->paging_in_progress != 0 ||
1613 		    backing_object->paging_in_progress != 0
1614 		) {
1615 			vm_object_qcollapse(object);
1616 			VM_OBJECT_UNLOCK(backing_object);
1617 			break;
1618 		}
1619 		/*
1620 		 * We know that we can either collapse the backing object (if
1621 		 * the parent is the only reference to it) or (perhaps) have
1622 		 * the parent bypass the object if the parent happens to shadow
1623 		 * all the resident pages in the entire backing object.
1624 		 *
1625 		 * This is ignoring pager-backed pages such as swap pages.
1626 		 * vm_object_backing_scan fails the shadowing test in this
1627 		 * case.
1628 		 */
1629 		if (backing_object->ref_count == 1) {
1630 			/*
1631 			 * If there is exactly one reference to the backing
1632 			 * object, we can collapse it into the parent.
1633 			 */
1634 			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1635 
1636 #if VM_NRESERVLEVEL > 0
1637 			/*
1638 			 * Break any reservations from backing_object.
1639 			 */
1640 			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1641 				vm_reserv_break_all(backing_object);
1642 #endif
1643 
1644 			/*
1645 			 * Move the pager from backing_object to object.
1646 			 */
1647 			if (backing_object->type == OBJT_SWAP) {
1648 				/*
1649 				 * swap_pager_copy() can sleep, in which case
1650 				 * the backing_object's and object's locks are
1651 				 * released and reacquired.
1652 				 */
1653 				swap_pager_copy(
1654 				    backing_object,
1655 				    object,
1656 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1657 
1658 				/*
1659 				 * Free any cached pages from backing_object.
1660 				 */
1661 				if (__predict_false(backing_object->cache != NULL))
1662 					vm_page_cache_free(backing_object, 0, 0);
1663 			}
1664 			/*
1665 			 * Object now shadows whatever backing_object did.
1666 			 * Note that the reference to
1667 			 * backing_object->backing_object moves from within
1668 			 * backing_object to within object.
1669 			 */
1670 			LIST_REMOVE(object, shadow_list);
1671 			backing_object->shadow_count--;
1672 			if (backing_object->backing_object) {
1673 				VM_OBJECT_LOCK(backing_object->backing_object);
1674 				LIST_REMOVE(backing_object, shadow_list);
1675 				LIST_INSERT_HEAD(
1676 				    &backing_object->backing_object->shadow_head,
1677 				    object, shadow_list);
1678 				/*
1679 				 * The shadow_count has not changed.
1680 				 */
1681 				VM_OBJECT_UNLOCK(backing_object->backing_object);
1682 			}
1683 			object->backing_object = backing_object->backing_object;
1684 			object->backing_object_offset +=
1685 			    backing_object->backing_object_offset;
1686 
1687 			/*
1688 			 * Discard backing_object.
1689 			 *
1690 			 * Since the backing object has no pages, no pager left,
1691 			 * and no object references within it, all that is
1692 			 * necessary is to dispose of it.
1693 			 */
1694 			KASSERT(backing_object->ref_count == 1, (
1695 "backing_object %p was somehow re-referenced during collapse!",
1696 			    backing_object));
1697 			VM_OBJECT_UNLOCK(backing_object);
1698 			vm_object_destroy(backing_object);
1699 
1700 			object_collapses++;
1701 		} else {
1702 			vm_object_t new_backing_object;
1703 
1704 			/*
1705 			 * If we do not entirely shadow the backing object,
1706 			 * there is nothing we can do so we give up.
1707 			 * there is nothing we can do, so we give up.
1708 			if (object->resident_page_count != object->size &&
1709 			    vm_object_backing_scan(object,
1710 			    OBSC_TEST_ALL_SHADOWED) == 0) {
1711 				VM_OBJECT_UNLOCK(backing_object);
1712 				break;
1713 			}
1714 
1715 			/*
1716 			 * Make the parent shadow the next object in the
1717 			 * chain.  Deallocating backing_object will not remove
1718 			 * it, since its reference count is at least 2.
1719 			 */
1720 			LIST_REMOVE(object, shadow_list);
1721 			backing_object->shadow_count--;
1722 
1723 			new_backing_object = backing_object->backing_object;
1724 			if ((object->backing_object = new_backing_object) != NULL) {
1725 				VM_OBJECT_LOCK(new_backing_object);
1726 				LIST_INSERT_HEAD(
1727 				    &new_backing_object->shadow_head,
1728 				    object,
1729 				    shadow_list
1730 				);
1731 				new_backing_object->shadow_count++;
1732 				vm_object_reference_locked(new_backing_object);
1733 				VM_OBJECT_UNLOCK(new_backing_object);
1734 				object->backing_object_offset +=
1735 					backing_object->backing_object_offset;
1736 			}
1737 
1738 			/*
1739 			 * Drop the reference count on backing_object. Since
1740 			 * its ref_count was at least 2, it will not vanish.
1741 			 */
1742 			backing_object->ref_count--;
1743 			VM_OBJECT_UNLOCK(backing_object);
1744 			object_bypasses++;
1745 		}
1746 
1747 		/*
1748 		 * Try again with this object's new backing object.
1749 		 */
1750 	}
1751 }
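
/*
 * Illustrative sketch of the two outcomes above, for the shadow chain
 * object -> backing_object -> X:
 *
 *	collapse (backing ref_count == 1): the pages, swap and backing
 *	    pointer move up and backing_object is destroyed, leaving
 *	    object -> X.
 *
 *	bypass (object fully shadows backing_object): object is
 *	    re-linked to shadow X directly; backing_object survives
 *	    with one fewer reference and keeps its pages.
 */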
1752 
1753 /*
1754  *	vm_object_page_remove:
1755  *
1756  *	For the given object, either frees or invalidates each of the
1757  *	specified pages.  In general, a page is freed.  However, if a
1758  *	page is wired for any reason other than the existence of a
1759  *	managed, wired mapping, then it may be invalidated but not
1760  *	removed from the object.  Pages are specified by the given
1761  *	range ["start", "end") and Boolean "clean_only".  As a
1762  *	special case, if "end" is zero, then the range extends from
1763  *	"start" to the end of the object.  If "clean_only" is TRUE,
1764  *	then only the non-dirty pages within the specified range are
1765  *	affected.
1766  *
1767  *	In general, this operation should only be performed on objects
1768  *	that contain managed pages.  There are two exceptions.  First,
1769  *	it may be performed on the kernel and kmem objects.  Second,
1770  *	it may be used by msync(..., MS_INVALIDATE) to invalidate
1771  *	device-backed pages.  In both of these cases, "clean_only"
1772  *	must be FALSE.
1773  *
1774  *	The object must be locked.
1775  */
1776 void
1777 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1778     boolean_t clean_only)
1779 {
1780 	vm_page_t p, next;
1781 	int wirings;
1782 
1783 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1784 	if (object->resident_page_count == 0)
1785 		goto skipmemq;
1786 
1787 	/*
1788 	 * Since physically-backed objects do not use managed pages, we can't
1789 	 * remove pages from the object (we must instead remove the page
1790 	 * references, and then destroy the object).
1791 	 */
1792 	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
1793 	    object == kmem_object,
1794 	    ("attempt to remove pages from a physical object"));
1795 
1796 	vm_object_pip_add(object, 1);
1797 again:
1798 	p = vm_page_find_least(object, start);
1799 
1800 	/*
1801 	 * Assert: the variable p is either (1) the page with the
1802 	 * least pindex greater than or equal to the parameter pindex
1803 	 * or (2) NULL.
1804 	 */
1805 	for (;
1806 	     p != NULL && (p->pindex < end || end == 0);
1807 	     p = next) {
1808 		next = TAILQ_NEXT(p, listq);
1809 
1810 		/*
1811 		 * If the page is wired for any reason besides the
1812 		 * existence of managed, wired mappings, then it cannot
1813 		 * be freed.  For example, fictitious pages, which
1814 		 * represent device memory, are inherently wired and
1815 		 * cannot be freed.  They can, however, be invalidated
1816 		 * if "clean_only" is FALSE.
1817 		 */
1818 		vm_page_lock(p);
1819 		if ((wirings = p->wire_count) != 0 &&
1820 		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
1821 			/* Fictitious pages do not have managed mappings. */
1822 			if ((p->flags & PG_FICTITIOUS) == 0)
1823 				pmap_remove_all(p);
1824 			/* Account for removal of managed, wired mappings. */
1825 			p->wire_count -= wirings;
1826 			if (!clean_only) {
1827 				p->valid = 0;
1828 				vm_page_undirty(p);
1829 			}
1830 			vm_page_unlock(p);
1831 			continue;
1832 		}
1833 		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
1834 			goto again;
1835 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
1836 		    ("vm_object_page_remove: page %p is fictitious", p));
1837 		if (clean_only && p->valid) {
1838 			pmap_remove_write(p);
1839 			if (p->dirty) {
1840 				vm_page_unlock(p);
1841 				continue;
1842 			}
1843 		}
1844 		pmap_remove_all(p);
1845 		/* Account for removal of managed, wired mappings. */
1846 		if (wirings != 0)
1847 			p->wire_count -= wirings;
1848 		vm_page_free(p);
1849 		vm_page_unlock(p);
1850 	}
1851 	vm_object_pip_wakeup(object);
1852 skipmemq:
1853 	if (__predict_false(object->cache != NULL))
1854 		vm_page_cache_free(object, start, end);
1855 }
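
/*
 * A minimal usage sketch (hypothetical helper): free every resident
 * page from "pidx" to the end of the object, using the documented
 * end == 0 convention above.  Passing FALSE for "clean_only" discards
 * dirty pages as well.
 */
static void
object_drop_tail_pages(vm_object_t object, vm_pindex_t pidx)
{

	VM_OBJECT_LOCK(object);
	/* end == 0 extends the range to the end of the object. */
	vm_object_page_remove(object, pidx, 0, FALSE);
	VM_OBJECT_UNLOCK(object);
}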
1856 
1857 /*
1858  *	Populate the specified range of the object with valid pages.  Returns
1859  *	TRUE if the range is successfully populated and FALSE otherwise.
1860  *
1861  *	Note: This function should be optimized to pass a larger array of
1862  *	pages to vm_pager_get_pages() before it is applied to a non-
1863  *	OBJT_DEVICE object.
1864  *
1865  *	The object must be locked.
1866  */
1867 boolean_t
1868 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1869 {
1870 	vm_page_t m, ma[1];
1871 	vm_pindex_t pindex;
1872 	int rv;
1873 
1874 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1875 	for (pindex = start; pindex < end; pindex++) {
1876 		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
1877 		    VM_ALLOC_RETRY);
1878 		if (m->valid != VM_PAGE_BITS_ALL) {
1879 			ma[0] = m;
1880 			rv = vm_pager_get_pages(object, ma, 1, 0);
1881 			m = vm_page_lookup(object, pindex);
1882 			if (m == NULL)
1883 				break;
1884 			if (rv != VM_PAGER_OK) {
1885 				vm_page_lock(m);
1886 				vm_page_free(m);
1887 				vm_page_unlock(m);
1888 				break;
1889 			}
1890 		}
1891 		/*
1892 		 * Keep "m" busy because a subsequent iteration may unlock
1893 		 * the object.
1894 		 */
1895 	}
1896 	if (pindex > start) {
1897 		m = vm_page_lookup(object, start);
1898 		while (m != NULL && m->pindex < pindex) {
1899 			vm_page_wakeup(m);
1900 			m = TAILQ_NEXT(m, listq);
1901 		}
1902 	}
1903 	return (pindex == end);
1904 }
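
/*
 * A minimal usage sketch (hypothetical wrapper): fault in the first
 * "npages" pages of an object before, e.g., wiring the range.  The
 * object lock is required on entry and may be transiently dropped by
 * the pager, which is why the loop above keeps its pages busy.
 */
static boolean_t
object_prefill(vm_object_t object, vm_pindex_t npages)
{
	boolean_t ret;

	VM_OBJECT_LOCK(object);
	ret = vm_object_populate(object, 0, npages);
	VM_OBJECT_UNLOCK(object);
	return (ret);
}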
1905 
1906 /*
1907  *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing adjoining
 *			regions of memory into a single object.
 *
 *	Returns TRUE if the objects were combined.
1912  *
1913  *	NOTE:	Only works at the moment if the second object is NULL -
1914  *		if it's not, which object do we lock first?
1915  *
1916  *	Parameters:
1917  *		prev_object	First object to coalesce
1918  *		prev_offset	Offset into prev_object
1919  *		prev_size	Size of reference to prev_object
1920  *		next_size	Size of reference to the second object
 *		reserved	Indicator that the extension region
 *				already has swap accounted for
1923  *
1924  *	Conditions:
1925  *	The object must *not* be locked.
1926  */
1927 boolean_t
1928 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
1929     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
1930 {
1931 	vm_pindex_t next_pindex;
1932 
1933 	if (prev_object == NULL)
1934 		return (TRUE);
1935 	VM_OBJECT_LOCK(prev_object);
1936 	if (prev_object->type != OBJT_DEFAULT &&
1937 	    prev_object->type != OBJT_SWAP) {
1938 		VM_OBJECT_UNLOCK(prev_object);
1939 		return (FALSE);
1940 	}
1941 
1942 	/*
1943 	 * Try to collapse the object first
1944 	 */
1945 	vm_object_collapse(prev_object);
1946 
	/*
	 * Can't coalesce if:
	 *  - there is more than one reference,
	 *  - the object is paged out,
	 *  - it shadows another object, or
	 *  - it has a copy elsewhere,
	 * any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway.
	 */
1952 	if (prev_object->backing_object != NULL) {
1953 		VM_OBJECT_UNLOCK(prev_object);
1954 		return (FALSE);
1955 	}
1956 
1957 	prev_size >>= PAGE_SHIFT;
1958 	next_size >>= PAGE_SHIFT;
1959 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
1960 
1961 	if ((prev_object->ref_count > 1) &&
1962 	    (prev_object->size != next_pindex)) {
1963 		VM_OBJECT_UNLOCK(prev_object);
1964 		return (FALSE);
1965 	}
1966 
1967 	/*
1968 	 * Account for the charge.
1969 	 */
1970 	if (prev_object->cred != NULL) {
1971 
1972 		/*
		 * If prev_object was charged, then this mapping,
		 * although not charged now, may become writable
		 * later.  A non-NULL cred in the object would prevent
		 * swap reservation when the write access is enabled,
		 * so reserve swap now.  A failed reservation causes
		 * allocation of a separate object for the map entry,
		 * and swap reservation for that entry is managed at
		 * the appropriate time.
1981 		 */
		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
		    prev_object->cred)) {
			/* Drop the object lock taken above before failing. */
			VM_OBJECT_UNLOCK(prev_object);
			return (FALSE);
		}
1986 		prev_object->charge += ptoa(next_size);
1987 	}
1988 
1989 	/*
1990 	 * Remove any pages that may still be in the object from a previous
1991 	 * deallocation.
1992 	 */
1993 	if (next_pindex < prev_object->size) {
1994 		vm_object_page_remove(prev_object,
1995 				      next_pindex,
1996 				      next_pindex + next_size, FALSE);
1997 		if (prev_object->type == OBJT_SWAP)
1998 			swap_pager_freespace(prev_object,
1999 					     next_pindex, next_size);
2000 #if 0
2001 		if (prev_object->cred != NULL) {
2002 			KASSERT(prev_object->charge >=
2003 			    ptoa(prev_object->size - next_pindex),
2004 			    ("object %p overcharged 1 %jx %jx", prev_object,
2005 				(uintmax_t)next_pindex, (uintmax_t)next_size));
2006 			prev_object->charge -= ptoa(prev_object->size -
2007 			    next_pindex);
2008 		}
2009 #endif
2010 	}
2011 
2012 	/*
2013 	 * Extend the object if necessary.
2014 	 */
2015 	if (next_pindex + next_size > prev_object->size)
2016 		prev_object->size = next_pindex + next_size;
2017 
2018 	VM_OBJECT_UNLOCK(prev_object);
2019 	return (TRUE);
2020 }
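
/*
 * A minimal usage sketch (hypothetical, modeled on how vm_map_insert()
 * consumes this routine): before allocating a fresh object for a region
 * that adjoins an existing map entry, try to grow the existing entry's
 * object instead.  Per the conditions above, the object is passed in
 * unlocked.
 */
static boolean_t
entry_try_extend(vm_object_t obj, vm_ooffset_t offset, vm_size_t size,
    vm_size_t grow_size)
{

	return (vm_object_coalesce(obj, offset, size, grow_size, FALSE));
}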
2021 
2022 void
2023 vm_object_set_writeable_dirty(vm_object_t object)
2024 {
2025 
2026 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2027 	if (object->type != OBJT_VNODE)
2028 		return;
2029 	object->generation++;
2030 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2031 		return;
2032 	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
2033 }
2034 
2035 #include "opt_ddb.h"
2036 #ifdef DDB
2037 #include <sys/kernel.h>
2038 
2039 #include <sys/cons.h>
2040 
2041 #include <ddb/ddb.h>
2042 
2043 static int
2044 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2045 {
2046 	vm_map_t tmpm;
2047 	vm_map_entry_t tmpe;
2048 	vm_object_t obj;
2049 	int entcount;
2050 
	if (map == NULL)
		return 0;

	if (entry == NULL) {
2055 		tmpe = map->header.next;
2056 		entcount = map->nentries;
2057 		while (entcount-- && (tmpe != &map->header)) {
2058 			if (_vm_object_in_map(map, object, tmpe)) {
2059 				return 1;
2060 			}
2061 			tmpe = tmpe->next;
2062 		}
2063 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2064 		tmpm = entry->object.sub_map;
2065 		tmpe = tmpm->header.next;
2066 		entcount = tmpm->nentries;
2067 		while (entcount-- && tmpe != &tmpm->header) {
2068 			if (_vm_object_in_map(tmpm, object, tmpe)) {
2069 				return 1;
2070 			}
2071 			tmpe = tmpe->next;
2072 		}
2073 	} else if ((obj = entry->object.vm_object) != NULL) {
2074 		for (; obj; obj = obj->backing_object)
2075 			if (obj == object) {
2076 				return 1;
2077 			}
2078 	}
2079 	return 0;
2080 }
2081 
2082 static int
2083 vm_object_in_map(vm_object_t object)
2084 {
2085 	struct proc *p;
2086 
2087 	/* sx_slock(&allproc_lock); */
2088 	FOREACH_PROC_IN_SYSTEM(p) {
2089 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2090 			continue;
2091 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2092 			/* sx_sunlock(&allproc_lock); */
2093 			return 1;
2094 		}
2095 	}
2096 	/* sx_sunlock(&allproc_lock); */
2097 	if (_vm_object_in_map(kernel_map, object, 0))
2098 		return 1;
2099 	if (_vm_object_in_map(kmem_map, object, 0))
2100 		return 1;
2101 	if (_vm_object_in_map(pager_map, object, 0))
2102 		return 1;
2103 	if (_vm_object_in_map(buffer_map, object, 0))
2104 		return 1;
2105 	return 0;
2106 }
2107 
2108 DB_SHOW_COMMAND(vmochk, vm_object_check)
2109 {
2110 	vm_object_t object;
2111 
2112 	/*
2113 	 * make sure that internal objs are in a map somewhere
2114 	 * and none have zero ref counts.
2115 	 */
2116 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2117 		if (object->handle == NULL &&
2118 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2119 			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count, size: %ld\n",
					(long)object->size);
2122 			}
2123 			if (!vm_object_in_map(object)) {
2124 				db_printf(
2125 			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu (0x%lx), backing_object: %p\n",
2127 				    object->ref_count, (u_long)object->size,
2128 				    (u_long)object->size,
2129 				    (void *)object->backing_object);
2130 			}
2131 		}
2132 	}
2133 }
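
/*
 * Usage example: DB_SHOW_COMMAND(vmochk, ...) above is invoked from the
 * ddb prompt as "show vmochk"; it prints one line for each anonymous
 * (OBJT_DEFAULT or OBJT_SWAP) object that is unreferenced or not
 * reachable from any map.
 */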
2134 
2135 /*
2136  *	vm_object_print:	[ debug ]
2137  */
2138 DB_SHOW_COMMAND(object, vm_object_print_static)
2139 {
2140 	/* XXX convert args. */
2141 	vm_object_t object = (vm_object_t)addr;
2142 	boolean_t full = have_addr;
2143 
2144 	vm_page_t p;
2145 
2146 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2147 #define	count	was_count
2148 
2149 	int count;
2150 
2151 	if (object == NULL)
2152 		return;
2153 
2154 	db_iprintf(
2155 	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2156 	    object, (int)object->type, (uintmax_t)object->size,
2157 	    object->resident_page_count, object->ref_count, object->flags,
2158 	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2159 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2160 	    object->shadow_count,
2161 	    object->backing_object ? object->backing_object->ref_count : 0,
2162 	    object->backing_object, (uintmax_t)object->backing_object_offset);
2163 
2164 	if (!full)
2165 		return;
2166 
2167 	db_indent += 2;
2168 	count = 0;
2169 	TAILQ_FOREACH(p, &object->memq, listq) {
2170 		if (count == 0)
2171 			db_iprintf("memory:=");
2172 		else if (count == 6) {
2173 			db_printf("\n");
2174 			db_iprintf(" ...");
2175 			count = 0;
2176 		} else
2177 			db_printf(",");
2178 		count++;
2179 
2180 		db_printf("(off=0x%jx,page=0x%jx)",
2181 		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2182 	}
2183 	if (count != 0)
2184 		db_printf("\n");
2185 	db_indent -= 2;
2186 }
2187 
2188 /* XXX. */
2189 #undef count
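
/*
 * Usage example: "show object <addr>" from the ddb prompt prints the
 * summary lines above; because "full" mirrors have_addr, supplying an
 * address also walks the object's resident page list.  The non-static
 * vm_object_print() wrapper below provides the same dump to callers
 * such as vm_map_print().
 */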
2190 
2191 /* XXX need this non-static entry for calling from vm_map_print. */
2192 void
2193 vm_object_print(
2194         /* db_expr_t */ long addr,
2195 	boolean_t have_addr,
2196 	/* db_expr_t */ long count,
2197 	char *modif)
2198 {
2199 	vm_object_print_static(addr, have_addr, count, modif);
2200 }
2201 
2202 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2203 {
2204 	vm_object_t object;
2205 	vm_pindex_t fidx;
2206 	vm_paddr_t pa;
2207 	vm_page_t m, prev_m;
2208 	int rcount, nl, c;
2209 
2210 	nl = 0;
2211 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2212 		db_printf("new object: %p\n", (void *)object);
2213 		if (nl > 18) {
2214 			c = cngetc();
2215 			if (c != ' ')
2216 				return;
2217 			nl = 0;
2218 		}
2219 		nl++;
2220 		rcount = 0;
2221 		fidx = 0;
2222 		pa = -1;
2223 		TAILQ_FOREACH(m, &object->memq, listq) {
2224 			if (m->pindex > 128)
2225 				break;
2226 			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2227 			    prev_m->pindex + 1 != m->pindex) {
2228 				if (rcount) {
2229 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2230 						(long)fidx, rcount, (long)pa);
2231 					if (nl > 18) {
2232 						c = cngetc();
2233 						if (c != ' ')
2234 							return;
2235 						nl = 0;
2236 					}
2237 					nl++;
2238 					rcount = 0;
2239 				}
2240 			}
2241 			if (rcount &&
2242 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2243 				++rcount;
2244 				continue;
2245 			}
2246 			if (rcount) {
2247 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2248 					(long)fidx, rcount, (long)pa);
2249 				if (nl > 18) {
2250 					c = cngetc();
2251 					if (c != ' ')
2252 						return;
2253 					nl = 0;
2254 				}
2255 				nl++;
2256 			}
2257 			fidx = m->pindex;
2258 			pa = VM_PAGE_TO_PHYS(m);
2259 			rcount = 1;
2260 		}
2261 		if (rcount) {
2262 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2263 				(long)fidx, rcount, (long)pa);
2264 			if (nl > 18) {
2265 				c = cngetc();
2266 				if (c != ' ')
2267 					return;
2268 				nl = 0;
2269 			}
2270 			nl++;
2271 		}
2272 	}
2273 }
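
/*
 * Usage example: "show vmopag" from the ddb prompt walks every object
 * on vm_object_list and summarizes its low-indexed resident pages as
 * runs of physically contiguous memory; at each pause, any key other
 * than space aborts the listing.
 */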
2274 #endif /* DDB */
2275