/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, int *clearobjflags);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    int *clearobjflags);
static void	vm_object_qcollapse(vm_object_t object);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

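/*
 * Zone destructor for vm objects: under INVARIANTS, verify that an object
 * being released back to the zone is quiescent, i.e., that it no longer
 * holds resident pages, reservations, paging activity, or shadows.
 */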
static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->cache == NULL,
	    ("object %p has cached pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

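/*
 * Zone initializer for vm objects.  The mutex is set up here, and the
 * counters below are reset to the values expected of a freed object;
 * the lock itself must be type stable (see vm_object_init()).
 */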
static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object, "standard object");

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	return (0);
}

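/*
 *	_vm_object_allocate:
 *
 *	Initialize a caller-supplied object with the given type and size
 *	and link it onto the global object list.  Used both by
 *	vm_object_allocate() and for the statically allocated kernel and
 *	kmem objects.
 */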
void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->flags = 0;
	object->cred = NULL;
	object->charge = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	object->pg_color = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	object->cache = NULL;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(kernel_object, "kernel object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	VM_OBJECT_LOCK_INIT(kmem_object, "kmem object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
}

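/*
 *	vm_object_clear_flag:
 *
 *	Clear the given flag bits in the object.
 *
 *	The object must be locked.
 */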
void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}

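/*
 *	The paging_in_progress ("pip") counter records the number of paging
 *	operations in flight against an object.  vm_object_pip_wait() sleeps
 *	until the counter drains to zero; OBJ_PIPWNT marks that a waiter
 *	exists so the decrement side knows to issue a wakeup().  A typical
 *	bracket, as used by vm_object_page_remove() below:
 *
 *		VM_OBJECT_LOCK(object);
 *		vm_object_pip_add(object, 1);
 *		... operate on the object's pages, possibly sleeping ...
 *		vm_object_pip_wakeup(object);
 */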
void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VFS_ASSERT_GIANT(vp->v_mount);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	if (object->ref_count > 1) {
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		/* vrele may need the vnode lock. */
		vrele(vp);
	} else {
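		/*
		 * Last reference: hold the vnode with vhold()/vdrop()
		 * while the object lock is dropped to acquire the vnode
		 * lock, then release the reference with the vnode locked
		 * (vput()), clearing VV_TEXT if the count reaches zero.
		 */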
		vhold(vp);
		VM_OBJECT_UNLOCK(object);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vdrop(vp);
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		if (object->type == OBJT_DEAD) {
			VM_OBJECT_UNLOCK(object);
			VOP_UNLOCK(vp, 0);
		} else {
			if (object->ref_count == 0)
				vp->v_vflag &= ~VV_TEXT;
			VM_OBJECT_UNLOCK(object);
			vput(vp);
		}
	}
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		int vfslocked;

		vfslocked = 0;
	restart:
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			struct vnode *vp = (struct vnode *) object->handle;

			/*
			 * Conditionally acquire Giant for a vnode-backed
			 * object.  We have to be careful since the type of
			 * a vnode object can change while the object is
			 * unlocked.
			 */
			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
				vfslocked = 1;
				if (!mtx_trylock(&Giant)) {
					VM_OBJECT_UNLOCK(object);
					mtx_lock(&Giant);
					goto restart;
				}
			}
			vm_object_vndeallocate(object);
			VFS_UNLOCK_GIANT(vfslocked);
			return;
		} else
			/*
			 * This is to handle the case that the object
			 * changed type while we dropped its lock to
			 * obtain Giant.
			 */
			VFS_UNLOCK_GIANT(vfslocked);

		KASSERT(object->ref_count != 0,
			("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
					 object->ref_count,
					 object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;
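					/*
					 * Wait for any paging in progress on
					 * either object to drain before
					 * collapsing; if the backing chain
					 * changed while sleeping, start over.
					 */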
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_UNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy removes the object from the global object list
 *      and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		KASSERT(object->type == OBJT_DEFAULT ||
		    object->type == OBJT_SWAP,
		    ("vm_object_destroy: non-swap obj %p has cred",
		     object));
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p, p_next;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
		("vm_object_terminate: object with references, ref_count=%d",
		object->ref_count));

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to an empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p", p));
		vm_page_lock(p);
		/*
		 * Optimize the page's removal from the object by resetting
		 * its "object" field.  Specifically, if the page is not
		 * wired, then the effect of this assignment is that
		 * vm_page_free()'s call to vm_page_remove() will return
		 * immediately without modifying the page or the object.
		 */
		p->object = NULL;
		if (p->wire_count == 0) {
			vm_page_free(p);
			PCPU_INC(cnt.v_pfree);
		}
		vm_page_unlock(p);
	}
	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		object->root = NULL;
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, 0, 0);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, int *clearobjflags)
{

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
		*clearobjflags = 0;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.  If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int clearobjflags, curgeneration, n, pagerflags;

	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
	    object->resident_page_count == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
	clearobjflags = tstart == 0 && tend >= object->size;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (p->valid == 0)
			continue;
		if (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
			if (object->generation != curgeneration)
				goto rescan;
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
			continue;

		n = vm_object_page_collect_flush(object, p, pagerflags,
		    flags, &clearobjflags);
		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * If the VOP_PUTPAGES() did a truncated write, so
		 * that even the first page of the run is not fully
		 * written, vm_pageout_flush() returns 0 as the run
		 * length.  Since the condition that caused truncated
		 * write may be permanent, e.g. exhausted free space,
		 * accepting n == 0 would cause an infinite loop.
		 *
		 * Forwarding the iterator leaves the unwritten page
		 * behind, but there is not much we can do there if
		 * filesystem refuses to write it.
		 */
		if (n == 0)
			n = 1;
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	if (clearobjflags)
		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
}

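/*
 *	vm_object_page_collect_flush:
 *
 *	Starting from "p", gather a run of up to vm_pageout_page_count
 *	contiguous dirty pages, extending the run forward and then backward
 *	past "p", and hand the run to vm_pageout_flush().  "mreq" is the
 *	index of the original page within the run.  Returns the run length
 *	actually written by the pager.
 */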
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, int *clearobjflags)
{
	vm_page_t ma[vm_pageout_page_count], p_first, tp;
	int count, i, mreq, runlen;

	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
	vm_page_lock_assert(p, MA_NOTOWNED);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	count = 1;
	mreq = 0;

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || tp->busy != 0 || (tp->oflags & VPO_BUSY) != 0)
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
	}

	for (p_first = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(p_first);
		if (tp == NULL || tp->busy != 0 || (tp->oflags & VPO_BUSY) != 0)
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
		p_first = tp;
		mreq++;
	}

	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
		ma[i] = tp;

	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen);
	return (runlen);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
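/*
 * (vm_object_sync() is the object-level backend of msync(2): it is
 * reached via vm_map_sync(), which resolves map entries to objects and
 * passes down the syncio and invalidate requests.)
 */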
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		int vfslocked;
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object, offset, offset + size, flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		vn_finished_write(mp);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		} else if (tobject->type == OBJT_PHYS)
			goto unlock_tobject;
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL && advise == MADV_WILLNEED) {
			/*
			 * If the page is cached, reactivate it.
			 */
			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
			    VM_ALLOC_NOBUSY);
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object)
				VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			goto shadowlookup;
		} else if (m->valid != VM_PAGE_BITS_ALL)
			goto unlock_tobject;
		/*
		 * If the page is not in a normal state, skip it.
		 */
		vm_page_lock(m);
		if (m->hold_count != 0 || m->wire_count != 0) {
			vm_page_unlock(m);
			goto unlock_tobject;
		}
		KASSERT((m->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_madvise: page %p is fictitious", m));
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("vm_object_madvise: page %p is not managed", m));
		if ((m->oflags & VPO_BUSY) || m->busy) {
			if (advise == MADV_WILLNEED) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(m, PGA_REFERENCED);
			}
			vm_page_unlock(m);
			if (object != tobject)
				VM_OBJECT_UNLOCK(object);
			m->oflags |= VPO_WANTED;
			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo",
			    0);
			VM_OBJECT_LOCK(object);
			goto relookup;
		}
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
		}
		vm_page_unlock(m);
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
		if (tobject != object)
			VM_OBJECT_UNLOCK(tobject);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
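/*
 * Illustrative call pattern (a sketch of how the map code uses this
 * routine for copy-on-write; see vm_map.c for the real callers):
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *	    entry->end - entry->start);
 *
 * Afterwards the entry references the new shadow object at offset 0.
 */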
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			VM_OBJECT_UNLOCK(source);
			return;
		}
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, atop(length));

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
#if VM_NRESERVLEVEL > 0
		result->flags |= source->flags & OBJ_COLORED;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
		    ((1 << (VM_NFREEORDER - 1)) - 1);
#endif
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Return the new object and offset.
	 */
	*offset = 0;
	*object = result;
}

/*
 *	vm_object_split:
 *
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_UNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_LOCK(new_object);
	VM_OBJECT_LOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if ((source->flags & OBJ_DEAD) != 0) {
			VM_OBJECT_UNLOCK(source);
			VM_OBJECT_UNLOCK(orig_object);
			VM_OBJECT_UNLOCK(new_object);
			vm_object_deallocate(new_object);
			VM_OBJECT_LOCK(orig_object);
			return;
		}
		LIST_INSERT_HEAD(&source->shadow_head,
				  new_object, shadow_list);
		source->shadow_count++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(source);
		new_object->backing_object_offset =
			orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	if (orig_object->cred != NULL) {
		new_object->cred = orig_object->cred;
		crhold(orig_object->cred);
		new_object->charge = ptoa(size);
		KASSERT(orig_object->charge >= ptoa(size),
		    ("orig_object->charge < ptoa(size)"));
		orig_object->charge -= ptoa(size);
	}
retry:
	m = vm_page_find_least(orig_object, offidxstart);
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if ((m->oflags & VPO_BUSY) || m->busy) {
			VM_OBJECT_UNLOCK(new_object);
			m->oflags |= VPO_WANTED;
			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
			VM_OBJECT_LOCK(new_object);
			goto retry;
		}
		vm_page_lock(m);
		vm_page_rename(m, new_object, idx);
		vm_page_unlock(m);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
	}
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);

		/*
		 * Transfer any cached pages from orig_object to new_object.
		 */
		if (__predict_false(orig_object->cache != NULL))
			vm_page_cache_transfer(orig_object, offidxstart,
			    new_object);
	}
	VM_OBJECT_UNLOCK(orig_object);
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	VM_OBJECT_UNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_LOCK(new_object);
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

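/*
 *	vm_object_backing_scan:
 *
 *	Walk the resident pages of object's backing object, performing the
 *	action selected by "op": OBSC_TEST_ALL_SHADOWED checks whether every
 *	backing page within the parent's range is shadowed by (or pageable
 *	into) the parent, returning 0 as soon as one is not;
 *	OBSC_COLLAPSE_WAIT and OBSC_COLLAPSE_NOWAIT migrate or free backing
 *	pages as part of a collapse, the former sleeping on busy pages, the
 *	latter skipping them.
 *
 *	Both the object and its backing object must be locked.
 */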
static int
vm_object_backing_scan(vm_object_t object, int op)
{
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of cache
		 * or swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->oflags & VPO_BUSY) ||
				    !p->valid ||
				    p->busy) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if ((p->oflags & VPO_BUSY) || p->busy) {
					VM_OBJECT_UNLOCK(object);
					p->oflags |= VPO_WANTED;
					msleep(p, VM_OBJECT_MTX(backing_object),
					    PDROP | PVM, "vmocol", 0);
					VM_OBJECT_LOCK(object);
					VM_OBJECT_LOCK(backing_object);
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_backing_scan: object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_lock(p);
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock(p);
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (op & OBSC_COLLAPSE_NOWAIT) != 0 &&
			    (pp != NULL && pp->valid == 0)
			) {
				/*
				 * The page in the parent is not (yet) valid.
				 * We don't know anything about the state of
				 * the original page.  It might be mapped,
				 * so we must avoid the next if here.
				 *
				 * This is due to a race in vm_fault() where
				 * we must unbusy the original (backing_obj)
				 * page before we can (re)lock the parent.
				 * Hence we can get here.
				 */
				p = next;
				continue;
			}
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_lock(p);
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock(p);
				p = next;
				continue;
			}

#if VM_NRESERVLEVEL > 0
			/*
			 * Rename the reservation.
			 */
			vm_reserv_rename(p, object, backing_object,
			    backing_offset_index);
#endif

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_lock(p);
			vm_page_rename(p, object, new_pindex);
			vm_page_unlock(p);
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	return (r);
}

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);

	if (backing_object->ref_count != 1)
		return;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		VM_OBJECT_LOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

#if VM_NRESERVLEVEL > 0
			/*
			 * Break any reservations from backing_object.
			 */
			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
				vm_reserv_break_all(backing_object);
#endif

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);

				/*
				 * Free any cached pages from backing_object.
				 */
				if (__predict_false(backing_object->cache != NULL))
					vm_page_cache_free(backing_object, 0, 0);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			if (backing_object->backing_object) {
				VM_OBJECT_LOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				VM_OBJECT_UNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			VM_OBJECT_UNLOCK(backing_object);
			vm_object_destroy(backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (object->resident_page_count != object->size &&
			    vm_object_backing_scan(object,
			    OBSC_TEST_ALL_SHADOWED) == 0) {
				VM_OBJECT_UNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_LOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_UNLOCK(new_backing_object);
				object->backing_object_offset +=
					backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_UNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.  However, if a page is
 *	wired for any reason other than the existence of a managed, wired
 *	mapping, then it may be invalidated but not removed from the object.
 *	Pages are specified by the given range ["start", "end") and the option
 *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
 *	extends from "start" to the end of the object.  If the option
 *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
 *	specified range are affected.  If the option OBJPR_NOTMAPPED is
 *	specified, then the pages within the specified range must have no
 *	mappings.  Otherwise, if this option is not specified, any mappings to
 *	the specified pages are removed before the pages are freed or
 *	invalidated.
 *
 *	In general, this operation should only be performed on objects that
 *	contain managed pages.  There are, however, two exceptions.  First, it
 *	is performed on the kernel and kmem objects by vm_map_entry_delete().
 *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
 *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
 *	not be specified and the option OBJPR_NOTMAPPED must be specified.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int options)
{
	vm_page_t p, next;
	int wirings;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_PHYS) ||
	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
	    ("vm_object_page_remove: illegal options for object %p", object));
	if (object->resident_page_count == 0)
		goto skipmemq;
	vm_object_pip_add(object, 1);
1804 again:
1805 	p = vm_page_find_least(object, start);
1806 
1807 	/*
1808 	 * Here, the variable "p" is either (1) the page with the least pindex
1809 	 * greater than or equal to the parameter "start" or (2) NULL.
1810 	 */
1811 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
1812 		next = TAILQ_NEXT(p, listq);
1813 
1814 		/*
1815 		 * If the page is wired for any reason besides the existence
1816 		 * of managed, wired mappings, then it cannot be freed.  For
1817 		 * example, fictitious pages, which represent device memory,
1818 		 * are inherently wired and cannot be freed.  They can,
1819 		 * however, be invalidated if the option OBJPR_CLEANONLY is
1820 		 * not specified.
1821 		 */
1822 		vm_page_lock(p);
1823 		if ((wirings = p->wire_count) != 0 &&
1824 		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
1825 			if ((options & OBJPR_NOTMAPPED) == 0) {
1826 				pmap_remove_all(p);
1827 				/* Account for removal of wired mappings. */
1828 				if (wirings != 0)
1829 					p->wire_count -= wirings;
1830 			}
1831 			if ((options & OBJPR_CLEANONLY) == 0) {
1832 				p->valid = 0;
1833 				vm_page_undirty(p);
1834 			}
1835 			vm_page_unlock(p);
1836 			continue;
1837 		}
1838 		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
1839 			goto again;
1840 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
1841 		    ("vm_object_page_remove: page %p is fictitious", p));
1842 		if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
1843 			if ((options & OBJPR_NOTMAPPED) == 0)
1844 				pmap_remove_write(p);
1845 			if (p->dirty) {
1846 				vm_page_unlock(p);
1847 				continue;
1848 			}
1849 		}
1850 		if ((options & OBJPR_NOTMAPPED) == 0) {
1851 			pmap_remove_all(p);
1852 			/* Account for removal of wired mappings. */
1853 			if (wirings != 0)
1854 				p->wire_count -= wirings;
1855 		}
1856 		vm_page_free(p);
1857 		vm_page_unlock(p);
1858 	}
1859 	vm_object_pip_wakeup(object);
1860 skipmemq:
1861 	if (__predict_false(object->cache != NULL))
1862 		vm_page_cache_free(object, start, end);
1863 }
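
/*
 * Editor's sketch (assumed, not in the original source): free every
 * resident page of a locked object.  Passing "end" == 0 extends the
 * range to the end of the object, and "options" == 0 removes mappings
 * and frees even dirty pages.
 */
#if 0
	VM_OBJECT_LOCK(object);
	vm_object_page_remove(object, 0, 0, 0);
	VM_OBJECT_UNLOCK(object);
#endif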
1864 
1865 /*
1866  *	Populate the specified range of the object with valid pages.  Returns
1867  *	TRUE if the range is successfully populated and FALSE otherwise.
1868  *
1869  *	Note: This function should be optimized to pass a larger array of
1870  *	pages to vm_pager_get_pages() before it is applied to a non-
1871  *	OBJT_DEVICE object.
1872  *
1873  *	The object must be locked.
1874  */
1875 boolean_t
1876 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1877 {
1878 	vm_page_t m, ma[1];
1879 	vm_pindex_t pindex;
1880 	int rv;
1881 
1882 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1883 	for (pindex = start; pindex < end; pindex++) {
1884 		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
1885 		    VM_ALLOC_RETRY);
1886 		if (m->valid != VM_PAGE_BITS_ALL) {
1887 			ma[0] = m;
1888 			rv = vm_pager_get_pages(object, ma, 1, 0);
1889 			m = vm_page_lookup(object, pindex);
1890 			if (m == NULL)
1891 				break;
1892 			if (rv != VM_PAGER_OK) {
1893 				vm_page_lock(m);
1894 				vm_page_free(m);
1895 				vm_page_unlock(m);
1896 				break;
1897 			}
1898 		}
1899 		/*
1900 		 * Keep "m" busy because a subsequent iteration may unlock
1901 		 * the object.
1902 		 */
1903 	}
1904 	if (pindex > start) {
1905 		m = vm_page_lookup(object, start);
1906 		while (m != NULL && m->pindex < pindex) {
1907 			vm_page_wakeup(m);
1908 			m = TAILQ_NEXT(m, listq);
1909 		}
1910 	}
1911 	return (pindex == end);
1912 }
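
/*
 * Editor's sketch (assumed caller, not in the original source).  On
 * success every page in [start, end) is resident and fully valid; on
 * failure, the pages that were populated have already been unbusied by
 * the cleanup loop above.  "length" here is a hypothetical byte count.
 */
#if 0
	boolean_t ok;

	VM_OBJECT_LOCK(object);
	ok = vm_object_populate(object, 0, OFF_TO_IDX(length));
	VM_OBJECT_UNLOCK(object);
#endif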
1913 
1914 /*
1915  *	Routine:	vm_object_coalesce
1916  *	Function:	Coalesces two objects backing up adjoining
1917  *			regions of memory into a single object.
1918  *
1919  *	returns TRUE if objects were combined.
1920  *	Returns TRUE if the objects were combined.
1921  *	NOTE:	Only works at the moment if the second object is NULL -
1922  *		if it's not, which object do we lock first?
1923  *
1924  *	Parameters:
1925  *		prev_object	First object to coalesce
1926  *		prev_offset	Offset into prev_object
1927  *		prev_size	Size of reference to prev_object
1928  *		next_size	Size of reference to the second object
1929  *		reserved	Indicator that the extension region
1930  *				already has swap accounted for
1931  *
1932  *	Conditions:
1933  *	The object must *not* be locked.
1934  *	prev_object must *not* be locked on entry.
1935 boolean_t
1936 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
1937     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
1938 {
1939 	vm_pindex_t next_pindex;
1940 
1941 	if (prev_object == NULL)
1942 		return (TRUE);
1943 	VM_OBJECT_LOCK(prev_object);
1944 	if (prev_object->type != OBJT_DEFAULT &&
1945 	    prev_object->type != OBJT_SWAP) {
1946 		VM_OBJECT_UNLOCK(prev_object);
1947 		return (FALSE);
1948 	}
1949 
1950 	/*
1951 	 * Try to collapse the object first
1952 	 */
1953 	vm_object_collapse(prev_object);
1954 
1955 	/*
1956 	 * Can't coalesce if the object has more than one reference, is
1957 	 * paged out, shadows another object, or has a copy elsewhere, any
1958 	 * of which mean that pages not mapped to prev_entry may be in use.
1959 	 */
1960 	if (prev_object->backing_object != NULL) {
1961 		VM_OBJECT_UNLOCK(prev_object);
1962 		return (FALSE);
1963 	}
1964 
1965 	prev_size >>= PAGE_SHIFT;
1966 	next_size >>= PAGE_SHIFT;
1967 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
1968 
1969 	if ((prev_object->ref_count > 1) &&
1970 	    (prev_object->size != next_pindex)) {
1971 		VM_OBJECT_UNLOCK(prev_object);
1972 		return (FALSE);
1973 	}
1974 
1975 	/*
1976 	 * Account for the charge.
1977 	 */
1978 	if (prev_object->cred != NULL) {
1979 
1980 		/*
1981 		 * If prev_object was charged, then this mapping,
1982 		 * although not charged now, may become writable
1983 		 * later.  A non-NULL cred in the object would prevent
1984 		 * swap reservation when write access is enabled, so
1985 		 * reserve swap now.  A failed reservation causes
1986 		 * allocation of a separate object for the map entry,
1987 		 * and swap reservation for that entry is managed at
1988 		 * the appropriate time.
1989 		 */
1990 		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
1991 		    prev_object->cred)) {
			/* Drop the lock, as on all other failure returns. */
			VM_OBJECT_UNLOCK(prev_object);
1992 			return (FALSE);
1993 		}
1994 		prev_object->charge += ptoa(next_size);
1995 	}
1996 
1997 	/*
1998 	 * Remove any pages that may still be in the object from a previous
1999 	 * deallocation.
2000 	 */
2001 	if (next_pindex < prev_object->size) {
2002 		vm_object_page_remove(prev_object, next_pindex, next_pindex +
2003 		    next_size, 0);
2004 		if (prev_object->type == OBJT_SWAP)
2005 			swap_pager_freespace(prev_object,
2006 					     next_pindex, next_size);
2007 #if 0
2008 		if (prev_object->cred != NULL) {
2009 			KASSERT(prev_object->charge >=
2010 			    ptoa(prev_object->size - next_pindex),
2011 			    ("object %p overcharged 1 %jx %jx", prev_object,
2012 				(uintmax_t)next_pindex, (uintmax_t)next_size));
2013 			prev_object->charge -= ptoa(prev_object->size -
2014 			    next_pindex);
2015 		}
2016 #endif
2017 	}
2018 
2019 	/*
2020 	 * Extend the object if necessary.
2021 	 */
2022 	if (next_pindex + next_size > prev_object->size)
2023 		prev_object->size = next_pindex + next_size;
2024 
2025 	VM_OBJECT_UNLOCK(prev_object);
2026 	return (TRUE);
2027 }
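
/*
 * Editor's sketch modeled on a vm_map_insert()-style caller; the names
 * prev_entry, end and charged are illustrative, not quoted from
 * vm_map.c.  Coalescing lets the previous map entry's object grow over
 * the newly mapped range instead of allocating a fresh object.
 */
#if 0
	if (vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), charged)) {
		/* Extend the new mapping from prev_entry's object. */
	}
#endif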
2028 
2029 void
2030 vm_object_set_writeable_dirty(vm_object_t object)
2031 {
2032 
2033 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2034 	if (object->type != OBJT_VNODE)
2035 		return;
2036 	object->generation++;
2037 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2038 		return;
2039 	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
2040 }
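
/*
 * Editor's sketch (assumed, not in the original source): the caller
 * pattern for marking a vnode-backed object possibly dirty, e.g. when
 * a writeable mapping is established.  Non-vnode objects are ignored
 * by the routine itself.
 */
#if 0
	VM_OBJECT_LOCK(object);
	vm_object_set_writeable_dirty(object);
	VM_OBJECT_UNLOCK(object);
#endif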
2041 
2042 #include "opt_ddb.h"
2043 #ifdef DDB
2044 #include <sys/kernel.h>
2045 
2046 #include <sys/cons.h>
2047 
2048 #include <ddb/ddb.h>
2049 
2050 static int
2051 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2052 {
2053 	vm_map_t tmpm;
2054 	vm_map_entry_t tmpe;
2055 	vm_object_t obj;
2056 	int entcount;
2057 
2058 	if (map == NULL)
2059 		return 0;
2060 
2061 	if (entry == NULL) {
2062 		tmpe = map->header.next;
2063 		entcount = map->nentries;
2064 		while (entcount-- && (tmpe != &map->header)) {
2065 			if (_vm_object_in_map(map, object, tmpe)) {
2066 				return 1;
2067 			}
2068 			tmpe = tmpe->next;
2069 		}
2070 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2071 		tmpm = entry->object.sub_map;
2072 		tmpe = tmpm->header.next;
2073 		entcount = tmpm->nentries;
2074 		while (entcount-- && tmpe != &tmpm->header) {
2075 			if (_vm_object_in_map(tmpm, object, tmpe)) {
2076 				return 1;
2077 			}
2078 			tmpe = tmpe->next;
2079 		}
2080 	} else if ((obj = entry->object.vm_object) != NULL) {
2081 		for (; obj; obj = obj->backing_object)
2082 			if (obj == object) {
2083 				return 1;
2084 			}
2085 	}
2086 	return 0;
2087 }
2088 
2089 static int
2090 vm_object_in_map(vm_object_t object)
2091 {
2092 	struct proc *p;
2093 
2094 	/* sx_slock(&allproc_lock); */
2095 	FOREACH_PROC_IN_SYSTEM(p) {
2096 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2097 			continue;
2098 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2099 			/* sx_sunlock(&allproc_lock); */
2100 			return 1;
2101 		}
2102 	}
2103 	/* sx_sunlock(&allproc_lock); */
2104 	if (_vm_object_in_map(kernel_map, object, 0))
2105 		return 1;
2106 	if (_vm_object_in_map(kmem_map, object, 0))
2107 		return 1;
2108 	if (_vm_object_in_map(pager_map, object, 0))
2109 		return 1;
2110 	if (_vm_object_in_map(buffer_map, object, 0))
2111 		return 1;
2112 	return 0;
2113 }
2114 
2115 DB_SHOW_COMMAND(vmochk, vm_object_check)
2116 {
2117 	vm_object_t object;
2118 
2119 	/*
2120 	 * make sure that internal objs are in a map somewhere
2121 	 * and none have zero ref counts.
2122 	 */
2123 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2124 		if (object->handle == NULL &&
2125 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2126 			if (object->ref_count == 0) {
2127 				db_printf("vmochk: internal obj has zero ref count, size: %ld\n",
2128 					(long)object->size);
2129 			}
2130 			if (!vm_object_in_map(object)) {
2131 				db_printf(
2132 			"vmochk: internal obj is not in a map: "
2133 			"ref: %d, size: %lu (0x%lx), backing_object: %p\n",
2134 				    object->ref_count, (u_long)object->size,
2135 				    (u_long)object->size,
2136 				    (void *)object->backing_object);
2137 			}
2138 		}
2139 	}
2140 }
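
/*
 * Editor's note: from the ddb(4) prompt this check is invoked as
 * "show vmochk"; it takes no argument and prints a line only for
 * objects that look inconsistent.
 */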
2141 
2142 /*
2143  *	vm_object_print:	[ debug ]
2144  */
2145 DB_SHOW_COMMAND(object, vm_object_print_static)
2146 {
2147 	/* XXX convert args. */
2148 	vm_object_t object = (vm_object_t)addr;
2149 	boolean_t full = have_addr;
2150 
2151 	vm_page_t p;
2152 
2153 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2154 #define	count	was_count
2155 
2156 	int count;
2157 
2158 	if (object == NULL)
2159 		return;
2160 
2161 	db_iprintf(
2162 	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2163 	    object, (int)object->type, (uintmax_t)object->size,
2164 	    object->resident_page_count, object->ref_count, object->flags,
2165 	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2166 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2167 	    object->shadow_count,
2168 	    object->backing_object ? object->backing_object->ref_count : 0,
2169 	    object->backing_object, (uintmax_t)object->backing_object_offset);
2170 
2171 	if (!full)
2172 		return;
2173 
2174 	db_indent += 2;
2175 	count = 0;
2176 	TAILQ_FOREACH(p, &object->memq, listq) {
2177 		if (count == 0)
2178 			db_iprintf("memory:=");
2179 		else if (count == 6) {
2180 			db_printf("\n");
2181 			db_iprintf(" ...");
2182 			count = 0;
2183 		} else
2184 			db_printf(",");
2185 		count++;
2186 
2187 		db_printf("(off=0x%jx,page=0x%jx)",
2188 		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2189 	}
2190 	if (count != 0)
2191 		db_printf("\n");
2192 	db_indent -= 2;
2193 }
2194 
2195 /* XXX. */
2196 #undef count
2197 
2198 /* XXX need this non-static entry for calling from vm_map_print. */
2199 void
2200 vm_object_print(
2201         /* db_expr_t */ long addr,
2202 	boolean_t have_addr,
2203 	/* db_expr_t */ long count,
2204 	char *modif)
2205 {
2206 	vm_object_print_static(addr, have_addr, count, modif);
2207 }
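
/*
 * Editor's note: the command above is reached from ddb(4) as
 * "show object <addr>"; since "full" is taken from have_addr,
 * supplying an address also enables the per-page "memory:" listing.
 */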
2208 
2209 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2210 {
2211 	vm_object_t object;
2212 	vm_pindex_t fidx;
2213 	vm_paddr_t pa;
2214 	vm_page_t m, prev_m;
2215 	int rcount, nl, c;
2216 
2217 	nl = 0;
2218 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2219 		db_printf("new object: %p\n", (void *)object);
2220 		if (nl > 18) {
2221 			c = cngetc();
2222 			if (c != ' ')
2223 				return;
2224 			nl = 0;
2225 		}
2226 		nl++;
2227 		rcount = 0;
2228 		fidx = 0;
2229 		pa = -1;
2230 		TAILQ_FOREACH(m, &object->memq, listq) {
2231 			if (m->pindex > 128)
2232 				break;
2233 			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2234 			    prev_m->pindex + 1 != m->pindex) {
2235 				if (rcount) {
2236 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2237 						(long)fidx, rcount, (long)pa);
2238 					if (nl > 18) {
2239 						c = cngetc();
2240 						if (c != ' ')
2241 							return;
2242 						nl = 0;
2243 					}
2244 					nl++;
2245 					rcount = 0;
2246 				}
2247 			}
2248 			if (rcount &&
2249 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2250 				++rcount;
2251 				continue;
2252 			}
2253 			if (rcount) {
2254 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2255 					(long)fidx, rcount, (long)pa);
2256 				if (nl > 18) {
2257 					c = cngetc();
2258 					if (c != ' ')
2259 						return;
2260 					nl = 0;
2261 				}
2262 				nl++;
2263 			}
2264 			fidx = m->pindex;
2265 			pa = VM_PAGE_TO_PHYS(m);
2266 			rcount = 1;
2267 		}
2268 		if (rcount) {
2269 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2270 				(long)fidx, rcount, (long)pa);
2271 			if (nl > 18) {
2272 				c = cngetc();
2273 				if (c != ' ')
2274 					return;
2275 				nl = 0;
2276 			}
2277 			nl++;
2278 		}
2279 	}
2280 	}
}
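
/*
 * Editor's note: invoked from ddb(4) as "show vmopag".  Output is
 * paged by hand: after roughly 18 lines cngetc() waits for a key,
 * and any key other than a space aborts the listing.
 */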
2281 #endif /* DDB */
2282