xref: /freebsd/sys/vm/vm_object.c (revision 3e0f6b97b257a96f7275e4442204263e44b16686)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void	_vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t));
static void	vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
static void	vm_object_deactivate_pages __P((vm_object_t));
#endif
static void	vm_object_terminate __P((vm_object_t));
static void	vm_object_cache_trim __P((void));

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
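
/*
 * Illustrative picture (not tied to any one routine here): a shadow
 * chain, as built by vm_object_shadow() for copy-on-write mappings,
 * looks roughly like this; write faults fill the front object while
 * reads of untouched pages fall through to the objects behind it:
 *
 *	map entry -> shadow object -> backing object -> pager/backing store
 *	             (private copies)  (original pages)
 */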

int vm_object_cache_max;
struct object_q vm_object_cached_list;
static int vm_object_cached;
struct object_q vm_object_list;
struct simplelock vm_object_list_lock;
static long vm_object_count;
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;

static void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	register vm_object_t object;
{
	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	object->behavior = OBJ_NORMAL;
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK;
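	/*
	 * Note: pg_color stripes successive objects across the page
	 * coloring space.  Because PQ_PRIME1 is relatively prime to the
	 * number of colors, stepping by it and masking with PQ_L2_MASK
	 * cycles through every color before repeating; for instance, if
	 * PQ_PRIME1 were 31 and PQ_L2_MASK were 255, the sequence would
	 * be 0, 31, 62, ... visiting all 256 values.  (The actual
	 * constants live in <vm/vm_page.h>.)
	 */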
	object->handle = NULL;
	object->paging_offset = (vm_ooffset_t) 0;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	object->page_hint = NULL;

	object->last_read = 0;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	simple_lock_init(&vm_object_list_lock);
	vm_object_count = 0;

	vm_object_cache_max = 84;
	if (cnt.v_page_count > 1000)
		vm_object_cache_max += (cnt.v_page_count - 1000) / 4;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t)
	    malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(type, size, result);

	return (result);
}
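
/*
 * Usage sketch (illustrative; no such caller exists in this file): an
 * anonymous, pageable object covering one megabyte would be created and
 * later released as
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(1024 * 1024));
 *	...
 *	vm_object_deallocate(obj);
 *
 * Note that the size argument is in pages (hence OFF_TO_IDX), not bytes,
 * as in the kernel_object/kmem_object setup in vm_object_init() above.
 */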

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	if (object->ref_count == 0) {
		if ((object->flags & OBJ_CANPERSIST) == 0)
			panic("vm_object_reference: non-persistent object with 0 ref_count");
		TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
		vm_object_cached--;
	}
	object->ref_count++;
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		if (object->ref_count == 0)
			panic("vm_object_deallocate: object deallocated too many times");

		/*
		 * Lose the reference
		 */
		object->ref_count--;
		if (object->ref_count != 0) {
			if ((object->ref_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;
				robject = TAILQ_FIRST(&object->shadow_head);
				if ((robject != NULL) &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {
					int s;
					robject->ref_count += 2;
					object->ref_count += 2;
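
					/*
					 * Note: the two extra references taken
					 * above pin both objects so neither
					 * can be torn down while we sleep
					 * waiting for their pageouts to drain
					 * below; they are dropped again once
					 * paging_in_progress reaches zero.
					 */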

					do {
						s = splvm();
						while (robject->paging_in_progress) {
							robject->flags |= OBJ_PIPWNT;
							tsleep(robject, PVM, "objde1", 0);
						}

						while (object->paging_in_progress) {
							object->flags |= OBJ_PIPWNT;
							tsleep(object, PVM, "objde2", 0);
						}
						splx(s);

					} while (object->paging_in_progress || robject->paging_in_progress);

					object->ref_count -= 2;
					robject->ref_count -= 2;
					if (robject->ref_count == 0) {
						robject->ref_count += 1;
						object = robject;
						continue;
					}
					vm_object_collapse(robject);
					return;
				}
			}
			/*
			 * If there are still references, then we are done.
			 */
			return;
		}

		if (object->type == OBJT_VNODE) {
			struct vnode *vp = object->handle;

			vp->v_flag &= ~VTEXT;
		}

		/*
		 * See if this object can persist and has some resident
		 * pages.  If so, enter it in the cache.
		 */
		if (object->flags & OBJ_CANPERSIST) {
			if (object->resident_page_count != 0) {
#if 0
				vm_object_page_clean(object, 0, 0, TRUE, TRUE);
#endif
				TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached++;

				vm_object_cache_trim();
				return;
			} else {
				object->flags &= ~OBJ_CANPERSIST;
			}
		}

		/*
		 * Make sure no one uses us.
		 */
		object->flags |= OBJ_DEAD;

		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			--temp->shadow_count;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
static void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	int s;

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	s = splvm();
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objtrm", 0);
	}
	splx(s);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = object->handle;
		struct proc *p = curproc;	/* XXX */
		int waslocked;

		waslocked = VOP_ISLOCKED(vp);
		if (!waslocked)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		vm_object_page_clean(object, 0, 0, TRUE, FALSE);
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
		if (!waslocked)
			VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Now free the pages. For internal objects, this also removes them
	 * from paging queues.
	 */
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->busy || (p->flags & PG_BUSY))
			printf("vm_object_terminate: freeing busy page\n");
		PAGE_WAKEUP(p);
		vm_page_free(p);
		cnt.v_pfree++;
	}

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	free((caddr_t) object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if end == 0, we clean from start to the end
 *	of the object.
 *
 *	The object must be locked.
 */

void
vm_object_page_clean(object, start, end, syncio, lockflag)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	boolean_t syncio;
	boolean_t lockflag;
{
	register vm_page_t p, np, tp;
	register vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	struct proc *pproc = curproc;	/* XXX */

	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	vp = object->handle;

	if (lockflag)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, pproc);
	object->flags |= OBJ_CLEANING;

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}
	if ((tstart == 0) && (tend == object->size)) {
		object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
		p->flags |= PG_CLEANCHK;

rescan:
	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		s = splvm();
		if ((p->flags & PG_BUSY) || p->busy) {
			p->flags |= PG_WANTED|PG_REFERENCED;
			tsleep(p, PVM, "vpcwai", 0);
			splx(s);
			goto rescan;
		}
		splx(s);

		s = splvm();
		maxf = 0;
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    (tp->flags & PG_CLEANCHK) == 0)
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				maf[i - 1] = tp;
				maxf++;
				continue;
			}
			break;
		}

		maxb = 0;
		chkb = vm_pageout_page_count - maxf;
		if (chkb) {
			for (i = 1; i < chkb; i++) {
				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
					if ((tp->flags & PG_BUSY) ||
					    (tp->flags & PG_CLEANCHK) == 0)
						break;
					if ((tp->queue - tp->pc) == PQ_CACHE) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					mab[i - 1] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}
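
		/*
		 * Note: the cluster is assembled in ma[] in ascending pindex
		 * order below: the maxb backward pages from mab[] (reversed),
		 * then p itself at ma[maxb], then the maxf forward pages from
		 * maf[], handing vm_pageout_flush() one contiguous run.
		 */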

		for (i = 0; i < maxb; i++) {
			int index = (maxb - i) - 1;
			ma[index] = mab[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		vm_page_protect(p, VM_PROT_READ);
		p->flags |= PG_BUSY;
		p->flags &= ~PG_CLEANCHK;
		ma[maxb] = p;
		for (i = 0; i < maxf; i++) {
			int index = (maxb + i) + 1;
			ma[index] = maf[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		runlen = maxb + maxf + 1;
		splx(s);
		vm_pageout_flush(ma, runlen, 0);
		goto rescan;
	}

	VOP_FSYNC(vp, NULL, syncio, curproc);

	if (lockflag)
		VOP_UNLOCK(vp, 0, pproc);
	object->flags &= ~OBJ_CLEANING;
	return;
}

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
}
#endif

/*
 *	Trim the object cache to size.
 */
static void
vm_object_cache_trim()
{
	register vm_object_t object;

	while (vm_object_cached > vm_object_cache_max) {
		object = TAILQ_FIRST(&vm_object_cached_list);

		vm_object_reference(object);
		pager_cache(object, FALSE);
	}
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (p = TAILQ_FIRST(&object->memq);
	    p != NULL;
	    p = TAILQ_NEXT(p, listq)) {
		vm_page_protect(p, VM_PROT_READ);
	}

	object->flags &= ~OBJ_WRITEABLE;
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;
	for (p = TAILQ_FIRST(&object->memq);
	    p != NULL;
	    p = TAILQ_NEXT(p, listq)) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	if ((start == 0) && (object->size == end))
		object->flags &= ~OBJ_WRITEABLE;
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 */
void
vm_object_madvise(object, pindex, count, advise)
	vm_object_t object;
	vm_pindex_t pindex;
	int count;
	int advise;
{
	int s;
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	for (; pindex < end; pindex += 1) {

relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			if (tobject->type != OBJT_DEFAULT) {
				continue;
			}

			/*
			 * Translate the index into the backing object's
			 * space before descending to it.
			 */
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			tobject = tobject->backing_object;
			if ((tobject == NULL) || (tobject->ref_count != 1)) {
				continue;
			}
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  Things can break if we mess with pages
		 * in any of the below states.
		 */
		if (m->hold_count || m->wire_count ||
		    m->valid != VM_PAGE_BITS_ALL) {
			continue;
		}

		if (m->busy || (m->flags & PG_BUSY)) {
			s = splvm();
			if (m->busy || (m->flags & PG_BUSY)) {
				m->flags |= PG_WANTED;
				tsleep(m, PVM, "madvpw", 0);
			}
			splx(s);
			goto relookup;
		}

		if (advise == MADV_WILLNEED) {
			if (m->queue != PQ_ACTIVE)
				vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_deactivate(m);
		} else if (advise == MADV_FREE) {
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->dirty = 0;
			/*
			 * Force a demand zero if attempt to read from swap.
			 * We currently don't handle vnode files correctly,
			 * and will reread stale contents unnecessarily.
			 */
			if (tobject->type == OBJT_SWAP)
				swap_pager_dmzspace(tobject, m->pindex, 1);
		}
	}
}
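
/*
 * Usage sketch (illustrative; obj, offset and npages are hypothetical
 * caller-side names): the madvise(2) path arrives here with the object
 * backing the mapped region, so advice such as
 *
 *	vm_object_madvise(obj, OFF_TO_IDX(offset), npages, MADV_DONTNEED);
 *
 * deactivates each resident, unwired page in [pindex, pindex + count),
 * while MADV_FREE additionally discards the pages' contents.
 */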

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_ooffset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		++source->shadow_count;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */

	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */

	*offset = 0;
	*object = result;
}
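
/*
 * Usage sketch (illustrative; the map-entry caller is a sketch, not a
 * quote from vm_map.c): a fork-style copy-on-write setup shadows the
 * object of a map entry in place,
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *	    OFF_TO_IDX(entry->end - entry->start));
 *
 * after which write faults fill the fresh front object while the
 * original pages stay shared, read-only, behind it.  The length is in
 * pages, matching vm_object_allocate().
 */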

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_pindex_t backing_offset_index, paging_offset_index;
	vm_pindex_t backing_object_paging_offset_index;
	vm_pindex_t new_pindex;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->backing_object;
	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;
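
	/*
	 * Note: the temporary references keep the backing object from
	 * being reaped while its page list is walked below; they are
	 * dropped again at the end of the scan.
	 */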

	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
	paging_offset_index = OFF_TO_IDX(object->paging_offset);
	size = object->size;
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next;

		next = TAILQ_NEXT(p, listq);
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
		    ((p->queue - p->pc) == PQ_CACHE) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy) {
			p = next;
			continue;
		}
		new_pindex = p->pindex - backing_offset_index;
		if (p->pindex < backing_offset_index ||
		    new_pindex >= size) {
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object,
				    backing_object_paging_offset_index + p->pindex,
				    1);
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
		} else {
			pp = vm_page_lookup(object, new_pindex);
			if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
			    paging_offset_index + new_pindex, NULL, NULL))) {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			} else {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_rename(p, object, new_pindex);
				vm_page_protect(p, VM_PROT_NONE);
				p->dirty = VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
	vm_object_t object;
{
	vm_object_t backing_object;
	vm_ooffset_t backing_offset;
	vm_size_t size;
	vm_pindex_t new_pindex, backing_offset_index;
	vm_page_t p, pp;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being paged
		 * out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->backing_object) == NULL)
			return;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			return;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) remove
		 * the parent's reference to it.
		 */

		backing_offset = object->backing_object_offset;
		backing_offset_index = OFF_TO_IDX(backing_offset);
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object, we
		 * can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to the
			 * parent.  Pages that have been paged out will be
			 * overwritten by any of the parent's pages that
			 * shadow them.
			 */

			while ((p = TAILQ_FIRST(&backing_object->memq)) != NULL) {

				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->pindex < backing_offset_index ||
				    new_pindex >= size) {
					vm_page_protect(p, VM_PROT_NONE);
					PAGE_WAKEUP(p);
					vm_page_free(p);
				} else {
					pp = vm_page_lookup(object, new_pindex);
					if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
					    OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
						vm_page_protect(p, VM_PROT_NONE);
						PAGE_WAKEUP(p);
						vm_page_free(p);
					} else {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_rename(p, object, new_pindex);
						p->dirty = VM_PAGE_BITS_ALL;
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 */

			if (backing_object->type == OBJT_SWAP) {
				backing_object->paging_in_progress++;
				if (object->type == OBJT_SWAP) {
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					swap_pager_copy(
					    backing_object,
					    OFF_TO_IDX(backing_object->paging_offset),
					    object,
					    OFF_TO_IDX(object->paging_offset),
					    OFF_TO_IDX(object->backing_object_offset));
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * move the shadow backing_object's pager data to
					 * "object" and convert "object" type to OBJT_SWAP.
					 */
					object->type = OBJT_SWAP;
					object->un_pager.swp.swp_nblocks =
					    backing_object->un_pager.swp.swp_nblocks;
					object->un_pager.swp.swp_allocsize =
					    backing_object->un_pager.swp.swp_allocsize;
					object->un_pager.swp.swp_blocks =
					    backing_object->un_pager.swp.swp_blocks;
					object->un_pager.swp.swp_poip =		/* XXX */
					    backing_object->un_pager.swp.swp_poip;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);

					/*
					 * Convert backing object from OBJT_SWAP to
					 * OBJT_DEFAULT. XXX - only the TAILQ_REMOVE is
					 * actually necessary.
					 */
					backing_object->type = OBJT_DEFAULT;
					TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object, 0,
					    OFF_TO_IDX(object->paging_offset));
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->backing_object
			 * moves from within backing_object to within object.
			 */

			TAILQ_REMOVE(&object->backing_object->shadow_head, object,
			    shadow_list);
			--object->backing_object->shadow_count;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
				    backing_object, shadow_list);
				--backing_object->backing_object->shadow_count;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}

			object->backing_object_offset += backing_object->backing_object_offset;
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */

			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;

			free((caddr_t) backing_object, M_VMOBJ);

			object_collapses++;
		} else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object no
			 * longer has to shadow the backing object; it can
			 * shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd have
			 * to check all of the paged-out pages, as well.
			 */

			if (backing_object->type != OBJT_DEFAULT) {
				return;
			}
			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */

			for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be left in
				 * the chain.
				 */

				if (p->pindex >= backing_offset_index &&
				    new_pindex < size) {

					pp = vm_page_lookup(object, new_pindex);

					if ((pp == NULL || pp->valid == 0) &&
					    !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
						/*
						 * Page still needed. Can't go any
						 * further.
						 */
						return;
					}
				}
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */

			TAILQ_REMOVE(&object->backing_object->shadow_head,
			    object, shadow_list);
			--object->backing_object->shadow_count;
			vm_object_reference(object->backing_object = backing_object->backing_object);
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}
			object->backing_object_offset += backing_object->backing_object_offset;

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;

			object_bypasses++;

		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
	boolean_t clean_only;
{
	register vm_page_t p, next;
	unsigned int size;
	int s;

	if (object == NULL)
		return;

	object->paging_in_progress++;
again:
	size = end - start;
	if (size > 4 || size >= object->size / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if ((start <= p->pindex) && (p->pindex < end)) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if ((p->flags & PG_BUSY) || p->busy) {
					s = splvm();
					if ((p->flags & PG_BUSY) || p->busy) {
						p->flags |= PG_WANTED;
						tsleep(p, PVM, "vmopar", 0);
						splx(s);
						goto again;
					}
					splx(s);
				}

				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}
				vm_page_protect(p, VM_PROT_NONE);
				PAGE_WAKEUP(p);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != NULL) {
				if (p->wire_count != 0) {
					p->valid = 0;
					vm_page_protect(p, VM_PROT_NONE);
					start += 1;
					size -= 1;
					continue;
				}
				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if ((p->flags & PG_BUSY) || p->busy) {
					s = splvm();
					if ((p->flags & PG_BUSY) || p->busy) {
						p->flags |= PG_WANTED;
						tsleep(p, PVM, "vmopar", 0);
						splx(s);
						goto again;
					}
					splx(s);
				}
				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}
				vm_page_protect(p, VM_PROT_NONE);
				PAGE_WAKEUP(p);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}
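
/*
 * Note on the two strategies above: for a large range it is cheaper to
 * walk the object's resident page list once than to do a hash lookup per
 * index, while for a small range the per-index vm_page_lookup() calls
 * avoid scanning an object that may have many resident pages outside
 * the range.
 */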

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_pindex	Page index into prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to the second object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
	register vm_object_t prev_object;
	vm_pindex_t prev_pindex;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 *  . more than one reference
	 *  . paged out
	 *  . shadows another object
	 *  . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */

	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != prev_pindex + prev_size)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */

	vm_object_page_remove(prev_object,
	    prev_pindex + prev_size,
	    prev_pindex + prev_size + next_size, FALSE);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_pindex + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	return (TRUE);
}
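
/*
 * Usage sketch (illustrative; prev_entry and the range [start, end) are
 * hypothetical caller-side names): when a map insertion abuts an existing
 * anonymous entry, the new piece can often be folded into that entry's
 * object instead of allocating a new one:
 *
 *	if (vm_object_coalesce(prev_entry->object.vm_object,
 *	    OFF_TO_IDX(prev_entry->offset),
 *	    prev_entry->end - prev_entry->start,
 *	    end - start))
 *		... extend prev_entry rather than creating a new entry ...
 *
 * Note that prev_size and next_size are byte lengths; they are converted
 * to page counts internally via PAGE_SHIFT.
 */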

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <machine/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
				       vm_map_entry_t entry));
static int	vm_object_in_map __P((vm_object_t object));

static int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
			return 1;
	}
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	if (_vm_object_in_map(io_map, object, 0))
		return 1;
	if (_vm_object_in_map(phys_map, object, 0))
		return 1;
	if (_vm_object_in_map(mb_map, object, 0))
		return 1;
	if (_vm_object_in_map(u_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
	    object != NULL;
	    object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %d\n",
				    object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf("vmochk: internal obj is not in a map: "
				    "ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
				    object->ref_count, object->size,
				    object->size, object->backing_object);
			}
		}
	}
}
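
/*
 * Usage note: at the in-kernel debugger prompt,
 *
 *	db> show vmochk
 *
 * walks vm_object_list and reports internal (handle-less DEFAULT/SWAP)
 * objects that are unreferenced or unreachable from any known map --
 * either of which would indicate a leaked object.
 */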

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	register vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	register int count;

	if (object == NULL)
		return;

	db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
	    (int) object, (int) object->size,
	    object->resident_page_count, object->ref_count);
	db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
	    (int) object->paging_offset,
	    (int) object->backing_object, (int) object->backing_object_offset);
	db_printf("cache: next=%p, prev=%p\n",
	    TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list));

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(addr, have_addr, count, modif)
	db_expr_t addr;
	boolean_t have_addr;
	db_expr_t count;
	char *modif;
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	for (object = TAILQ_FIRST(&vm_object_list);
	    object != NULL;
	    object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: 0x%x\n", object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%d)run(%d)pa(0x%x)\n",
					    fidx, rcount, pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
				db_printf("pd(%d)\n", padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */
1637