xref: /freebsd/sys/vm/vm_object.c (revision ce834215a70ff69e7e222827437116eee2f9ac6f)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_object.c,v 1.93 1997/06/22 03:00:24 dyson Exp $
65  */
66 
67 /*
68  *	Virtual memory object module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>		/* for curproc, pageproc */
75 #include <sys/malloc.h>
76 #include <sys/vnode.h>
77 #include <sys/mount.h>
78 #include <sys/vmmeter.h>
79 #include <sys/mman.h>
80 
81 #include <vm/vm.h>
82 #include <vm/vm_param.h>
83 #include <vm/vm_prot.h>
84 #include <sys/lock.h>
85 #include <vm/pmap.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 
95 static void	_vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t));
96 static void	vm_object_qcollapse __P((vm_object_t object));
97 #ifdef not_used
98 static void	vm_object_deactivate_pages __P((vm_object_t));
99 #endif
100 static void	vm_object_terminate __P((vm_object_t));
101 static void	vm_object_cache_trim __P((void));
102 
103 /*
104  *	Virtual memory objects maintain the actual data
105  *	associated with allocated virtual memory.  A given
106  *	page of memory exists within exactly one object.
107  *
108  *	An object is only deallocated when all "references"
109  *	are given up.  Only one "reference" to a given
110  *	region of an object should be writeable.
111  *
112  *	Associated with each object is a list of all resident
113  *	memory pages belonging to that object; this list is
114  *	maintained by the "vm_page" module, and locked by the object's
115  *	lock.
116  *
117  *	Each object also records a "pager" routine which is
118  *	used to retrieve (and store) pages to the proper backing
119  *	storage.  In addition, objects may be backed by other
120  *	objects from which they were virtual-copied.
121  *
122  *	The only items within the object structure which are
123  *	modified after time of creation are:
124  *		reference count		locked by object's lock
125  *		pager routine		locked by object's lock
126  *
127  */
128 
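/*
 * A minimal usage sketch of this module's interface (illustrative
 * only, not compiled; "nbytes" is a hypothetical byte count).  An
 * anonymous object starts life with one reference; each additional
 * user takes a reference, and the object is cached or terminated
 * when the last reference is released.
 */
#if 0
	vm_object_t obj;

	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(nbytes));
	vm_object_reference(obj);	/* second user: ref_count == 2 */
	vm_object_deallocate(obj);	/* back to one reference */
	vm_object_deallocate(obj);	/* last reference: cache or terminate */
#endif
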
129 int vm_object_cache_max;
130 struct object_q vm_object_cached_list;
131 static int vm_object_cached;
132 struct object_q vm_object_list;
133 struct simplelock vm_object_list_lock;
134 static long vm_object_count;
135 vm_object_t kernel_object;
136 vm_object_t kmem_object;
137 static struct vm_object kernel_object_store;
138 static struct vm_object kmem_object_store;
139 extern int vm_pageout_page_count;
140 
141 static long object_collapses;
142 static long object_bypasses;
143 static int next_index;
144 
145 static void
146 _vm_object_allocate(type, size, object)
147 	objtype_t type;
148 	vm_size_t size;
149 	register vm_object_t object;
150 {
151 	TAILQ_INIT(&object->memq);
152 	TAILQ_INIT(&object->shadow_head);
153 
154 	object->type = type;
155 	object->size = size;
156 	object->ref_count = 1;
157 	object->flags = 0;
158 	object->behavior = OBJ_NORMAL;
159 	object->paging_in_progress = 0;
160 	object->resident_page_count = 0;
161 	object->shadow_count = 0;
162 	object->pg_color = next_index;
163 	next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK;
164 	object->handle = NULL;
165 	object->paging_offset = (vm_ooffset_t) 0;
166 	object->backing_object = NULL;
167 	object->backing_object_offset = (vm_ooffset_t) 0;
168 	object->page_hint = NULL;
169 
170 	object->last_read = 0;
171 
172 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
173 	vm_object_count++;
174 }
175 
176 /*
177  *	vm_object_init:
178  *
179  *	Initialize the VM objects module.
180  */
181 void
182 vm_object_init()
183 {
184 	TAILQ_INIT(&vm_object_cached_list);
185 	TAILQ_INIT(&vm_object_list);
186 	simple_lock_init(&vm_object_list_lock);
187 	vm_object_count = 0;
188 
189 	vm_object_cache_max = 84;
190 	if (cnt.v_page_count > 1000)
191 		vm_object_cache_max += (cnt.v_page_count - 1000) / 4;
192 
193 	kernel_object = &kernel_object_store;
194 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
195 	    kernel_object);
196 
197 	kmem_object = &kmem_object_store;
198 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
199 	    kmem_object);
200 }
201 
202 /*
203  *	vm_object_allocate:
204  *
205  *	Returns a new object with the given size.
206  */
207 
208 vm_object_t
209 vm_object_allocate(type, size)
210 	objtype_t type;
211 	vm_size_t size;
212 {
213 	register vm_object_t result;
214 
215 	result = (vm_object_t)
216 	    malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);
217 
218 
219 	_vm_object_allocate(type, size, result);
220 
221 	return (result);
222 }
223 
224 
225 /*
226  *	vm_object_reference:
227  *
228  *	Gets another reference to the given object.
229  */
230 void
231 vm_object_reference(object)
232 	register vm_object_t object;
233 {
234 	if (object == NULL)
235 		return;
236 
237 	if (object->ref_count == 0) {
238 		if ((object->flags & OBJ_CANPERSIST) == 0)
239 			panic("vm_object_reference: non-persistent object with 0 ref_count");
240 		TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
241 		vm_object_cached--;
242 	}
243 	object->ref_count++;
244 }
245 
246 /*
247  *	vm_object_deallocate:
248  *
249  *	Release a reference to the specified object,
250  *	gained either through a vm_object_allocate
251  *	or a vm_object_reference call.  When all references
252  *	are gone, storage associated with this object
253  *	may be relinquished.
254  *
255  *	No object may be locked.
256  */
257 void
258 vm_object_deallocate(object)
259 	vm_object_t object;
260 {
261 	vm_object_t temp;
262 
263 	while (object != NULL) {
264 
265 		if (object->ref_count == 0)
266 			panic("vm_object_deallocate: object deallocated too many times");
267 
268 		/*
269 		 * Lose the reference
270 		 */
271 		object->ref_count--;
272 		if (object->ref_count != 0) {
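			/*
			 * Special case: if this anonymous object's last
			 * remaining reference is held by a lone anonymous
			 * shadow, wait out any paging activity and try to
			 * collapse the shadow into this object.
			 */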
273 			if ((object->ref_count == 1) &&
274 			    (object->handle == NULL) &&
275 			    (object->type == OBJT_DEFAULT ||
276 			     object->type == OBJT_SWAP)) {
277 				vm_object_t robject;
278 				robject = TAILQ_FIRST(&object->shadow_head);
279 				if ((robject != NULL) &&
280 				    (robject->handle == NULL) &&
281 				    (robject->type == OBJT_DEFAULT ||
282 				     robject->type == OBJT_SWAP)) {
283 					int s;
284 					robject->ref_count += 2;
285 					object->ref_count += 2;
286 
287 					do {
288 						s = splvm();
289 						while (robject->paging_in_progress) {
290 							robject->flags |= OBJ_PIPWNT;
291 							tsleep(robject, PVM, "objde1", 0);
292 						}
293 
294 						while (object->paging_in_progress) {
295 							object->flags |= OBJ_PIPWNT;
296 							tsleep(object, PVM, "objde2", 0);
297 						}
298 						splx(s);
299 
300 					} while (object->paging_in_progress || robject->paging_in_progress);
301 
302 					object->ref_count -= 2;
303 					robject->ref_count -= 2;
304 					if (robject->ref_count == 0) {
305 						robject->ref_count += 1;
306 						object = robject;
307 						continue;
308 					}
309 					vm_object_collapse(robject);
310 					return;
311 				}
312 			}
313 			/*
314 			 * If there are still references, then we are done.
315 			 */
316 			return;
317 		}
318 
319 		if (object->type == OBJT_VNODE) {
320 			struct vnode *vp = object->handle;
321 
322 			vp->v_flag &= ~VTEXT;
323 		}
324 
325 		/*
326 		 * See if this object can persist and has some resident
327 		 * pages.  If so, enter it in the cache.
328 		 */
329 		if (object->flags & OBJ_CANPERSIST) {
330 			if (object->resident_page_count != 0) {
331 #if 0
332 				vm_object_page_clean(object, 0, 0, TRUE, TRUE);
333 #endif
334 				TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
335 				    cached_list);
336 				vm_object_cached++;
337 
338 				vm_object_cache_trim();
339 				return;
340 			} else {
341 				object->flags &= ~OBJ_CANPERSIST;
342 			}
343 		}
344 
345 		/*
346 		 * Make sure no one uses us.
347 		 */
348 		object->flags |= OBJ_DEAD;
349 
350 		temp = object->backing_object;
351 		if (temp) {
352 			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
353 			--temp->shadow_count;
354 		}
355 		vm_object_terminate(object);
356 		/* unlocks and deallocates object */
357 		object = temp;
358 	}
359 }
360 
361 /*
362  *	vm_object_terminate actually destroys the specified object, freeing
363  *	up all previously used resources.
364  *
365  *	The object must be locked.
366  */
367 static void
368 vm_object_terminate(object)
369 	register vm_object_t object;
370 {
371 	register vm_page_t p;
372 	int s;
373 
374 	if (object->flags & OBJ_VFS_REF)
375 		panic("vm_object_terminate: freeing VFS_REF'ed object");
376 
377 	/*
378 	 * wait for the pageout daemon to be done with the object
379 	 */
380 	s = splvm();
381 	while (object->paging_in_progress) {
382 		object->flags |= OBJ_PIPWNT;
383 		tsleep(object, PVM, "objtrm", 0);
384 	}
385 	splx(s);
386 
387 	if (object->paging_in_progress != 0)
388 		panic("vm_object_terminate: pageout in progress");
389 
390 	/*
391 	 * Clean and free the pages, as appropriate. All references to the
392 	 * object are gone, so we don't need to lock it.
393 	 */
394 	if (object->type == OBJT_VNODE) {
395 		struct vnode *vp = object->handle;
396 		struct proc *p = curproc;	/* XXX */
397 		int waslocked;
398 
399 		waslocked = VOP_ISLOCKED(vp);
400 		if (!waslocked)
401 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
402 		vm_object_page_clean(object, 0, 0, TRUE, FALSE);
403 		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
404 		if (!waslocked)
405 			VOP_UNLOCK(vp, 0, p);
406 	}
407 
408 	/*
409 	 * Now free the pages. For internal objects, this also removes them
410 	 * from paging queues.
411 	 */
412 	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
413 		if (p->busy || (p->flags & PG_BUSY))
414 			printf("vm_object_terminate: freeing busy page\n");
415 		PAGE_WAKEUP(p);
416 		vm_page_free(p);
417 		cnt.v_pfree++;
418 	}
419 
420 	/*
421 	 * Let the pager know object is dead.
422 	 */
423 	vm_pager_deallocate(object);
424 
425 	simple_lock(&vm_object_list_lock);
426 	TAILQ_REMOVE(&vm_object_list, object, object_list);
427 	vm_object_count--;
428 	simple_unlock(&vm_object_list_lock);
429 
430 	wakeup(object);
431 
432 	/*
433 	 * Free the space for the object.
434 	 */
435 	free((caddr_t) object, M_VMOBJ);
436 }
437 
438 /*
439  *	vm_object_page_clean
440  *
441  *	Clean all dirty pages in the specified range of object.
442  *	Leaves page on whatever queue it is currently on.
443  *
444  *	Odd semantics: if end == 0, the range extends to the end of the object.
445  *
446  *	The object must be locked.
447  */
448 
449 void
450 vm_object_page_clean(object, start, end, syncio, lockflag)
451 	vm_object_t object;
452 	vm_pindex_t start;
453 	vm_pindex_t end;
454 	boolean_t syncio;
455 	boolean_t lockflag;
456 {
457 	register vm_page_t p, np, tp;
458 	register vm_offset_t tstart, tend;
459 	vm_pindex_t pi;
460 	int s;
461 	struct vnode *vp;
462 	int runlen;
463 	int maxf;
464 	int chkb;
465 	int maxb;
466 	int i;
467 	vm_page_t maf[vm_pageout_page_count];
468 	vm_page_t mab[vm_pageout_page_count];
469 	vm_page_t ma[vm_pageout_page_count];
470 	struct proc *pproc = curproc;	/* XXX */
471 
472 	if (object->type != OBJT_VNODE ||
473 		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
474 		return;
475 
476 	vp = object->handle;
477 
478 	if (lockflag)
479 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, pproc);
480 	object->flags |= OBJ_CLEANING;
481 
482 	tstart = start;
483 	if (end == 0) {
484 		tend = object->size;
485 	} else {
486 		tend = end;
487 	}
488 	if ((tstart == 0) && (tend == object->size)) {
489 		object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
490 	}
491 	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq))
492 		p->flags |= PG_CLEANCHK;
493 
494 rescan:
495 	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = np) {
496 		np = TAILQ_NEXT(p, listq);
497 
498 		pi = p->pindex;
499 		if (((p->flags & PG_CLEANCHK) == 0) ||
500 			(pi < tstart) || (pi >= tend) ||
501 			(p->valid == 0) ||
502 			((p->queue - p->pc) == PQ_CACHE)) {
503 			p->flags &= ~PG_CLEANCHK;
504 			continue;
505 		}
506 
507 		vm_page_test_dirty(p);
508 		if ((p->dirty & p->valid) == 0) {
509 			p->flags &= ~PG_CLEANCHK;
510 			continue;
511 		}
512 
513 		s = splvm();
514 		if ((p->flags & PG_BUSY) || p->busy) {
515 			p->flags |= PG_WANTED|PG_REFERENCED;
516 			tsleep(p, PVM, "vpcwai", 0);
517 			splx(s);
518 			goto rescan;
519 		}
520 		splx(s);
521 
522 		s = splvm();
523 		maxf = 0;
524 		for (i = 1; i < vm_pageout_page_count; i++) {
525 			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
526 				if ((tp->flags & PG_BUSY) ||
527 					(tp->flags & PG_CLEANCHK) == 0)
528 					break;
529 				if ((tp->queue - tp->pc) == PQ_CACHE) {
530 					tp->flags &= ~PG_CLEANCHK;
531 					break;
532 				}
533 				vm_page_test_dirty(tp);
534 				if ((tp->dirty & tp->valid) == 0) {
535 					tp->flags &= ~PG_CLEANCHK;
536 					break;
537 				}
538 				maf[i - 1] = tp;
539 				maxf++;
540 				continue;
541 			}
542 			break;
543 		}
544 
545 		maxb = 0;
546 		chkb = vm_pageout_page_count - maxf;
547 		if (chkb) {
548 			for (i = 1; i < chkb; i++) {
549 				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
550 					if ((tp->flags & PG_BUSY) ||
551 						(tp->flags & PG_CLEANCHK) == 0)
552 						break;
553 					if ((tp->queue - tp->pc) == PQ_CACHE) {
554 						tp->flags &= ~PG_CLEANCHK;
555 						break;
556 					}
557 					vm_page_test_dirty(tp);
558 					if ((tp->dirty & tp->valid) == 0) {
559 						tp->flags &= ~PG_CLEANCHK;
560 						break;
561 					}
562 					mab[i - 1] = tp;
563 					maxb++;
564 					continue;
565 				}
566 				break;
567 			}
568 		}
569 
570 		for (i = 0; i < maxb; i++) {
571 			int index = (maxb - i) - 1;
572 			ma[index] = mab[i];
573 			ma[index]->flags |= PG_BUSY;
574 			ma[index]->flags &= ~PG_CLEANCHK;
575 			vm_page_protect(ma[index], VM_PROT_READ);
576 		}
577 		vm_page_protect(p, VM_PROT_READ);
578 		p->flags |= PG_BUSY;
579 		p->flags &= ~PG_CLEANCHK;
580 		ma[maxb] = p;
581 		for (i = 0; i < maxf; i++) {
582 			int index = (maxb + i) + 1;
583 			ma[index] = maf[i];
584 			ma[index]->flags |= PG_BUSY;
585 			ma[index]->flags &= ~PG_CLEANCHK;
586 			vm_page_protect(ma[index], VM_PROT_READ);
587 		}
588 		runlen = maxb + maxf + 1;
589 		splx(s);
590 		vm_pageout_flush(ma, runlen, 0);
591 		goto rescan;
592 	}
593 
594 	VOP_FSYNC(vp, NULL, syncio, curproc);
595 
596 	if (lockflag)
597 		VOP_UNLOCK(vp, 0, pproc);
598 	object->flags &= ~OBJ_CLEANING;
599 	return;
600 }
601 
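/*
 * Note on the clustering above: dirty neighbors following the base
 * page are gathered into maf[], those preceding it into mab[]; the
 * run is then assembled into ma[] as mab[maxb-1]..mab[0], p,
 * maf[0]..maf[maxf-1], so that ma[] is in ascending pindex order
 * for vm_pageout_flush().
 */
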
602 #ifdef not_used
603 /* XXX I cannot tell if this should be an exported symbol */
604 /*
605  *	vm_object_deactivate_pages
606  *
607  *	Deactivate all pages in the specified object.  (Keep its pages
608  *	in memory even though it is no longer referenced.)
609  *
610  *	The object must be locked.
611  */
612 static void
613 vm_object_deactivate_pages(object)
614 	register vm_object_t object;
615 {
616 	register vm_page_t p, next;
617 
618 	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
619 		next = TAILQ_NEXT(p, listq);
620 		vm_page_deactivate(p);
621 	}
622 }
623 #endif
624 
625 /*
626  *	Trim the object cache to size.
627  */
628 static void
629 vm_object_cache_trim()
630 {
631 	register vm_object_t object;
632 
633 	while (vm_object_cached > vm_object_cache_max) {
634 		object = TAILQ_FIRST(&vm_object_cached_list);
635 
636 		vm_object_reference(object);
637 		pager_cache(object, FALSE);
638 	}
639 }
640 
641 
642 /*
643  *	vm_object_pmap_copy:
644  *
645  *	Makes all physical pages in the specified
646  *	object range copy-on-write.  No writeable
647  *	references to these pages should remain.
648  *
649  *	The object must *not* be locked.
650  */
651 void
652 vm_object_pmap_copy(object, start, end)
653 	register vm_object_t object;
654 	register vm_pindex_t start;
655 	register vm_pindex_t end;
656 {
657 	register vm_page_t p;
658 
659 	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
660 		return;
661 
662 	for (p = TAILQ_FIRST(&object->memq);
663 		p != NULL;
664 		p = TAILQ_NEXT(p, listq)) {
665 		vm_page_protect(p, VM_PROT_READ);
666 	}
667 
668 	object->flags &= ~OBJ_WRITEABLE;
669 }
670 
671 /*
672  *	vm_object_pmap_remove:
673  *
674  *	Removes all physical pages in the specified
675  *	object range from all physical maps.
676  *
677  *	The object must *not* be locked.
678  */
679 void
680 vm_object_pmap_remove(object, start, end)
681 	register vm_object_t object;
682 	register vm_pindex_t start;
683 	register vm_pindex_t end;
684 {
685 	register vm_page_t p;
686 	if (object == NULL)
687 		return;
688 	for (p = TAILQ_FIRST(&object->memq);
689 		p != NULL;
690 		p = TAILQ_NEXT(p, listq)) {
691 		if (p->pindex >= start && p->pindex < end)
692 			vm_page_protect(p, VM_PROT_NONE);
693 	}
694 	if ((start == 0) && (object->size == end))
695 		object->flags &= ~OBJ_WRITEABLE;
696 }
697 
698 /*
699  *	vm_object_madvise:
700  *
701  *	Implements the madvise function at the object/page level.
702  */
703 void
704 vm_object_madvise(object, pindex, count, advise)
705 	vm_object_t object;
706 	vm_pindex_t pindex;
707 	int count;
708 	int advise;
709 {
710 	int s;
711 	vm_pindex_t end, tpindex;
712 	vm_object_t tobject;
713 	vm_page_t m;
714 
715 	if (object == NULL)
716 		return;
717 
718 	end = pindex + count;
719 
720 	for (; pindex < end; pindex += 1) {
721 
722 relookup:
723 		tobject = object;
724 		tpindex = pindex;
725 shadowlookup:
726 		m = vm_page_lookup(tobject, tpindex);
727 		if (m == NULL) {
728 			if (tobject->type != OBJT_DEFAULT) {
729 				continue;
730 			}
731 
732 			tobject = tobject->backing_object;
733 			if ((tobject == NULL) || (tobject->ref_count != 1)) {
734 				continue;
735 			}
736 			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
737 			goto shadowlookup;
738 		}
739 
740 		/*
741 		 * If the page is busy or not in a normal active state,
742 		 * we skip it.  Things can break if we mess with pages
743 		 * in any of the below states.
744 		 */
745 		if (m->hold_count || m->wire_count ||
746 			m->valid != VM_PAGE_BITS_ALL) {
747 			continue;
748 		}
749 
750 		if (m->busy || (m->flags & PG_BUSY)) {
751 			s = splvm();
752 			if (m->busy || (m->flags & PG_BUSY)) {
753 				m->flags |= PG_WANTED;
754 				tsleep(m, PVM, "madvpw", 0);
755 			}
756 			splx(s);
757 			goto relookup;
758 		}
759 
760 		if (advise == MADV_WILLNEED) {
761 			if (m->queue != PQ_ACTIVE)
762 				vm_page_activate(m);
763 		} else if (advise == MADV_DONTNEED) {
764 			vm_page_deactivate(m);
765 		} else if (advise == MADV_FREE) {
766 			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
767 			m->dirty = 0;
768 			/*
769 			 * Force a demand zero if attempt to read from swap.
770 			 * We currently don't handle vnode files correctly,
771 			 * and will reread stale contents unnecessarily.
772 			 */
773 			if (object->type == OBJT_SWAP)
774 				swap_pager_dmzspace(tobject, m->pindex, 1);
775 		}
776 	}
777 }
778 
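/*
 * Summary of the advice handling above: MADV_WILLNEED activates a
 * resident page, MADV_DONTNEED deactivates it, and MADV_FREE discards
 * the page's contents by clearing its dirty bits (and, for swap-backed
 * objects, forcing demand-zero on the next fault).
 */
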
779 /*
780  *	vm_object_shadow:
781  *
782  *	Create a new object which is backed by the
783  *	specified existing object range.  The source
784  *	object reference is deallocated.
785  *
786  *	The new object and offset into that object
787  *	are returned in the source parameters.
788  */
789 
790 void
791 vm_object_shadow(object, offset, length)
792 	vm_object_t *object;	/* IN/OUT */
793 	vm_ooffset_t *offset;	/* IN/OUT */
794 	vm_size_t length;
795 {
796 	register vm_object_t source;
797 	register vm_object_t result;
798 
799 	source = *object;
800 
801 	/*
802 	 * Allocate a new object with the given length
803 	 */
804 
805 	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
806 		panic("vm_object_shadow: no object for shadowing");
807 
808 	/*
809 	 * The new object shadows the source object, adding a reference to it.
810 	 * Our caller changes his reference to point to the new object,
811 	 * removing a reference to the source object.  Net result: no change
812 	 * of reference count.
813 	 */
814 	result->backing_object = source;
815 	if (source) {
816 		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
817 		++source->shadow_count;
818 	}
819 
820 	/*
821 	 * Store the offset into the source object, and fix up the offset into
822 	 * the new object.
823 	 */
824 
825 	result->backing_object_offset = *offset;
826 
827 	/*
828 	 * Return the new things
829 	 */
830 
831 	*offset = 0;
832 	*object = result;
833 }
834 
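/*
 * A hedged call sketch (illustrative only, not compiled; "eobj",
 * "eoff" and "len" are hypothetical locals describing a map entry's
 * object, offset and byte length).  The caller's object/offset pair
 * is replaced by a fresh shadow object backed by the old source:
 */
#if 0
	vm_object_t eobj;	/* holds one reference to the source object */
	vm_ooffset_t eoff;	/* entry's offset into the source */

	vm_object_shadow(&eobj, &eoff, OFF_TO_IDX(len));
	/*
	 * eobj is now a new OBJT_DEFAULT object whose backing_object
	 * is the old source; eoff == 0; the old reference has been
	 * consumed by the shadow chain.
	 */
#endif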
835 
836 /*
837  * this version of collapse allows the operation to occur earlier and
838  * when paging_in_progress is true for an object...  This is not a complete
839  * operation, but should plug 99.9% of the rest of the leaks.
840  */
841 static void
842 vm_object_qcollapse(object)
843 	register vm_object_t object;
844 {
845 	register vm_object_t backing_object;
846 	register vm_pindex_t backing_offset_index, paging_offset_index;
847 	vm_pindex_t backing_object_paging_offset_index;
848 	vm_pindex_t new_pindex;
849 	register vm_page_t p, pp;
850 	register vm_size_t size;
851 
852 	backing_object = object->backing_object;
853 	if (backing_object->ref_count != 1)
854 		return;
855 
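	/*
	 * Bias the reference count so the backing object cannot be
	 * torn down while we walk its page list; the bias is removed
	 * at the end of this function.
	 */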
856 	backing_object->ref_count += 2;
857 
858 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
859 	backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
860 	paging_offset_index = OFF_TO_IDX(object->paging_offset);
861 	size = object->size;
862 	p = TAILQ_FIRST(&backing_object->memq);
863 	while (p) {
864 		vm_page_t next;
865 
866 		next = TAILQ_NEXT(p, listq);
867 		if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
868 		    ((p->queue - p->pc) == PQ_CACHE) ||
869 		    !p->valid || p->hold_count || p->wire_count || p->busy) {
870 			p = next;
871 			continue;
872 		}
873 		new_pindex = p->pindex - backing_offset_index;
874 		if (p->pindex < backing_offset_index ||
875 		    new_pindex >= size) {
876 			if (backing_object->type == OBJT_SWAP)
877 				swap_pager_freespace(backing_object,
878 				    backing_object_paging_offset_index+p->pindex,
879 				    1);
880 			vm_page_protect(p, VM_PROT_NONE);
881 			vm_page_free(p);
882 		} else {
883 			pp = vm_page_lookup(object, new_pindex);
884 			if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
885 				    paging_offset_index + new_pindex, NULL, NULL))) {
886 				if (backing_object->type == OBJT_SWAP)
887 					swap_pager_freespace(backing_object,
888 					    backing_object_paging_offset_index + p->pindex, 1);
889 				vm_page_protect(p, VM_PROT_NONE);
890 				vm_page_free(p);
891 			} else {
892 				if (backing_object->type == OBJT_SWAP)
893 					swap_pager_freespace(backing_object,
894 					    backing_object_paging_offset_index + p->pindex, 1);
895 				vm_page_rename(p, object, new_pindex);
896 				vm_page_protect(p, VM_PROT_NONE);
897 				p->dirty = VM_PAGE_BITS_ALL;
898 			}
899 		}
900 		p = next;
901 	}
902 	backing_object->ref_count -= 2;
903 }
904 
905 /*
906  *	vm_object_collapse:
907  *
908  *	Collapse an object with the object backing it.
909  *	Pages in the backing object are moved into the
910  *	parent, and the backing object is deallocated.
911  */
912 void
913 vm_object_collapse(object)
914 	vm_object_t object;
915 
916 {
917 	vm_object_t backing_object;
918 	vm_ooffset_t backing_offset;
919 	vm_size_t size;
920 	vm_pindex_t new_pindex, backing_offset_index;
921 	vm_page_t p, pp;
922 
923 	while (TRUE) {
924 		/*
925 		 * Verify that the conditions are right for collapse:
926 		 *
927 		 * The object exists and no pages in it are currently being paged
928 		 * out.
929 		 */
930 		if (object == NULL)
931 			return;
932 
933 		/*
934 		 * Make sure there is a backing object.
935 		 */
936 		if ((backing_object = object->backing_object) == NULL)
937 			return;
938 
939 		/*
940 		 * We check the backing object first, because it is most likely
941 		 * not collapsible.
942 		 */
943 		if (backing_object->handle != NULL ||
944 		    (backing_object->type != OBJT_DEFAULT &&
945 		     backing_object->type != OBJT_SWAP) ||
946 		    (backing_object->flags & OBJ_DEAD) ||
947 		    object->handle != NULL ||
948 		    (object->type != OBJT_DEFAULT &&
949 		     object->type != OBJT_SWAP) ||
950 		    (object->flags & OBJ_DEAD)) {
951 			return;
952 		}
953 
954 		if (object->paging_in_progress != 0 ||
955 		    backing_object->paging_in_progress != 0) {
956 			vm_object_qcollapse(object);
957 			return;
958 		}
959 
960 		/*
961 		 * We know that we can either collapse the backing object (if
962 		 * the parent is the only reference to it) or (perhaps) remove
963 		 * the parent's reference to it.
964 		 */
965 
966 		backing_offset = object->backing_object_offset;
967 		backing_offset_index = OFF_TO_IDX(backing_offset);
968 		size = object->size;
969 
970 		/*
971 		 * If there is exactly one reference to the backing object, we
972 		 * can collapse it into the parent.
973 		 */
974 
975 		if (backing_object->ref_count == 1) {
976 
977 			backing_object->flags |= OBJ_DEAD;
978 			/*
979 			 * We can collapse the backing object.
980 			 *
981 			 * Move all in-memory pages from backing_object to the
982 			 * parent.  Pages that have been paged out will be
983 			 * overwritten by any of the parent's pages that
984 			 * shadow them.
985 			 */
986 
987 			while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {
988 
989 				new_pindex = p->pindex - backing_offset_index;
990 
991 				/*
992 				 * If the parent has a page here, or if this
993 				 * page falls outside the parent, dispose of
994 				 * it.
995 				 *
996 				 * Otherwise, move it as planned.
997 				 */
998 
999 				if (p->pindex < backing_offset_index ||
1000 				    new_pindex >= size) {
1001 					vm_page_protect(p, VM_PROT_NONE);
1002 					PAGE_WAKEUP(p);
1003 					vm_page_free(p);
1004 				} else {
1005 					pp = vm_page_lookup(object, new_pindex);
1006 					if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
1007 					    OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
1008 						vm_page_protect(p, VM_PROT_NONE);
1009 						PAGE_WAKEUP(p);
1010 						vm_page_free(p);
1011 					} else {
1012 						vm_page_protect(p, VM_PROT_NONE);
1013 						vm_page_rename(p, object, new_pindex);
1014 						p->dirty = VM_PAGE_BITS_ALL;
1015 					}
1016 				}
1017 			}
1018 
1019 			/*
1020 			 * Move the pager from backing_object to object.
1021 			 */
1022 
1023 			if (backing_object->type == OBJT_SWAP) {
1024 				backing_object->paging_in_progress++;
1025 				if (object->type == OBJT_SWAP) {
1026 					object->paging_in_progress++;
1027 					/*
1028 					 * copy shadow object pages into ours
1029 					 * and destroy unneeded pages in
1030 					 * shadow object.
1031 					 */
1032 					swap_pager_copy(
1033 					    backing_object,
1034 					    OFF_TO_IDX(backing_object->paging_offset),
1035 					    object,
1036 					    OFF_TO_IDX(object->paging_offset),
1037 					    OFF_TO_IDX(object->backing_object_offset));
1038 					vm_object_pip_wakeup(object);
1039 				} else {
1040 					object->paging_in_progress++;
1041 					/*
1042 					 * move the shadow backing_object's pager data to
1043 					 * "object" and convert "object" type to OBJT_SWAP.
1044 					 */
1045 					object->type = OBJT_SWAP;
1046 					object->un_pager.swp.swp_nblocks =
1047 					    backing_object->un_pager.swp.swp_nblocks;
1048 					object->un_pager.swp.swp_allocsize =
1049 					    backing_object->un_pager.swp.swp_allocsize;
1050 					object->un_pager.swp.swp_blocks =
1051 					    backing_object->un_pager.swp.swp_blocks;
1052 					object->un_pager.swp.swp_poip =		/* XXX */
1053 					    backing_object->un_pager.swp.swp_poip;
1054 					object->paging_offset = backing_object->paging_offset + backing_offset;
1055 					TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);
1056 
1057 					/*
1058 					 * Convert backing object from OBJT_SWAP to
1059 					 * OBJT_DEFAULT. XXX - only the TAILQ_REMOVE is
1060 					 * actually necessary.
1061 					 */
1062 					backing_object->type = OBJT_DEFAULT;
1063 					TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
1064 					/*
1065 					 * free unnecessary blocks
1066 					 */
1067 					swap_pager_freespace(object, 0,
1068 						OFF_TO_IDX(object->paging_offset));
1069 					vm_object_pip_wakeup(object);
1070 				}
1071 
1072 				vm_object_pip_wakeup(backing_object);
1073 			}
1074 			/*
1075 			 * Object now shadows whatever backing_object did.
1076 			 * Note that the reference to backing_object->backing_object
1077 			 * moves from within backing_object to within object.
1078 			 */
1079 
1080 			TAILQ_REMOVE(&object->backing_object->shadow_head, object,
1081 			    shadow_list);
1082 			--object->backing_object->shadow_count;
1083 			if (backing_object->backing_object) {
1084 				TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
1085 				    backing_object, shadow_list);
1086 				--backing_object->backing_object->shadow_count;
1087 			}
1088 			object->backing_object = backing_object->backing_object;
1089 			if (object->backing_object) {
1090 				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
1091 				    object, shadow_list);
1092 				++object->backing_object->shadow_count;
1093 			}
1094 
1095 			object->backing_object_offset += backing_object->backing_object_offset;
1096 			/*
1097 			 * Discard backing_object.
1098 			 *
1099 			 * Since the backing object has no pages, no pager left,
1100 			 * and no object references within it, all that is
1101 			 * necessary is to dispose of it.
1102 			 */
1103 
1104 			TAILQ_REMOVE(&vm_object_list, backing_object,
1105 			    object_list);
1106 			vm_object_count--;
1107 
1108 			free((caddr_t) backing_object, M_VMOBJ);
1109 
1110 			object_collapses++;
1111 		} else {
1112 			/*
1113 			 * If all of the pages in the backing object are
1114 			 * shadowed by the parent object, the parent object no
1115 			 * longer has to shadow the backing object; it can
1116 			 * shadow the next one in the chain.
1117 			 *
1118 			 * The backing object must not be paged out - we'd have
1119 			 * to check all of the paged-out pages, as well.
1120 			 */
1121 
1122 			if (backing_object->type != OBJT_DEFAULT) {
1123 				return;
1124 			}
1125 			/*
1126 			 * Should have a check for a 'small' number of pages
1127 			 * here.
1128 			 */
1129 
1130 			for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
1131 				new_pindex = p->pindex - backing_offset_index;
1132 
1133 				/*
1134 				 * If the parent has a page here, or if this
1135 				 * page falls outside the parent, keep going.
1136 				 *
1137 				 * Otherwise, the backing_object must be left in
1138 				 * the chain.
1139 				 */
1140 
1141 				if (p->pindex >= backing_offset_index &&
1142 					new_pindex < size) {
1143 
1144 					pp = vm_page_lookup(object, new_pindex);
1145 
1146 					if ((pp == NULL || pp->valid == 0) &&
1147 					    !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
1148 						/*
1149 						 * Page still needed. Can't go any
1150 						 * further.
1151 						 */
1152 						return;
1153 					}
1154 				}
1155 			}
1156 
1157 			/*
1158 			 * Make the parent shadow the next object in the
1159 			 * chain.  Deallocating backing_object will not remove
1160 			 * it, since its reference count is at least 2.
1161 			 */
1162 
1163 			TAILQ_REMOVE(&object->backing_object->shadow_head,
1164 			    object, shadow_list);
1165 			--object->backing_object->shadow_count;
1166 			vm_object_reference(object->backing_object = backing_object->backing_object);
1167 			if (object->backing_object) {
1168 				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
1169 				    object, shadow_list);
1170 				++object->backing_object->shadow_count;
1171 			}
1172 			object->backing_object_offset += backing_object->backing_object_offset;
1173 
1174 			/*
1175 			 * Drop the reference count on backing_object. Since
1176 			 * its ref_count was at least 2, it will not vanish;
1177 			 * so we don't need to call vm_object_deallocate.
1178 			 */
1179 			if (backing_object->ref_count == 1)
1180 				printf("should have called obj deallocate\n");
1181 			backing_object->ref_count--;
1182 
1183 			object_bypasses++;
1184 
1185 		}
1186 
1187 		/*
1188 		 * Try again with this object's new backing object.
1189 		 */
1190 	}
1191 }
1192 
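/*
 * Illustration of the two outcomes above.  Given the chain
 *
 *	object -> backing_object -> X
 *
 * the full-collapse case (backing_object->ref_count == 1) moves the
 * backing object's pages and swap pager data into object and frees
 * backing_object, leaving "object -> X".  The bypass case merely
 * re-links object past a completely-shadowed backing_object and drops
 * one of its references, leaving backing_object alive.
 */
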
1193 /*
1194  *	vm_object_page_remove: [internal]
1195  *
1196  *	Removes all physical pages in the specified
1197  *	object range from the object's list of pages.
1198  *
1199  *	The object must be locked.
1200  */
1201 void
1202 vm_object_page_remove(object, start, end, clean_only)
1203 	register vm_object_t object;
1204 	register vm_pindex_t start;
1205 	register vm_pindex_t end;
1206 	boolean_t clean_only;
1207 {
1208 	register vm_page_t p, next;
1209 	unsigned int size;
1210 	int s;
1211 
1212 	if (object == NULL)
1213 		return;
1214 
1215 	object->paging_in_progress++;
1216 again:
1217 	size = end - start;
1218 	if (size > 4 || size >= object->size / 4) {
1219 		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
1220 			next = TAILQ_NEXT(p, listq);
1221 			if ((start <= p->pindex) && (p->pindex < end)) {
1222 				if (p->wire_count != 0) {
1223 					vm_page_protect(p, VM_PROT_NONE);
1224 					p->valid = 0;
1225 					continue;
1226 				}
1227 
1228 				/*
1229 				 * The busy flags are only cleared at
1230 				 * interrupt -- minimize the spl transitions
1231 				 */
1232 				if ((p->flags & PG_BUSY) || p->busy) {
1233 					s = splvm();
1234 					if ((p->flags & PG_BUSY) || p->busy) {
1235 						p->flags |= PG_WANTED;
1236 						tsleep(p, PVM, "vmopar", 0);
1237 						splx(s);
1238 						goto again;
1239 					}
1240 					splx(s);
1241 				}
1242 
1243 				if (clean_only) {
1244 					vm_page_test_dirty(p);
1245 					if (p->valid & p->dirty)
1246 						continue;
1247 				}
1248 				vm_page_protect(p, VM_PROT_NONE);
1249 				PAGE_WAKEUP(p);
1250 				vm_page_free(p);
1251 			}
1252 		}
1253 	} else {
1254 		while (size > 0) {
1255 			if ((p = vm_page_lookup(object, start)) != 0) {
1256 				if (p->wire_count != 0) {
1257 					p->valid = 0;
1258 					vm_page_protect(p, VM_PROT_NONE);
1259 					start += 1;
1260 					size -= 1;
1261 					continue;
1262 				}
1263 				/*
1264 				 * The busy flags are only cleared at
1265 				 * interrupt -- minimize the spl transitions
1266 				 */
1267 				if ((p->flags & PG_BUSY) || p->busy) {
1268 					s = splvm();
1269 					if ((p->flags & PG_BUSY) || p->busy) {
1270 						p->flags |= PG_WANTED;
1271 						tsleep(p, PVM, "vmopar", 0);
1272 						splx(s);
1273 						goto again;
1274 					}
1275 					splx(s);
1276 				}
1277 				if (clean_only) {
1278 					vm_page_test_dirty(p);
1279 					if (p->valid & p->dirty) {
1280 						start += 1;
1281 						size -= 1;
1282 						continue;
1283 					}
1284 				}
1285 				vm_page_protect(p, VM_PROT_NONE);
1286 				PAGE_WAKEUP(p);
1287 				vm_page_free(p);
1288 			}
1289 			start += 1;
1290 			size -= 1;
1291 		}
1292 	}
1293 	vm_object_pip_wakeup(object);
1294 }
1295 
1296 /*
1297  *	Routine:	vm_object_coalesce
1298  *	Function:	Coalesces two objects backing up adjoining
1299  *			regions of memory into a single object.
1300  *
1301  *	returns TRUE if objects were combined.
1302  *
1303  *	NOTE:	Only works at the moment if the second object is NULL -
1304  *		if it's not, which object do we lock first?
1305  *
1306  *	Parameters:
1307  *		prev_object	First object to coalesce
1308  *		prev_pindex	Page index into prev_object
1309  *
1310  *		prev_size	Size (in bytes) of reference to prev_object
1311  *		next_size	Size (in bytes) of the adjoining region
1312  *
1313  *	(No second object is passed; see the NOTE above.)
1314  *
1315  *	Conditions:
1316  *	The object must *not* be locked.
1317  */
1318 boolean_t
1319 vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
1320 	register vm_object_t prev_object;
1321 	vm_pindex_t prev_pindex;
1322 	vm_size_t prev_size, next_size;
1323 {
1324 	vm_size_t newsize;
1325 
1326 	if (prev_object == NULL) {
1327 		return (TRUE);
1328 	}
1329 
1330 	if (prev_object->type != OBJT_DEFAULT) {
1331 		return (FALSE);
1332 	}
1333 
1334 	/*
1335 	 * Try to collapse the object first
1336 	 */
1337 	vm_object_collapse(prev_object);
1338 
1339 	/*
1340 	 * Can't coalesce if: more than one reference; paged out; shadows
1341 	 * another object; or has a copy elsewhere (any of which mean that
1342 	 * the pages not mapped to prev_entry may be in use anyway).
1343 	 */
1344 
1345 	if (prev_object->backing_object != NULL) {
1346 		return (FALSE);
1347 	}
1348 
1349 	prev_size >>= PAGE_SHIFT;
1350 	next_size >>= PAGE_SHIFT;
1351 
1352 	if ((prev_object->ref_count > 1) &&
1353 	    (prev_object->size != prev_pindex + prev_size)) {
1354 		return (FALSE);
1355 	}
1356 
1357 	/*
1358 	 * Remove any pages that may still be in the object from a previous
1359 	 * deallocation.
1360 	 */
1361 
1362 	vm_object_page_remove(prev_object,
1363 	    prev_pindex + prev_size,
1364 	    prev_pindex + prev_size + next_size, FALSE);
1365 
1366 	/*
1367 	 * Extend the object if necessary.
1368 	 */
1369 	newsize = prev_pindex + prev_size + next_size;
1370 	if (newsize > prev_object->size)
1371 		prev_object->size = newsize;
1372 
1373 	return (TRUE);
1374 }
1375 
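/*
 * A hedged usage sketch (illustrative only, not compiled;
 * "prev_bytes" and "grow_bytes" are hypothetical byte counts --
 * sizes are passed in bytes, since the function shifts them by
 * PAGE_SHIFT itself).  A caller extending an anonymous mapping
 * might try:
 */
#if 0
	if (vm_object_coalesce(prev_object, prev_pindex,
	    prev_bytes, grow_bytes)) {
		/* prev_object->size now covers the combined range */
	}
#endif
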
1376 #include "opt_ddb.h"
1377 #ifdef DDB
1378 #include <sys/kernel.h>
1379 
1380 #include <machine/cons.h>
1381 
1382 #include <ddb/ddb.h>
1383 
1384 static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
1385 				       vm_map_entry_t entry));
1386 static int	vm_object_in_map __P((vm_object_t object));
1387 
1388 static int
1389 _vm_object_in_map(map, object, entry)
1390 	vm_map_t map;
1391 	vm_object_t object;
1392 	vm_map_entry_t entry;
1393 {
1394 	vm_map_t tmpm;
1395 	vm_map_entry_t tmpe;
1396 	vm_object_t obj;
1397 	int entcount;
1398 
1399 	if (map == 0)
1400 		return 0;
1401 
1402 	if (entry == 0) {
1403 		tmpe = map->header.next;
1404 		entcount = map->nentries;
1405 		while (entcount-- && (tmpe != &map->header)) {
1406 			if (_vm_object_in_map(map, object, tmpe)) {
1407 				return 1;
1408 			}
1409 			tmpe = tmpe->next;
1410 		}
1411 	} else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1412 		tmpm = entry->object.share_map;
1413 		tmpe = tmpm->header.next;
1414 		entcount = tmpm->nentries;
1415 		while (entcount-- && tmpe != &tmpm->header) {
1416 			if (_vm_object_in_map(tmpm, object, tmpe)) {
1417 				return 1;
1418 			}
1419 			tmpe = tmpe->next;
1420 		}
1421 	} else if ((obj = entry->object.vm_object) != NULL) {
1422 		for (; obj != NULL; obj = obj->backing_object)
1423 			if (obj == object) {
1424 				return 1;
1425 			}
1426 	}
1427 	return 0;
1428 }
1429 
1430 static int
1431 vm_object_in_map( object)
1432 	vm_object_t object;
1433 {
1434 	struct proc *p;
1435 	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1436 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
1437 			continue;
1438 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
1439 			return 1;
1440 	}
1441 	if (_vm_object_in_map(kernel_map, object, 0))
1442 		return 1;
1443 	if (_vm_object_in_map(kmem_map, object, 0))
1444 		return 1;
1445 	if (_vm_object_in_map(pager_map, object, 0))
1446 		return 1;
1447 	if (_vm_object_in_map(buffer_map, object, 0))
1448 		return 1;
1449 	if (_vm_object_in_map(io_map, object, 0))
1450 		return 1;
1451 	if (_vm_object_in_map(phys_map, object, 0))
1452 		return 1;
1453 	if (_vm_object_in_map(mb_map, object, 0))
1454 		return 1;
1455 	if (_vm_object_in_map(u_map, object, 0))
1456 		return 1;
1457 	return 0;
1458 }
1459 
1460 DB_SHOW_COMMAND(vmochk, vm_object_check)
1461 {
1462 	vm_object_t object;
1463 
1464 	/*
1465 	 * make sure that internal objs are in a map somewhere
1466 	 * and none have zero ref counts.
1467 	 */
1468 	for (object = TAILQ_FIRST(&vm_object_list);
1469 			object != NULL;
1470 			object = TAILQ_NEXT(object, object_list)) {
1471 		if (object->handle == NULL &&
1472 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
1473 			if (object->ref_count == 0) {
1474 				db_printf("vmochk: internal obj has zero ref count (size: %d)\n",
1475 					object->size);
1476 			}
1477 			if (!vm_object_in_map(object)) {
1478 				db_printf("vmochk: internal obj is not in a map: "
1479 		"ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
1480 				    object->ref_count, object->size,
1481 				    object->size, object->backing_object);
1482 			}
1483 		}
1484 	}
1485 }
1486 
1487 /*
1488  *	vm_object_print:	[ debug ]
1489  */
1490 DB_SHOW_COMMAND(object, vm_object_print_static)
1491 {
1492 	/* XXX convert args. */
1493 	vm_object_t object = (vm_object_t)addr;
1494 	boolean_t full = have_addr;
1495 
1496 	register vm_page_t p;
1497 
1498 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
1499 #define	count	was_count
1500 
1501 	register int count;
1502 
1503 	if (object == NULL)
1504 		return;
1505 
1506 	db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
1507 	    (int) object, (int) object->size,
1508 	    object->resident_page_count, object->ref_count);
1509 	db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
1510 	    (int) object->paging_offset,
1511 	    (int) object->backing_object, (int) object->backing_object_offset);
1512 	db_printf("cache: next=%p, prev=%p\n",
1513 	    TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list));
1514 
1515 	if (!full)
1516 		return;
1517 
1518 	db_indent += 2;
1519 	count = 0;
1520 	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
1521 		if (count == 0)
1522 			db_iprintf("memory:=");
1523 		else if (count == 6) {
1524 			db_printf("\n");
1525 			db_iprintf(" ...");
1526 			count = 0;
1527 		} else
1528 			db_printf(",");
1529 		count++;
1530 
1531 		db_printf("(off=0x%lx,page=0x%lx)",
1532 		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
1533 	}
1534 	if (count != 0)
1535 		db_printf("\n");
1536 	db_indent -= 2;
1537 }
1538 
1539 /* XXX. */
1540 #undef count
1541 
1542 /* XXX need this non-static entry for calling from vm_map_print. */
1543 void
1544 vm_object_print(addr, have_addr, count, modif)
1545 	db_expr_t addr;
1546 	boolean_t have_addr;
1547 	db_expr_t count;
1548 	char *modif;
1549 {
1550 	vm_object_print_static(addr, have_addr, count, modif);
1551 }
1552 
1553 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
1554 {
1555 	vm_object_t object;
1556 	int nl = 0;
1557 	int c;
1558 	for (object = TAILQ_FIRST(&vm_object_list);
1559 			object != NULL;
1560 			object = TAILQ_NEXT(object, object_list)) {
1561 		vm_pindex_t idx, fidx;
1562 		vm_pindex_t osize;
1563 		vm_offset_t pa = -1, padiff;
1564 		int rcount;
1565 		vm_page_t m;
1566 
1567 		db_printf("new object: 0x%x\n", object);
1568 		if (nl > 18) {
1569 			c = cngetc();
1570 			if (c != ' ')
1571 				return;
1572 			nl = 0;
1573 		}
1574 		nl++;
1575 		rcount = 0;
1576 		fidx = 0;
1577 		osize = object->size;
1578 		if (osize > 128)
1579 			osize = 128;
1580 		for (idx = 0; idx < osize; idx++) {
1581 			m = vm_page_lookup(object, idx);
1582 			if (m == NULL) {
1583 				if (rcount) {
1584 					db_printf(" index(%d)run(%d)pa(0x%x)\n",
1585 						fidx, rcount, pa);
1586 					if (nl > 18) {
1587 						c = cngetc();
1588 						if (c != ' ')
1589 							return;
1590 						nl = 0;
1591 					}
1592 					nl++;
1593 					rcount = 0;
1594 				}
1595 				continue;
1596 			}
1597 
1598 
1599 			if (rcount &&
1600 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
1601 				++rcount;
1602 				continue;
1603 			}
1604 			if (rcount) {
1605 				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
1606 				padiff >>= PAGE_SHIFT;
1607 				padiff &= PQ_L2_MASK;
1608 				if (padiff == 0) {
1609 					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
1610 					++rcount;
1611 					continue;
1612 				}
1613 				db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
1614 				db_printf("pd(%d)\n", padiff);
1615 				if (nl > 18) {
1616 					c = cngetc();
1617 					if (c != ' ')
1618 						return;
1619 					nl = 0;
1620 				}
1621 				nl++;
1622 			}
1623 			fidx = idx;
1624 			pa = VM_PAGE_TO_PHYS(m);
1625 			rcount = 1;
1626 		}
1627 		if (rcount) {
1628 			db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
1629 			if (nl > 18) {
1630 				c = cngetc();
1631 				if (c != ' ')
1632 					return;
1633 				nl = 0;
1634 			}
1635 			nl++;
1636 		}
1637 	}
1638 }
1639 #endif /* DDB */
1640