xref: /freebsd/sys/vm/vm_map.c (revision 609e0c94f2ea3e5e75ddf58a45ec23613265f2a6)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_map.c,v 1.108 1998/01/22 17:30:37 dyson Exp $
65  */
66 
67 /*
68  *	Virtual memory mapping module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/proc.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/buf.h>
78 #include <sys/vnode.h>
79 
80 #include <vm/vm.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_prot.h>
83 #include <vm/vm_inherit.h>
84 #include <sys/lock.h>
85 #include <vm/pmap.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/default_pager.h>
94 #include <vm/swap_pager.h>
95 #include <vm/vm_zone.h>
96 
97 static MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
98 
99 /*
100  *	Virtual memory maps provide for the mapping, protection,
101  *	and sharing of virtual memory objects.  In addition,
102  *	this module provides for an efficient virtual copy of
103  *	memory from one map to another.
104  *
105  *	Synchronization is required prior to most operations.
106  *
107  *	Maps consist of an ordered doubly-linked list of simple
108  *	entries; a single hint is used to speed up lookups.
109  *
110  *	In order to properly represent the sharing of virtual
111  *	memory regions among maps, the map structure is bi-level.
112  *	Top-level ("address") maps refer to regions of sharable
113  *	virtual memory.  These regions are implemented as
114  *	("sharing") maps, which then refer to the actual virtual
115  *	memory objects.  When two address maps "share" memory,
116  *	their top-level maps both have references to the same
117  *	sharing map.  When memory is virtual-copied from one
118  *	address map to another, the references in the sharing
119  *	maps are actually copied -- no copying occurs at the
120  *	virtual memory object level.
121  *
122  *	Since portions of maps are specified by start/end addresses,
123  *	which may not align with existing map entries, all
124  *	routines merely "clip" entries to these start/end values.
125  *	[That is, an entry is split into two, bordering at a
126  *	start or end value.]  Note that these clippings may not
127  *	always be necessary (as the two resulting entries are then
128  *	not changed); however, the clipping is done for convenience.
129  *	No attempt is currently made to "glue back together" two
130  *	abutting entries.
131  *
132  *	As mentioned above, virtual copy operations are performed
133  *	by copying VM object references from one sharing map to
134  *	another, and then marking both regions as copy-on-write.
135  *	It is important to note that only one writeable reference
136  *	to a VM object region exists in any map -- this means that
137  *	shadow object creation can be delayed until a write operation
138  *	occurs.
139  */
140 
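/*
 * Illustrative sketch (not part of the build): a typical consumer of
 * this module locks the map, finds free space, and inserts a mapping,
 * much as vm_map_find() below does.  The wrapper name and the use of
 * a NULL (anonymous) object are assumptions of the example; only the
 * vm_map_* routines are from this file.
 */
#if 0
static int
example_map_anon(vm_map_t map, vm_offset_t *addr, vm_size_t size)
{
	int rv;

	vm_map_lock(map);
	if (vm_map_findspace(map, *addr, size, addr)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}
	/* A NULL object requests an anonymous, zero-fill mapping. */
	rv = vm_map_insert(map, NULL, (vm_ooffset_t) 0,
	    *addr, *addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (rv);
}
#endif
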
141 /*
142  *	vm_map_startup:
143  *
144  *	Initialize the vm_map module.  Must be called before
145  *	any other vm_map routines.
146  *
147  *	Map and entry structures are allocated from the general
148  *	purpose memory pool with some exceptions:
149  *
150  *	- The kernel map and kmem submap are allocated statically.
151  *	- Kernel map entries are allocated out of a static pool.
152  *
153  *	These restrictions are necessary since malloc() uses the
154  *	maps and requires map entries.
155  */
156 
157 extern char kstack[];
158 extern int inmprotect;
159 
160 static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
161 static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
162 static struct vm_object kmapentobj, mapentobj, mapobj;
163 #define MAP_ENTRY_INIT	128
164 struct vm_map_entry map_entry_init[MAX_MAPENT];
165 struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
166 struct vm_map map_init[MAX_KMAP];
167 
168 static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
169 static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
170 static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
171 static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
172 static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
173 static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
174 static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
175 		vm_map_entry_t));
176 static vm_page_t vm_freeze_page_alloc __P((vm_object_t, vm_pindex_t));
177 
178 void
179 vm_map_startup()
180 {
181 	mapzone = &mapzone_store;
182 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
183 		map_init, MAX_KMAP);
184 	kmapentzone = &kmapentzone_store;
185 	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
186 		kmap_entry_init, MAX_KMAPENT);
187 	mapentzone = &mapentzone_store;
188 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
189 		map_entry_init, MAX_MAPENT);
190 }
191 
192 /*
193  * Allocate a vmspace structure, including a vm_map and pmap,
194  * and initialize those structures.  The refcnt is set to 1.
195  * The remaining fields must be initialized by the caller.
196  */
197 struct vmspace *
198 vmspace_alloc(min, max)
199 	vm_offset_t min, max;
200 {
201 	register struct vmspace *vm;
202 
203 	vm = zalloc(vmspace_zone);
204 	bzero(&vm->vm_map, sizeof vm->vm_map);
205 	vm_map_init(&vm->vm_map, min, max);
206 	pmap_pinit(&vm->vm_pmap);
207 	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
208 	vm->vm_refcnt = 1;
209 	vm->vm_shm = NULL;
210 	return (vm);
211 }
212 
213 void
214 vm_init2(void) {
215 	zinitna(kmapentzone, &kmapentobj,
216 		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
217 	zinitna(mapentzone, &mapentobj,
218 		NULL, 0, 0, 0, 1);
219 	zinitna(mapzone, &mapobj,
220 		NULL, 0, 0, 0, 1);
221 	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
222 	pmap_init2();
223 	vm_object_init2();
224 }
225 
226 void
227 vmspace_free(vm)
228 	register struct vmspace *vm;
229 {
230 
231 	if (vm->vm_refcnt == 0)
232 		panic("vmspace_free: attempt to free already freed vmspace");
233 
234 	if (--vm->vm_refcnt == 0) {
235 
236 		/*
237 		 * Lock the map, to wait out all other references to it.
238 		 * Delete all of the mappings and pages they hold, then call
239 		 * the pmap module to reclaim anything left.
240 		 */
241 		vm_map_lock(&vm->vm_map);
242 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
243 		    vm->vm_map.max_offset);
244 		vm_map_unlock(&vm->vm_map);
245 
246 		pmap_release(&vm->vm_pmap);
247 		zfree(vmspace_zone, vm);
248 	}
249 }
250 
251 /*
252  *	vm_map_create:
253  *
254  *	Creates and returns a new empty VM map with
255  *	the given physical map structure, and having
256  *	the given lower and upper address bounds.
257  */
258 vm_map_t
259 vm_map_create(pmap, min, max)
260 	pmap_t pmap;
261 	vm_offset_t min, max;
262 {
263 	register vm_map_t result;
264 
265 	result = zalloc(mapzone);
266 	vm_map_init(result, min, max);
267 	result->pmap = pmap;
268 	return (result);
269 }
270 
271 /*
272  * Initialize an existing vm_map structure
273  * such as that in the vmspace structure.
274  * The pmap is set elsewhere.
275  */
276 void
277 vm_map_init(map, min, max)
278 	register struct vm_map *map;
279 	vm_offset_t min, max;
280 {
281 	map->header.next = map->header.prev = &map->header;
282 	map->nentries = 0;
283 	map->size = 0;
284 	map->is_main_map = TRUE;
285 	map->system_map = 0;
286 	map->min_offset = min;
287 	map->max_offset = max;
288 	map->first_free = &map->header;
289 	map->hint = &map->header;
290 	map->timestamp = 0;
291 	lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
292 }
293 
294 /*
295  *	vm_map_entry_dispose:	[ internal use only ]
296  *
297  *	Inverse of vm_map_entry_create.
298  */
299 static void
300 vm_map_entry_dispose(map, entry)
301 	vm_map_t map;
302 	vm_map_entry_t entry;
303 {
304 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
305 }
306 
307 /*
308  *	vm_map_entry_create:	[ internal use only ]
309  *
310  *	Allocates a VM map entry for insertion.
311  *	No entry fields are filled in.  This routine is
312  *	No entry fields are filled in.
313 static vm_map_entry_t
314 vm_map_entry_create(map)
315 	vm_map_t map;
316 {
317 	return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
318 }
319 
320 /*
321  *	vm_map_entry_{un,}link:
322  *
323  *	Insert/remove entries from maps.
324  */
325 #define	vm_map_entry_link(map, after_where, entry) \
326 		{ \
327 		(map)->nentries++; \
328 		(map)->timestamp++; \
329 		(entry)->prev = (after_where); \
330 		(entry)->next = (after_where)->next; \
331 		(entry)->prev->next = (entry); \
332 		(entry)->next->prev = (entry); \
333 		}
334 #define	vm_map_entry_unlink(map, entry) \
335 		{ \
336 		(map)->nentries--; \
337 		(map)->timestamp++; \
338 		(entry)->next->prev = (entry)->prev; \
339 		(entry)->prev->next = (entry)->next; \
340 		}
341 
342 /*
343  *	SAVE_HINT:
344  *
345  *	Saves the specified entry as the hint for
346  *	future lookups.
347  */
348 #define	SAVE_HINT(map,value) \
349 		(map)->hint = (value);
350 
351 /*
352  *	vm_map_lookup_entry:	[ internal use only ]
353  *
354  *	Finds the map entry containing (or
355  *	immediately preceding) the specified address
356  *	in the given map; the entry is returned
357  *	in the "entry" parameter.  The boolean
358  *	result indicates whether the address is
359  *	actually contained in the map.
360  */
361 boolean_t
362 vm_map_lookup_entry(map, address, entry)
363 	register vm_map_t map;
364 	register vm_offset_t address;
365 	vm_map_entry_t *entry;	/* OUT */
366 {
367 	register vm_map_entry_t cur;
368 	register vm_map_entry_t last;
369 
370 	/*
371 	 * Start looking either from the head of the list, or from the hint.
372 	 */
373 
374 	cur = map->hint;
375 
376 	if (cur == &map->header)
377 		cur = cur->next;
378 
379 	if (address >= cur->start) {
380 		/*
381 		 * Go from hint to end of list.
382 		 *
383 		 * But first, make a quick check to see if we are already looking
384 		 * at the entry we want (which is usually the case). Note also
385 		 * that we don't need to save the hint here... it is the same
386 		 * hint (unless we are at the header, in which case the hint
387 		 * didn't buy us anything anyway).
388 		 */
389 		last = &map->header;
390 		if ((cur != last) && (cur->end > address)) {
391 			*entry = cur;
392 			return (TRUE);
393 		}
394 	} else {
395 		/*
396 		 * Go from start to hint, *inclusively*
397 		 */
398 		last = cur->next;
399 		cur = map->header.next;
400 	}
401 
402 	/*
403 	 * Search linearly
404 	 */
405 
406 	while (cur != last) {
407 		if (cur->end > address) {
408 			if (address >= cur->start) {
409 				/*
410 				 * Save this lookup for future hints, and
411 				 * return
412 				 */
413 
414 				*entry = cur;
415 				SAVE_HINT(map, cur);
416 				return (TRUE);
417 			}
418 			break;
419 		}
420 		cur = cur->next;
421 	}
422 	*entry = cur->prev;
423 	SAVE_HINT(map, *entry);
424 	return (FALSE);
425 }
426 
427 /*
428  *	vm_map_insert:
429  *
430  *	Inserts the given whole VM object into the target
431  *	map at the specified address range.  The object's
432  *	size should match that of the address range.
433  *
434  *	Requires that the map be locked, and leaves it so.
435  */
436 int
437 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
438 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
439 	      int cow)
440 {
441 	register vm_map_entry_t new_entry;
442 	register vm_map_entry_t prev_entry;
443 	vm_map_entry_t temp_entry;
444 	vm_object_t prev_object;
445 	u_char protoeflags;
446 
447 	if ((object != NULL) && (cow & MAP_NOFAULT)) {
448 		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
449 	}
450 
451 	/*
452 	 * Check that the start and end points are not bogus.
453 	 */
454 
455 	if ((start < map->min_offset) || (end > map->max_offset) ||
456 	    (start >= end))
457 		return (KERN_INVALID_ADDRESS);
458 
459 	/*
460 	 * Find the entry prior to the proposed starting address; if it's part
461 	 * of an existing entry, this range is bogus.
462 	 */
463 
464 	if (vm_map_lookup_entry(map, start, &temp_entry))
465 		return (KERN_NO_SPACE);
466 
467 	prev_entry = temp_entry;
468 
469 	/*
470 	 * Assert that the next entry doesn't overlap the end point.
471 	 */
472 
473 	if ((prev_entry->next != &map->header) &&
474 	    (prev_entry->next->start < end))
475 		return (KERN_NO_SPACE);
476 
477 	protoeflags = 0;
478 	if (cow & MAP_COPY_NEEDED)
479 		protoeflags |= MAP_ENTRY_NEEDS_COPY;
480 
481 	if (cow & MAP_COPY_ON_WRITE)
482 		protoeflags |= MAP_ENTRY_COW;
483 
484 	if (cow & MAP_NOFAULT)
485 		protoeflags |= MAP_ENTRY_NOFAULT;
486 
487 	/*
488 	 * See if we can avoid creating a new entry by extending one of our
489 	 * neighbors.  Or at least extend the object.
490 	 */
491 
492 	if ((object == NULL) &&
493 	    (prev_entry != &map->header) &&
494 	    ((prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
495 		((prev_entry->object.vm_object == NULL) ||
496 			(prev_entry->object.vm_object->type == OBJT_DEFAULT)) &&
497 	    (prev_entry->end == start) &&
498 	    (prev_entry->wired_count == 0)) {
499 
500 
501 		if ((protoeflags == prev_entry->eflags) &&
502 		    ((cow & MAP_NOFAULT) ||
503 		     vm_object_coalesce(prev_entry->object.vm_object,
504 					OFF_TO_IDX(prev_entry->offset),
505 					(vm_size_t) (prev_entry->end - prev_entry->start),
506 					(vm_size_t) (end - prev_entry->end)))) {
507 
508 			/*
509 			 * Coalesced the two objects.  Can we extend the
510 			 * previous map entry to include the new range?
511 			 */
512 			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
513 			    (prev_entry->protection == prot) &&
514 			    (prev_entry->max_protection == max)) {
515 
516 				map->size += (end - prev_entry->end);
517 				prev_entry->end = end;
518 				if ((cow & MAP_NOFAULT) == 0) {
519 					prev_object = prev_entry->object.vm_object;
520 					default_pager_convert_to_swapq(prev_object);
521 				}
522 				return (KERN_SUCCESS);
523 			}
524 			else {
525 				object = prev_entry->object.vm_object;
526 				offset = prev_entry->offset + (prev_entry->end -
527 							       prev_entry->start);
528 
529 				vm_object_reference(object);
530 			}
531 		}
532 	}
533 
534 	/*
535 	 * Create a new entry
536 	 */
537 
538 	new_entry = vm_map_entry_create(map);
539 	new_entry->start = start;
540 	new_entry->end = end;
541 
542 	new_entry->eflags = protoeflags;
543 	new_entry->object.vm_object = object;
544 	new_entry->offset = offset;
545 
546 	if (map->is_main_map) {
547 		new_entry->inheritance = VM_INHERIT_DEFAULT;
548 		new_entry->protection = prot;
549 		new_entry->max_protection = max;
550 		new_entry->wired_count = 0;
551 	}
552 	/*
553 	 * Insert the new entry into the list
554 	 */
555 
556 	vm_map_entry_link(map, prev_entry, new_entry);
557 	map->size += new_entry->end - new_entry->start;
558 
559 	/*
560 	 * Update the free space hint
561 	 */
562 	if ((map->first_free == prev_entry) &&
563 		(prev_entry->end >= new_entry->start))
564 		map->first_free = new_entry;
565 
566 	default_pager_convert_to_swapq(object);
567 	return (KERN_SUCCESS);
568 }
569 
570 /*
571  * Find sufficient space for `length' bytes in the given map, starting at
572  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
573  */
574 int
575 vm_map_findspace(map, start, length, addr)
576 	register vm_map_t map;
577 	register vm_offset_t start;
578 	vm_size_t length;
579 	vm_offset_t *addr;
580 {
581 	register vm_map_entry_t entry, next;
582 	register vm_offset_t end;
583 
584 	if (start < map->min_offset)
585 		start = map->min_offset;
586 	if (start > map->max_offset)
587 		return (1);
588 
589 	/*
590 	 * Look for the first possible address; if there's already something
591 	 * at this address, we have to start after it.
592 	 */
593 	if (start == map->min_offset) {
594 		if ((entry = map->first_free) != &map->header)
595 			start = entry->end;
596 	} else {
597 		vm_map_entry_t tmp;
598 
599 		if (vm_map_lookup_entry(map, start, &tmp))
600 			start = tmp->end;
601 		entry = tmp;
602 	}
603 
604 	/*
605 	 * Look through the rest of the map, trying to fit a new region in the
606 	 * gap between existing regions, or after the very last region.
607 	 */
608 	for (;; start = (entry = next)->end) {
609 		/*
610 		 * Find the end of the proposed new region.  Be sure we didn't
611 		 * go beyond the end of the map, or wrap around the address;
612 		 * if so, we lose.  Otherwise, if this is the last entry, or
613 		 * if the proposed new region fits before the next entry, we
614 		 * win.
615 		 */
616 		end = start + length;
617 		if (end > map->max_offset || end < start)
618 			return (1);
619 		next = entry->next;
620 		if (next == &map->header || next->start >= end)
621 			break;
622 	}
623 	SAVE_HINT(map, entry);
624 	*addr = start;
625 	if (map == kernel_map) {
626 		vm_offset_t ksize;
627 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
628 			pmap_growkernel(ksize);
629 		}
630 	}
631 	return (0);
632 }
633 
634 /*
635  *	vm_map_find finds an unallocated region in the target address
636  *	map with the given length.  The search is defined to be
637  *	first-fit from the specified address; the region found is
638  *	returned in the same parameter.
639  *
640  */
641 int
642 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
643 	    vm_offset_t *addr,	/* IN/OUT */
644 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
645 	    vm_prot_t max, int cow)
646 {
647 	register vm_offset_t start;
648 	int result, s = 0;
649 
650 	start = *addr;
651 
652 	if (map == kmem_map || map == mb_map)
653 		s = splvm();
654 
655 	vm_map_lock(map);
656 	if (find_space) {
657 		if (vm_map_findspace(map, start, length, addr)) {
658 			vm_map_unlock(map);
659 			if (map == kmem_map || map == mb_map)
660 				splx(s);
661 			return (KERN_NO_SPACE);
662 		}
663 		start = *addr;
664 	}
665 	result = vm_map_insert(map, object, offset,
666 		start, start + length, prot, max, cow);
667 	vm_map_unlock(map);
668 
669 	if (map == kmem_map || map == mb_map)
670 		splx(s);
671 
672 	return (result);
673 }
674 
675 /*
676  *	vm_map_simplify_entry:
677  *
678  *	Simplify the given map entry by merging with either neighbor.
679  */
680 void
681 vm_map_simplify_entry(map, entry)
682 	vm_map_t map;
683 	vm_map_entry_t entry;
684 {
685 	vm_map_entry_t next, prev;
686 	vm_size_t prevsize, esize;
687 
688 	if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
689 		return;
690 
691 	prev = entry->prev;
692 	if (prev != &map->header) {
693 		prevsize = prev->end - prev->start;
694 		if ( (prev->end == entry->start) &&
695 		     (prev->object.vm_object == entry->object.vm_object) &&
696 		     (!prev->object.vm_object ||
697 				(prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
698 		     (!prev->object.vm_object ||
699 			(prev->offset + prevsize == entry->offset)) &&
700 		     (prev->eflags == entry->eflags) &&
701 		     (prev->protection == entry->protection) &&
702 		     (prev->max_protection == entry->max_protection) &&
703 		     (prev->inheritance == entry->inheritance) &&
704 		     (prev->wired_count == entry->wired_count)) {
705 			if (map->first_free == prev)
706 				map->first_free = entry;
707 			if (map->hint == prev)
708 				map->hint = entry;
709 			vm_map_entry_unlink(map, prev);
710 			entry->start = prev->start;
711 			entry->offset = prev->offset;
712 			if (prev->object.vm_object)
713 				vm_object_deallocate(prev->object.vm_object);
714 			vm_map_entry_dispose(map, prev);
715 		}
716 	}
717 
718 	next = entry->next;
719 	if (next != &map->header) {
720 		esize = entry->end - entry->start;
721 		if ((entry->end == next->start) &&
722 		    (next->object.vm_object == entry->object.vm_object) &&
723 		    (!next->object.vm_object ||
724 				(next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
725 		     (!entry->object.vm_object ||
726 			(entry->offset + esize == next->offset)) &&
727 		    (next->eflags == entry->eflags) &&
728 		    (next->protection == entry->protection) &&
729 		    (next->max_protection == entry->max_protection) &&
730 		    (next->inheritance == entry->inheritance) &&
731 		    (next->wired_count == entry->wired_count)) {
732 			if (map->first_free == next)
733 				map->first_free = entry;
734 			if (map->hint == next)
735 				map->hint = entry;
736 			vm_map_entry_unlink(map, next);
737 			entry->end = next->end;
738 			if (next->object.vm_object)
739 				vm_object_deallocate(next->object.vm_object);
740 			vm_map_entry_dispose(map, next);
741 		}
742 	}
743 }
744 /*
745  *	vm_map_clip_start:	[ internal use only ]
746  *
747  *	Asserts that the given entry begins at or after
748  *	the specified address; if necessary,
749  *	it splits the entry into two.
750  */
751 #define vm_map_clip_start(map, entry, startaddr) \
752 { \
753 	if (startaddr > entry->start) \
754 		_vm_map_clip_start(map, entry, startaddr); \
755 }
756 
757 /*
758  *	This routine is called only when it is known that
759  *	the entry must be split.
760  */
761 static void
762 _vm_map_clip_start(map, entry, start)
763 	register vm_map_t map;
764 	register vm_map_entry_t entry;
765 	register vm_offset_t start;
766 {
767 	register vm_map_entry_t new_entry;
768 
769 	/*
770 	 * Split off the front portion -- note that we must insert the new
771 	 * entry BEFORE this one, so that this entry has the specified
772 	 * starting address.
773 	 */
774 
775 	vm_map_simplify_entry(map, entry);
776 
777 	/*
778 	 * If there is no object backing this entry, we might as well create
779 	 * one now.  If we defer it, an object can get created after the map
780 	 * is clipped, and individual objects will be created for the split-up
781 	 * map.  This is a bit of a hack, but is also about the best place to
782 	 * put this improvement.
783 	 */
784 
785 	if (entry->object.vm_object == NULL) {
786 		vm_object_t object;
787 
788 		object = vm_object_allocate(OBJT_DEFAULT,
789 			atop(entry->end - entry->start));
790 		entry->object.vm_object = object;
791 		entry->offset = 0;
792 	}
793 
794 	new_entry = vm_map_entry_create(map);
795 	*new_entry = *entry;
796 
797 	new_entry->end = start;
798 	entry->offset += (start - entry->start);
799 	entry->start = start;
800 
801 	vm_map_entry_link(map, entry->prev, new_entry);
802 
803 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
804 		vm_object_reference(new_entry->object.vm_object);
805 }
806 
807 /*
808  *	vm_map_clip_end:	[ internal use only ]
809  *
810  *	Asserts that the given entry ends at or before
811  *	the specified address; if necessary,
812  *	it splits the entry into two.
813  */
814 
815 #define vm_map_clip_end(map, entry, endaddr) \
816 { \
817 	if (endaddr < entry->end) \
818 		_vm_map_clip_end(map, entry, endaddr); \
819 }
820 
821 /*
822  *	This routine is called only when it is known that
823  *	the entry must be split.
824  */
825 static void
826 _vm_map_clip_end(map, entry, end)
827 	register vm_map_t map;
828 	register vm_map_entry_t entry;
829 	register vm_offset_t end;
830 {
831 	register vm_map_entry_t new_entry;
832 
833 	/*
834 	 * If there is no object backing this entry, we might as well create
835 	 * one now.  If we defer it, an object can get created after the map
836 	 * is clipped, and individual objects will be created for the split-up
837 	 * map.  This is a bit of a hack, but is also about the best place to
838 	 * put this improvement.
839 	 */
840 
841 	if (entry->object.vm_object == NULL) {
842 		vm_object_t object;
843 
844 		object = vm_object_allocate(OBJT_DEFAULT,
845 			atop(entry->end - entry->start));
846 		entry->object.vm_object = object;
847 		entry->offset = 0;
848 	}
849 
850 	/*
851 	 * Create a new entry and insert it AFTER the specified entry
852 	 */
853 
854 	new_entry = vm_map_entry_create(map);
855 	*new_entry = *entry;
856 
857 	new_entry->start = entry->end = end;
858 	new_entry->offset += (end - entry->start);
859 
860 	vm_map_entry_link(map, entry, new_entry);
861 
862 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
863 		vm_object_reference(new_entry->object.vm_object);
864 }
865 
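/*
 * Illustrative sketch (not part of the build): the lookup/clip idiom
 * that most range operations in this file follow (compare
 * vm_map_protect() and vm_map_inherit() below).  The wrapper is
 * hypothetical; the map is assumed to be locked by the caller.
 */
#if 0
static void
example_range_op(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t entry;

	if (vm_map_lookup_entry(map, start, &entry))
		vm_map_clip_start(map, entry, start);
	else
		entry = entry->next;
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);
		/* ... operate on [entry->start, entry->end) here ... */
		entry = entry->next;
	}
}
#endif
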
866 /*
867  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
868  *
869  *	Asserts that the starting and ending region
870  *	addresses fall within the valid range of the map.
871  */
872 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
873 		{					\
874 		if (start < vm_map_min(map))		\
875 			start = vm_map_min(map);	\
876 		if (end > vm_map_max(map))		\
877 			end = vm_map_max(map);		\
878 		if (start > end)			\
879 			start = end;			\
880 		}
881 
882 /*
883  *	vm_map_submap:		[ kernel use only ]
884  *
885  *	Mark the given range as handled by a subordinate map.
886  *
887  *	This range must have been created with vm_map_find,
888  *	and no other operations may have been performed on this
889  *	range prior to calling vm_map_submap.
890  *
891  *	Only a limited number of operations can be performed
892  *	within this range after calling vm_map_submap:
893  *		vm_fault
894  *	[Don't try vm_map_copy!]
895  *
896  *	To remove a submapping, one must first remove the
897  *	range from the superior map, and then destroy the
898  *	submap (if desired).  [Better yet, don't try it.]
899  */
900 int
901 vm_map_submap(map, start, end, submap)
902 	register vm_map_t map;
903 	register vm_offset_t start;
904 	register vm_offset_t end;
905 	vm_map_t submap;
906 {
907 	vm_map_entry_t entry;
908 	register int result = KERN_INVALID_ARGUMENT;
909 
910 	vm_map_lock(map);
911 
912 	VM_MAP_RANGE_CHECK(map, start, end);
913 
914 	if (vm_map_lookup_entry(map, start, &entry)) {
915 		vm_map_clip_start(map, entry, start);
916 	} else
917 		entry = entry->next;
918 
919 	vm_map_clip_end(map, entry, end);
920 
921 	if ((entry->start == start) && (entry->end == end) &&
922 	    ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
923 	    (entry->object.vm_object == NULL)) {
924 		entry->object.sub_map = submap;
925 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
926 		result = KERN_SUCCESS;
927 	}
928 	vm_map_unlock(map);
929 
930 	return (result);
931 }
932 
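/*
 * Illustrative sketch (not part of the build): creating a submap,
 * roughly the sequence kmem_suballoc() uses -- reserve a range in the
 * parent with vm_map_find(), build a new map over that range, then
 * mark it with vm_map_submap().  Error handling is omitted and the
 * wrapper name is hypothetical.
 */
#if 0
static vm_map_t
example_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size)
{
	vm_map_t result;

	*min = (vm_offset_t) vm_map_min(parent);
	(void) vm_map_find(parent, NULL, (vm_ooffset_t) 0, min, size,
	    TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	(void) vm_map_submap(parent, *min, *max, result);
	return (result);
}
#endif
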
933 /*
934  *	vm_map_protect:
935  *
936  *	Sets the protection of the specified address
937  *	region in the target map.  If "set_max" is
938  *	specified, the maximum protection is to be set;
939  *	otherwise, only the current protection is affected.
940  */
941 int
942 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
943 	       vm_prot_t new_prot, boolean_t set_max)
944 {
945 	register vm_map_entry_t current;
946 	vm_map_entry_t entry;
947 
948 	vm_map_lock(map);
949 
950 	VM_MAP_RANGE_CHECK(map, start, end);
951 
952 	if (vm_map_lookup_entry(map, start, &entry)) {
953 		vm_map_clip_start(map, entry, start);
954 	} else {
955 		entry = entry->next;
956 	}
957 
958 	/*
959 	 * Make a first pass to check for protection violations.
960 	 */
961 
962 	current = entry;
963 	while ((current != &map->header) && (current->start < end)) {
964 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
965 			vm_map_unlock(map);
966 			return (KERN_INVALID_ARGUMENT);
967 		}
968 		if ((new_prot & current->max_protection) != new_prot) {
969 			vm_map_unlock(map);
970 			return (KERN_PROTECTION_FAILURE);
971 		}
972 		current = current->next;
973 	}
974 
975 	/*
976 	 * Go back and fix up protections. [Note that clipping is not
977 	 * necessary the second time.]
978 	 */
979 
980 	current = entry;
981 
982 	while ((current != &map->header) && (current->start < end)) {
983 		vm_prot_t old_prot;
984 
985 		vm_map_clip_end(map, current, end);
986 
987 		old_prot = current->protection;
988 		if (set_max)
989 			current->protection =
990 			    (current->max_protection = new_prot) &
991 			    old_prot;
992 		else
993 			current->protection = new_prot;
994 
995 		/*
996 		 * Update physical map if necessary. Worry about copy-on-write
997 		 * here -- CHECK THIS XXX
998 		 */
999 
1000 		if (current->protection != old_prot) {
1001 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1002 							VM_PROT_ALL)
1003 
1004 			if (current->eflags & MAP_ENTRY_IS_A_MAP) {
1005 				vm_map_entry_t share_entry;
1006 				vm_offset_t share_end;
1007 
1008 				vm_map_lock(current->object.share_map);
1009 				(void) vm_map_lookup_entry(
1010 				    current->object.share_map,
1011 				    current->offset,
1012 				    &share_entry);
1013 				share_end = current->offset +
1014 				    (current->end - current->start);
1015 				while ((share_entry !=
1016 					&current->object.share_map->header) &&
1017 				    (share_entry->start < share_end)) {
1018 
1019 					pmap_protect(map->pmap,
1020 					    (qmax(share_entry->start,
1021 						    current->offset) -
1022 						current->offset +
1023 						current->start),
1024 					    min(share_entry->end,
1025 						share_end) -
1026 					    current->offset +
1027 					    current->start,
1028 					    current->protection &
1029 					    MASK(share_entry));
1030 
1031 					share_entry = share_entry->next;
1032 				}
1033 				vm_map_unlock(current->object.share_map);
1034 			} else
1035 				pmap_protect(map->pmap, current->start,
1036 				    current->end,
1037 				    current->protection & MASK(entry));
1038 #undef	MASK
1039 		}
1040 
1041 		vm_map_simplify_entry(map, current);
1042 
1043 		current = current->next;
1044 	}
1045 
1046 	map->timestamp++;
1047 	vm_map_unlock(map);
1048 	return (KERN_SUCCESS);
1049 }
1050 
1051 /*
1052  *	vm_map_madvise:
1053  *
1054  * 	This routine traverses a process's map, handling the madvise
1055  *	system call.
1056  */
1057 void
1058 vm_map_madvise(map, pmap, start, end, advise)
1059 	vm_map_t map;
1060 	pmap_t pmap;
1061 	vm_offset_t start, end;
1062 	int advise;
1063 {
1064 	register vm_map_entry_t current;
1065 	vm_map_entry_t entry;
1066 
1067 	vm_map_lock(map);
1068 
1069 	VM_MAP_RANGE_CHECK(map, start, end);
1070 
1071 	if (vm_map_lookup_entry(map, start, &entry)) {
1072 		vm_map_clip_start(map, entry, start);
1073 	} else
1074 		entry = entry->next;
1075 
1076 	for (current = entry;
1077 		(current != &map->header) && (current->start < end);
1078 		current = current->next) {
1079 		vm_size_t size;
1080 
1081 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1082 			continue;
1083 		}
1084 
1085 		vm_map_clip_end(map, current, end);
1086 		size = current->end - current->start;
1087 
1088 		/*
1089 		 * Create an object if needed
1090 		 */
1091 		if (current->object.vm_object == NULL) {
1092 			vm_object_t object;
1093 			if ((advise == MADV_FREE) || (advise == MADV_DONTNEED))
1094 				continue;
1095 			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
1096 			current->object.vm_object = object;
1097 			current->offset = 0;
1098 		}
1099 
1100 		switch (advise) {
1101 		case MADV_NORMAL:
1102 			current->object.vm_object->behavior = OBJ_NORMAL;
1103 			break;
1104 		case MADV_SEQUENTIAL:
1105 			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
1106 			break;
1107 		case MADV_RANDOM:
1108 			current->object.vm_object->behavior = OBJ_RANDOM;
1109 			break;
1110 		/*
1111 		 * Right now, we could handle DONTNEED and WILLNEED with common code.
1112 		 * They are mostly the same, except for the potential async reads (NYI).
1113 		 */
1114 		case MADV_FREE:
1115 		case MADV_DONTNEED:
1116 			{
1117 				vm_pindex_t pindex;
1118 				int count;
1119 				pindex = OFF_TO_IDX(current->offset);
1120 				count = OFF_TO_IDX(size);
1121 				/*
1122 				 * MADV_DONTNEED removes the page from all
1123 				 * pmaps, so pmap_remove is not necessary.
1124 				 */
1125 				vm_object_madvise(current->object.vm_object,
1126 					pindex, count, advise);
1127 			}
1128 			break;
1129 
1130 		case MADV_WILLNEED:
1131 			{
1132 				vm_pindex_t pindex;
1133 				int count;
1134 				pindex = OFF_TO_IDX(current->offset);
1135 				count = OFF_TO_IDX(size);
1136 				vm_object_madvise(current->object.vm_object,
1137 					pindex, count, advise);
1138 				pmap_object_init_pt(pmap, current->start,
1139 					current->object.vm_object, pindex,
1140 					(count << PAGE_SHIFT), 0);
1141 			}
1142 			break;
1143 
1144 		default:
1145 			break;
1146 		}
1147 	}
1148 
1149 	map->timestamp++;
1150 	vm_map_simplify_entry(map, entry);
1151 	vm_map_unlock(map);
1152 	return;
1153 }
1154 
1155 
1156 /*
1157  *	vm_map_inherit:
1158  *
1159  *	Sets the inheritance of the specified address
1160  *	range in the target map.  Inheritance
1161  *	affects how the map will be shared with
1162  *	child maps at the time of vm_map_fork.
1163  */
1164 int
1165 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1166 	       vm_inherit_t new_inheritance)
1167 {
1168 	register vm_map_entry_t entry;
1169 	vm_map_entry_t temp_entry;
1170 
1171 	switch (new_inheritance) {
1172 	case VM_INHERIT_NONE:
1173 	case VM_INHERIT_COPY:
1174 	case VM_INHERIT_SHARE:
1175 		break;
1176 	default:
1177 		return (KERN_INVALID_ARGUMENT);
1178 	}
1179 
1180 	vm_map_lock(map);
1181 
1182 	VM_MAP_RANGE_CHECK(map, start, end);
1183 
1184 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1185 		entry = temp_entry;
1186 		vm_map_clip_start(map, entry, start);
1187 	} else
1188 		entry = temp_entry->next;
1189 
1190 	while ((entry != &map->header) && (entry->start < end)) {
1191 		vm_map_clip_end(map, entry, end);
1192 
1193 		entry->inheritance = new_inheritance;
1194 
1195 		entry = entry->next;
1196 	}
1197 
1198 	vm_map_simplify_entry(map, temp_entry);
1199 	map->timestamp++;
1200 	vm_map_unlock(map);
1201 	return (KERN_SUCCESS);
1202 }
1203 
1204 /*
1205  * Implement the semantics of mlock
1206  */
1207 int
1208 vm_map_user_pageable(map, start, end, new_pageable)
1209 	register vm_map_t map;
1210 	register vm_offset_t start;
1211 	register vm_offset_t end;
1212 	register boolean_t new_pageable;
1213 {
1214 	vm_map_entry_t entry;
1215 	vm_map_entry_t start_entry;
1216 	vm_offset_t estart;
1217 	int rv;
1218 
1219 	vm_map_lock(map);
1220 	VM_MAP_RANGE_CHECK(map, start, end);
1221 
1222 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1223 		vm_map_unlock(map);
1224 		return (KERN_INVALID_ADDRESS);
1225 	}
1226 
1227 	if (new_pageable) {
1228 
1229 		entry = start_entry;
1230 		vm_map_clip_start(map, entry, start);
1231 
1232 		/*
1233 		 * Now decrement the wiring count for each region. If a region
1234 		 * becomes completely unwired, unwire its physical pages and
1235 		 * mappings.
1236 		 */
1237 		vm_map_set_recursive(map);
1238 
1239 		entry = start_entry;
1240 		while ((entry != &map->header) && (entry->start < end)) {
1241 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1242 				vm_map_clip_end(map, entry, end);
1243 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1244 				entry->wired_count--;
1245 				if (entry->wired_count == 0)
1246 					vm_fault_unwire(map, entry->start, entry->end);
1247 			}
1248 			vm_map_simplify_entry(map,entry);
1249 			entry = entry->next;
1250 		}
1251 		vm_map_clear_recursive(map);
1252 	} else {
1253 
1254 		entry = start_entry;
1255 
1256 		while ((entry != &map->header) && (entry->start < end)) {
1257 
1258 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1259 				entry = entry->next;
1260 				continue;
1261 			}
1262 
1263 			if (entry->wired_count != 0) {
1264 				entry->wired_count++;
1265 				entry->eflags |= MAP_ENTRY_USER_WIRED;
1266 				entry = entry->next;
1267 				continue;
1268 			}
1269 
1270 			/* Here on entry being newly wired */
1271 
1272 			if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1273 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1274 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1275 
1276 					vm_object_shadow(&entry->object.vm_object,
1277 					    &entry->offset,
1278 					    atop(entry->end - entry->start));
1279 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1280 
1281 				} else if (entry->object.vm_object == NULL) {
1282 
1283 					entry->object.vm_object =
1284 					    vm_object_allocate(OBJT_DEFAULT,
1285 						atop(entry->end - entry->start));
1286 					entry->offset = (vm_offset_t) 0;
1287 
1288 				}
1289 				default_pager_convert_to_swapq(entry->object.vm_object);
1290 			}
1291 
1292 			vm_map_clip_start(map, entry, start);
1293 			vm_map_clip_end(map, entry, end);
1294 
1295 			entry->wired_count++;
1296 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1297 			estart = entry->start;
1298 
1299 			/* First we need to allow map modifications */
1300 			vm_map_set_recursive(map);
1301 			vm_map_lock_downgrade(map);
1302 			map->timestamp++;
1303 
1304 			rv = vm_fault_user_wire(map, entry->start, entry->end);
1305 			if (rv) {
1306 
1307 				entry->wired_count--;
1308 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1309 
1310 				vm_map_clear_recursive(map);
1311 				vm_map_unlock(map);
1312 
1313 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
1314 				return rv;
1315 			}
1316 
1317 			vm_map_clear_recursive(map);
1318 			if (vm_map_lock_upgrade(map)) {
1319 				vm_map_lock(map);
1320 				if (vm_map_lookup_entry(map, estart, &entry)
1321 				    == FALSE) {
1322 					vm_map_unlock(map);
1323 					(void) vm_map_user_pageable(map,
1324 								    start,
1325 								    estart,
1326 								    TRUE);
1327 					return (KERN_INVALID_ADDRESS);
1328 				}
1329 			}
1330 			vm_map_simplify_entry(map,entry);
1331 		}
1332 	}
1333 	map->timestamp++;
1334 	vm_map_unlock(map);
1335 	return KERN_SUCCESS;
1336 }
1337 
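/*
 * Illustrative sketch (not part of the build): mlock(2)-style wiring
 * of a user range reduces to a single call on the process map.  The
 * page rounding and the wrapper name are assumptions of the example.
 */
#if 0
static int
example_mlock(struct proc *p, vm_offset_t addr, vm_size_t size)
{
	return (vm_map_user_pageable(&p->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + size), FALSE));
}
#endif
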
1338 /*
1339  *	vm_map_pageable:
1340  *
1341  *	Sets the pageability of the specified address
1342  *	range in the target map.  Regions specified
1343  *	as not pageable require locked-down physical
1344  *	memory and physical page maps.
1345  *
1346  *	The map must not be locked, but a reference
1347  *	must remain to the map throughout the call.
1348  */
1349 int
1350 vm_map_pageable(map, start, end, new_pageable)
1351 	register vm_map_t map;
1352 	register vm_offset_t start;
1353 	register vm_offset_t end;
1354 	register boolean_t new_pageable;
1355 {
1356 	register vm_map_entry_t entry;
1357 	vm_map_entry_t start_entry;
1358 	register vm_offset_t failed = 0;
1359 	int rv;
1360 
1361 	vm_map_lock(map);
1362 
1363 	VM_MAP_RANGE_CHECK(map, start, end);
1364 
1365 	/*
1366 	 * Only one pageability change may take place at one time, since
1367 	 * vm_fault assumes it will be called only once for each
1368 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
1369 	 * changing the pageability for the entire region.  We do so before
1370 	 * making any changes.
1371 	 */
1372 
1373 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1374 		vm_map_unlock(map);
1375 		return (KERN_INVALID_ADDRESS);
1376 	}
1377 	entry = start_entry;
1378 
1379 	/*
1380 	 * Actions are rather different for wiring and unwiring, so we have
1381 	 * two separate cases.
1382 	 */
1383 
1384 	if (new_pageable) {
1385 
1386 		vm_map_clip_start(map, entry, start);
1387 
1388 		/*
1389 		 * Unwiring.  First ensure that the range to be unwired is
1390 		 * really wired down and that there are no holes.
1391 		 */
1392 		while ((entry != &map->header) && (entry->start < end)) {
1393 
1394 			if (entry->wired_count == 0 ||
1395 			    (entry->end < end &&
1396 				(entry->next == &map->header ||
1397 				    entry->next->start > entry->end))) {
1398 				vm_map_unlock(map);
1399 				return (KERN_INVALID_ARGUMENT);
1400 			}
1401 			entry = entry->next;
1402 		}
1403 
1404 		/*
1405 		 * Now decrement the wiring count for each region. If a region
1406 		 * becomes completely unwired, unwire its physical pages and
1407 		 * mappings.
1408 		 */
1409 		vm_map_set_recursive(map);
1410 
1411 		entry = start_entry;
1412 		while ((entry != &map->header) && (entry->start < end)) {
1413 			vm_map_clip_end(map, entry, end);
1414 
1415 			entry->wired_count--;
1416 			if (entry->wired_count == 0)
1417 				vm_fault_unwire(map, entry->start, entry->end);
1418 
1419 			entry = entry->next;
1420 		}
1421 		vm_map_simplify_entry(map, start_entry);
1422 		vm_map_clear_recursive(map);
1423 	} else {
1424 		/*
1425 		 * Wiring.  We must do this in two passes:
1426 		 *
1427 		 * 1.  Holding the write lock, we create any shadow or zero-fill
1428 		 * objects that need to be created. Then we clip each map
1429 		 * entry to the region to be wired and increment its wiring
1430 		 * count.  We create objects before clipping the map entries
1431 		 * to avoid object proliferation.
1432 		 *
1433 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
1434 		 * fault in the pages for any newly wired area (wired_count is
1435 		 * 1).
1436 		 *
1437 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
1438 		 * deadlock with another process that may have faulted on one
1439 		 * of the pages to be wired (it would mark the page busy,
1440 		 * blocking us, then in turn block on the map lock that we
1441 		 * hold).  Because of problems in the recursive lock package,
1442 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1443 		 * any actions that require the write lock must be done
1444 		 * beforehand.  Because we keep the read lock on the map, the
1445 		 * copy-on-write status of the entries we modify here cannot
1446 		 * change.
1447 		 */
1448 
1449 		/*
1450 		 * Pass 1.
1451 		 */
1452 		while ((entry != &map->header) && (entry->start < end)) {
1453 			if (entry->wired_count == 0) {
1454 
1455 				/*
1456 				 * Perform actions of vm_map_lookup that need
1457 				 * the write lock on the map: create a shadow
1458 				 * object for a copy-on-write region, or an
1459 				 * object for a zero-fill region.
1460 				 *
1461 				 * We don't have to do this for entries that
1462 				 * point to sharing maps, because we won't
1463 				 * hold the lock on the sharing map.
1464 				 */
1465 				if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1466 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1467 					if (copyflag &&
1468 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1469 
1470 						vm_object_shadow(&entry->object.vm_object,
1471 						    &entry->offset,
1472 						    atop(entry->end - entry->start));
1473 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1474 					} else if (entry->object.vm_object == NULL) {
1475 						entry->object.vm_object =
1476 						    vm_object_allocate(OBJT_DEFAULT,
1477 							atop(entry->end - entry->start));
1478 						entry->offset = (vm_offset_t) 0;
1479 					}
1480 					default_pager_convert_to_swapq(entry->object.vm_object);
1481 				}
1482 			}
1483 			vm_map_clip_start(map, entry, start);
1484 			vm_map_clip_end(map, entry, end);
1485 			entry->wired_count++;
1486 
1487 			/*
1488 			 * Check for holes
1489 			 */
1490 			if (entry->end < end &&
1491 			    (entry->next == &map->header ||
1492 				entry->next->start > entry->end)) {
1493 				/*
1494 				 * Found one.  Object creation actions do not
1495 				 * need to be undone, but the wired counts
1496 				 * need to be restored.
1497 				 */
1498 				while (entry != &map->header && entry->end > start) {
1499 					entry->wired_count--;
1500 					entry = entry->prev;
1501 				}
1502 				map->timestamp++;
1503 				vm_map_unlock(map);
1504 				return (KERN_INVALID_ARGUMENT);
1505 			}
1506 			entry = entry->next;
1507 		}
1508 
1509 		/*
1510 		 * Pass 2.
1511 		 */
1512 
1513 		/*
1514 		 * HACK HACK HACK HACK
1515 		 *
1516 		 * If we are wiring in the kernel map or a submap of it,
1517 		 * unlock the map to avoid deadlocks.  We trust that the
1518 		 * kernel is well-behaved, and therefore will not do
1519 		 * anything destructive to this region of the map while
1520 		 * we have it unlocked.  We cannot trust user processes
1521 		 * to do the same.
1522 		 *
1523 		 * HACK HACK HACK HACK
1524 		 */
1525 		if (vm_map_pmap(map) == kernel_pmap) {
1526 			vm_map_unlock(map);	/* trust me ... */
1527 		} else {
1528 			vm_map_set_recursive(map);
1529 			vm_map_lock_downgrade(map);
1530 		}
1531 
1532 		rv = 0;
1533 		entry = start_entry;
1534 		while (entry != &map->header && entry->start < end) {
1535 			/*
1536 			 * If vm_fault_wire fails for any page we need to undo
1537 			 * what has been done.  We decrement the wiring count
1538 			 * for those pages which have not yet been wired (now)
1539 			 * and unwire those that have (later).
1540 			 *
1541 			 * XXX this violates the locking protocol on the map,
1542 			 * needs to be fixed.
1543 			 */
1544 			if (rv)
1545 				entry->wired_count--;
1546 			else if (entry->wired_count == 1) {
1547 				rv = vm_fault_wire(map, entry->start, entry->end);
1548 				if (rv) {
1549 					failed = entry->start;
1550 					entry->wired_count--;
1551 				}
1552 			}
1553 			entry = entry->next;
1554 		}
1555 
1556 		if (vm_map_pmap(map) == kernel_pmap) {
1557 			vm_map_lock(map);
1558 		} else {
1559 			vm_map_clear_recursive(map);
1560 		}
1561 		if (rv) {
1562 			vm_map_unlock(map);
1563 			(void) vm_map_pageable(map, start, failed, TRUE);
1564 			return (rv);
1565 		}
1566 		vm_map_simplify_entry(map, start_entry);
1567 	}
1568 
1569 	vm_map_unlock(map);
1570 
1571 	map->timestamp++;
1572 	return (KERN_SUCCESS);
1573 }
1574 
1575 /*
1576  * vm_map_clean
1577  *
1578  * Push any dirty cached pages in the address range to their pager.
1579  * If syncio is TRUE, dirty pages are written synchronously.
1580  * If invalidate is TRUE, any cached pages are freed as well.
1581  *
1582  * Returns an error if any part of the specified range is not mapped.
1583  */
1584 int
1585 vm_map_clean(map, start, end, syncio, invalidate)
1586 	vm_map_t map;
1587 	vm_offset_t start;
1588 	vm_offset_t end;
1589 	boolean_t syncio;
1590 	boolean_t invalidate;
1591 {
1592 	register vm_map_entry_t current;
1593 	vm_map_entry_t entry;
1594 	vm_size_t size;
1595 	vm_object_t object;
1596 	vm_ooffset_t offset;
1597 
1598 	vm_map_lock_read(map);
1599 	VM_MAP_RANGE_CHECK(map, start, end);
1600 	if (!vm_map_lookup_entry(map, start, &entry)) {
1601 		vm_map_unlock_read(map);
1602 		return (KERN_INVALID_ADDRESS);
1603 	}
1604 	/*
1605 	 * Make a first pass to check for holes.
1606 	 */
1607 	for (current = entry; current->start < end; current = current->next) {
1608 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1609 			vm_map_unlock_read(map);
1610 			return (KERN_INVALID_ARGUMENT);
1611 		}
1612 		if (end > current->end &&
1613 		    (current->next == &map->header ||
1614 			current->end != current->next->start)) {
1615 			vm_map_unlock_read(map);
1616 			return (KERN_INVALID_ADDRESS);
1617 		}
1618 	}
1619 
1620 	/*
1621 	 * Make a second pass, cleaning/uncaching pages from the indicated
1622 	 * objects as we go.
1623 	 */
1624 	for (current = entry; current->start < end; current = current->next) {
1625 		offset = current->offset + (start - current->start);
1626 		size = (end <= current->end ? end : current->end) - start;
1627 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1628 			register vm_map_t smap;
1629 			vm_map_entry_t tentry;
1630 			vm_size_t tsize;
1631 
1632 			smap = current->object.share_map;
1633 			vm_map_lock_read(smap);
1634 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1635 			tsize = tentry->end - offset;
1636 			if (tsize < size)
1637 				size = tsize;
1638 			object = tentry->object.vm_object;
1639 			offset = tentry->offset + (offset - tentry->start);
1640 			vm_map_unlock_read(smap);
1641 		} else {
1642 			object = current->object.vm_object;
1643 		}
1644 		/*
1645 		 * Note that there is absolutely no sense in writing out
1646 		 * anonymous objects, so we track down the vnode object
1647 		 * to write out.
1648 		 * We invalidate (remove) all pages from the address space
1649 		 * anyway, for semantic correctness.
1650 		 */
1651 		while (object != NULL && object->backing_object) {
1652 			offset += object->backing_object_offset;
1653 			object = object->backing_object;
1654 			if (object->size < OFF_TO_IDX(offset + size))
1655 				size = IDX_TO_OFF(object->size) - offset;
1656 		}
1657 		if (invalidate)
1658 			pmap_remove(vm_map_pmap(map), current->start,
1659 				current->start + size);
1660 		if (object && (object->type == OBJT_VNODE)) {
1661 			/*
1662 			 * Flush pages if writing is allowed. XXX should we continue
1663 			 * on an error?
1664 			 *
1665 			 * XXX Doing async I/O and then removing all the pages from
1666 			 *     the object before it completes is probably a very bad
1667 			 *     idea.
1668 			 */
1669 			if (current->protection & VM_PROT_WRITE) {
1670 				if (object->type == OBJT_VNODE)
1671 					vn_lock(object->handle, LK_EXCLUSIVE, curproc);
1672 				vm_object_page_clean(object,
1673 				    OFF_TO_IDX(offset),
1674 				    OFF_TO_IDX(offset + size + PAGE_MASK),
1675 				    (syncio || invalidate) ? 1 : 0);
1676 				if (invalidate)
1677 					vm_object_page_remove(object,
1678 						OFF_TO_IDX(offset),
1679 						OFF_TO_IDX(offset + size + PAGE_MASK),
1680 						FALSE);
1681 				if (object->type == OBJT_VNODE)
1682 					VOP_UNLOCK(object->handle, 0, curproc);
1683 			}
1684 		}
1685 		start += size;
1686 	}
1687 
1688 	vm_map_unlock_read(map);
1689 	return (KERN_SUCCESS);
1690 }
1691 
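/*
 * Illustrative sketch (not part of the build): msync(2)-style
 * behaviour expressed with vm_map_clean().  The flag translation
 * (MS_SYNC -> syncio, MS_INVALIDATE -> invalidate) and the wrapper
 * name are assumptions of the example.
 */
#if 0
static int
example_msync(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
{
	return (vm_map_clean(map, trunc_page(addr),
	    round_page(addr + size),
	    (flags & MS_SYNC) ? TRUE : FALSE,
	    (flags & MS_INVALIDATE) ? TRUE : FALSE));
}
#endif
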
1692 /*
1693  *	vm_map_entry_unwire:	[ internal use only ]
1694  *
1695  *	Make the region specified by this entry pageable.
1696  *
1697  *	The map in question should be locked.
1698  *	[This is the reason for this routine's existence.]
1699  */
1700 static void
1701 vm_map_entry_unwire(map, entry)
1702 	vm_map_t map;
1703 	register vm_map_entry_t entry;
1704 {
1705 	vm_fault_unwire(map, entry->start, entry->end);
1706 	entry->wired_count = 0;
1707 }
1708 
1709 /*
1710  *	vm_map_entry_delete:	[ internal use only ]
1711  *
1712  *	Deallocate the given entry from the target map.
1713  */
1714 static void
1715 vm_map_entry_delete(map, entry)
1716 	register vm_map_t map;
1717 	register vm_map_entry_t entry;
1718 {
1719 	vm_map_entry_unlink(map, entry);
1720 	map->size -= entry->end - entry->start;
1721 
1722 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1723 		vm_object_deallocate(entry->object.vm_object);
1724 	}
1725 
1726 	vm_map_entry_dispose(map, entry);
1727 }
1728 
1729 /*
1730  *	vm_map_delete:	[ internal use only ]
1731  *
1732  *	Deallocates the given address range from the target
1733  *	map.
1734  *
1735  *	When called with a sharing map, removes pages from
1736  *	that region from all physical maps.
1737  */
1738 int
1739 vm_map_delete(map, start, end)
1740 	register vm_map_t map;
1741 	vm_offset_t start;
1742 	register vm_offset_t end;
1743 {
1744 	register vm_map_entry_t entry;
1745 	vm_map_entry_t first_entry;
1746 
1747 	/*
1748 	 * Find the start of the region, and clip it
1749 	 */
1750 
1751 	if (!vm_map_lookup_entry(map, start, &first_entry))
1752 		entry = first_entry->next;
1753 	else {
1754 		entry = first_entry;
1755 		vm_map_clip_start(map, entry, start);
1756 
1757 		/*
1758 		 * Fix the lookup hint now, rather than each time through the
1759 		 * loop.
1760 		 */
1761 
1762 		SAVE_HINT(map, entry->prev);
1763 	}
1764 
1765 	/*
1766 	 * Save the free space hint
1767 	 */
1768 
1769 	if (entry == &map->header) {
1770 		map->first_free = &map->header;
1771 	} else if (map->first_free->start >= start)
1772 		map->first_free = entry->prev;
1773 
1774 	/*
1775 	 * Step through all entries in this region
1776 	 */
1777 
1778 	while ((entry != &map->header) && (entry->start < end)) {
1779 		vm_map_entry_t next;
1780 		vm_offset_t s, e;
1781 		vm_object_t object;
1782 		vm_ooffset_t offset;
1783 
1784 		vm_map_clip_end(map, entry, end);
1785 
1786 		next = entry->next;
1787 		s = entry->start;
1788 		e = entry->end;
1789 		offset = entry->offset;
1790 
1791 		/*
1792 		 * Unwire before removing addresses from the pmap; otherwise,
1793 		 * unwiring will put the entries back in the pmap.
1794 		 */
1795 
1796 		object = entry->object.vm_object;
1797 		if (entry->wired_count != 0)
1798 			vm_map_entry_unwire(map, entry);
1799 
1800 		/*
1801 		 * If this is a sharing map, we must remove *all* references
1802 		 * to this data, since we can't find all of the physical maps
1803 		 * which are sharing it.
1804 		 */
1805 
1806 		if (object == kernel_object || object == kmem_object) {
1807 			vm_object_page_remove(object, OFF_TO_IDX(offset),
1808 			    OFF_TO_IDX(offset + (e - s)), FALSE);
1809 		} else if (!map->is_main_map) {
1810 			vm_object_pmap_remove(object,
1811 			    OFF_TO_IDX(offset),
1812 			    OFF_TO_IDX(offset + (e - s)));
1813 		} else {
1814 			pmap_remove(map->pmap, s, e);
1815 		}
1816 
1817 		/*
1818 		 * Delete the entry (which may delete the object) only after
1819 		 * removing all pmap entries pointing to its pages.
1820 		 * (Otherwise, its page frames may be reallocated, and any
1821 		 * modify bits will be set in the wrong object!)
1822 		 */
1823 
1824 		vm_map_entry_delete(map, entry);
1825 		entry = next;
1826 	}
1827 	return (KERN_SUCCESS);
1828 }
1829 
1830 /*
1831  *	vm_map_remove:
1832  *
1833  *	Remove the given address range from the target map.
1834  *	This is the exported form of vm_map_delete.
1835  */
1836 int
1837 vm_map_remove(map, start, end)
1838 	register vm_map_t map;
1839 	register vm_offset_t start;
1840 	register vm_offset_t end;
1841 {
1842 	register int result, s = 0;
1843 
1844 	if (map == kmem_map || map == mb_map)
1845 		s = splvm();
1846 
1847 	vm_map_lock(map);
1848 	VM_MAP_RANGE_CHECK(map, start, end);
1849 	result = vm_map_delete(map, start, end);
1850 	vm_map_unlock(map);
1851 
1852 	if (map == kmem_map || map == mb_map)
1853 		splx(s);
1854 
1855 	return (result);
1856 }
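
/*
 * Illustrative sketch, not part of this module: a typical caller
 * tears down a mapping by rounding the range to page boundaries
 * before calling vm_map_remove.  The function below and its names
 * are hypothetical.
 */
#ifdef notdef
static int
example_unmap(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	/* Page-align the range, then remove it from the map. */
	return (vm_map_remove(map, trunc_page(addr),
	    round_page(addr + size)));
}
#endif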
1857 
1858 /*
1859  *	vm_map_check_protection:
1860  *
1861  *	Assert that the target map allows the specified
1862  *	privilege on the entire address region given.
1863  *	The entire region must be allocated.
1864  */
1865 boolean_t
1866 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
1867 			vm_prot_t protection)
1868 {
1869 	register vm_map_entry_t entry;
1870 	vm_map_entry_t tmp_entry;
1871 
1872 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1873 		return (FALSE);
1874 	}
1875 	entry = tmp_entry;
1876 
1877 	while (start < end) {
1878 		if (entry == &map->header) {
1879 			return (FALSE);
1880 		}
1881 		/*
1882 		 * No holes allowed!
1883 		 */
1884 
1885 		if (start < entry->start) {
1886 			return (FALSE);
1887 		}
1888 		/*
1889 		 * Check protection associated with entry.
1890 		 */
1891 
1892 		if ((entry->protection & protection) != protection) {
1893 			return (FALSE);
1894 		}
1895 		/* go to next entry */
1896 
1897 		start = entry->end;
1898 		entry = entry->next;
1899 	}
1900 	return (TRUE);
1901 }
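
/*
 * Illustrative sketch, not part of this module: a caller about to
 * access a user range directly might assert read permission first.
 * Hypothetical example; the caller is responsible for the map lock
 * and for the range remaining valid afterwards.
 */
#ifdef notdef
static boolean_t
example_can_read(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	/* TRUE only if the whole range is allocated and readable. */
	return (vm_map_check_protection(map, start, end, VM_PROT_READ));
}
#endif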
1902 
1903 /*
1904  *	vm_map_copy_entry:
1905  *
1906  *	Copies the contents of the source entry to the destination
1907  *	entry.  The entries *must* be aligned properly.
1908  */
1909 static void
1910 vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1911 	vm_map_t src_map, dst_map;
1912 	register vm_map_entry_t src_entry, dst_entry;
1913 {
1914 	if ((dst_entry->eflags|src_entry->eflags) &
1915 		(MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
1916 		return;
1917 
1918 	if (src_entry->wired_count == 0) {
1919 
1920 		/*
1921 		 * If the source entry is marked needs_copy, it is already
1922 		 * write-protected.
1923 		 */
1924 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1925 			pmap_protect(src_map->pmap,
1926 			    src_entry->start,
1927 			    src_entry->end,
1928 			    src_entry->protection & ~VM_PROT_WRITE);
1929 		}
1930 
1931 		/*
1932 		 * Make a copy of the object.
1933 		 */
1934 		if (src_entry->object.vm_object) {
1935 			if ((src_entry->object.vm_object->handle == NULL) &&
1936 				(src_entry->object.vm_object->type == OBJT_DEFAULT ||
1937 				 src_entry->object.vm_object->type == OBJT_SWAP))
1938 				vm_object_collapse(src_entry->object.vm_object);
1939 			vm_object_reference(src_entry->object.vm_object);
1940 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
1941 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
1942 			dst_entry->object.vm_object =
1943 				src_entry->object.vm_object;
1944 			dst_entry->offset = src_entry->offset;
1945 		} else {
1946 			dst_entry->object.vm_object = NULL;
1947 			dst_entry->offset = 0;
1948 		}
1949 
1950 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1951 		    dst_entry->end - dst_entry->start, src_entry->start);
1952 	} else {
1953 		/*
1954 		 * Of course, wired down pages can't be set copy-on-write.
1955 		 * Cause wired pages to be copied into the new map by
1956 		 * simulating faults (the new pages are pageable).
1957 		 */
1958 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1959 	}
1960 }
1961 
1962 /*
1963  * vmspace_fork:
1964  * Create a new process vmspace structure and vm_map
1965  * based on those of an existing process.  The new map
1966  * is based on the old map, according to the inheritance
1967  * values on the regions in that map.
1968  *
1969  * The source map must not be locked.
1970  */
1971 struct vmspace *
1972 vmspace_fork(vm1)
1973 	register struct vmspace *vm1;
1974 {
1975 	register struct vmspace *vm2;
1976 	vm_map_t old_map = &vm1->vm_map;
1977 	vm_map_t new_map;
1978 	vm_map_entry_t old_entry;
1979 	vm_map_entry_t new_entry;
1980 	pmap_t new_pmap;
1981 	vm_object_t object;
1982 
1983 	vm_map_lock(old_map);
1984 
1985 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
1986 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1987 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1988 	new_pmap = &vm2->vm_pmap;	/* XXX */
1989 	new_map = &vm2->vm_map;	/* XXX */
1990 	new_map->timestamp = 1;
1991 
1992 	old_entry = old_map->header.next;
1993 
1994 	while (old_entry != &old_map->header) {
1995 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
1996 			panic("vm_map_fork: encountered a submap");
1997 
1998 		switch (old_entry->inheritance) {
1999 		case VM_INHERIT_NONE:
2000 			break;
2001 
2002 		case VM_INHERIT_SHARE:
2003 			/*
2004 			 * Create the shared object, if necessary.
2005 			 */
2006 			object = old_entry->object.vm_object;
2007 			if (object == NULL) {
2008 				object = vm_object_allocate(OBJT_DEFAULT,
2009 					atop(old_entry->end - old_entry->start));
2010 				old_entry->object.vm_object = object;
2011 				old_entry->offset = (vm_offset_t) 0;
2012 			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2013 				vm_object_shadow(&old_entry->object.vm_object,
2014 					&old_entry->offset,
2015 					atop(old_entry->end - old_entry->start));
2016 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2017 				object = old_entry->object.vm_object;
2018 			}
2019 
2020 			/*
2021 			 * Clone the entry, referencing the sharing map.
2022 			 * Clone the entry, referencing the shared object.
2023 			new_entry = vm_map_entry_create(new_map);
2024 			*new_entry = *old_entry;
2025 			new_entry->wired_count = 0;
2026 			vm_object_reference(object);
2027 
2028 			/*
2029 			 * Insert the entry into the new map -- we know we're
2030 			 * inserting at the end of the new map.
2031 			 */
2032 
2033 			vm_map_entry_link(new_map, new_map->header.prev,
2034 			    new_entry);
2035 
2036 			/*
2037 			 * Update the physical map
2038 			 */
2039 
2040 			pmap_copy(new_map->pmap, old_map->pmap,
2041 			    new_entry->start,
2042 			    (old_entry->end - old_entry->start),
2043 			    old_entry->start);
2044 			break;
2045 
2046 		case VM_INHERIT_COPY:
2047 			/*
2048 			 * Clone the entry and link into the map.
2049 			 */
2050 			new_entry = vm_map_entry_create(new_map);
2051 			*new_entry = *old_entry;
2052 			new_entry->wired_count = 0;
2053 			new_entry->object.vm_object = NULL;
2054 			new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
2055 			vm_map_entry_link(new_map, new_map->header.prev,
2056 			    new_entry);
2057 			vm_map_copy_entry(old_map, new_map, old_entry,
2058 			    new_entry);
2059 			break;
2060 		}
2061 		old_entry = old_entry->next;
2062 	}
2063 
2064 	new_map->size = old_map->size;
2065 	vm_map_unlock(old_map);
2066 
2067 	return (vm2);
2068 }
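
/*
 * Illustrative sketch, not part of this module: fork(2)-style code
 * either shares the parent's vmspace or copies it with vmspace_fork,
 * roughly as below.  The names p1, p2 and flags are hypothetical;
 * see kern_fork.c for the real logic.
 */
#ifdef notdef
	if (flags & RFMEM) {
		/* Share the address space outright. */
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	} else {
		/* Give the child a COW copy governed by inheritance. */
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
	}
#endif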
2069 
2070 /*
2071  * Unshare the specified VM space for exec.  If other processes are
2072  * mapped to it, then create a new one.  The new vmspace starts out empty.
2073  */
2074 
2075 void
2076 vmspace_exec(struct proc *p) {
2077 	struct vmspace *oldvmspace = p->p_vmspace;
2078 	struct vmspace *newvmspace;
2079 	vm_map_t map = &p->p_vmspace->vm_map;
2080 
2081 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2082 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2083 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2084 	/*
2085 	 * This code is written like this for prototype purposes.  The
2086 	 * goal is to avoid running down the vmspace here, but to let the
2087 	 * other processes that are still using the vmspace finally
2088 	 * run it down.  Even though there is little or no chance of blocking
2089 	 * here, it is a good idea to keep this form for future mods.
2090 	 */
2091 	vmspace_free(oldvmspace);
2092 	p->p_vmspace = newvmspace;
2093 	if (p == curproc)
2094 		pmap_activate(p);
2095 }
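
/*
 * Illustrative sketch, not part of this module: exec(2)-style code
 * would unshare the address space before installing the new image,
 * so that sharers of the old vmspace are unaffected.  Hypothetical
 * fragment only.
 */
#ifdef notdef
	if (p->p_vmspace->vm_refcnt > 1)
		vmspace_exec(p);
	map = &p->p_vmspace->vm_map;	/* now safe to rebuild */
#endif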
2096 
2097 /*
2098  * Unshare the specified VM space to force COW.  This
2099  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2100  */
2101 
2102 void
2103 vmspace_unshare(struct proc *p) {
2104 	struct vmspace *oldvmspace = p->p_vmspace;
2105 	struct vmspace *newvmspace;
2106 
2107 	if (oldvmspace->vm_refcnt == 1)
2108 		return;
2109 	newvmspace = vmspace_fork(oldvmspace);
2110 	vmspace_free(oldvmspace);
2111 	p->p_vmspace = newvmspace;
2112 	if (p == curproc)
2113 		pmap_activate(p);
2114 }
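
/*
 * Illustrative sketch, not part of this module: the rfork(2) case
 * named in the comment above would invoke this as follows.
 * Hypothetical fragment; `flags' is rfork's argument.
 */
#ifdef notdef
	if ((flags & (RFMEM | RFPROC)) == 0)
		vmspace_unshare(p);
#endif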
2115 
2116 
2117 /*
2118  *	vm_map_lookup:
2119  *
2120  *	Finds the VM object, offset, and
2121  *	protection for a given virtual address in the
2122  *	specified map, assuming a page fault of the
2123  *	type specified.
2124  *
2125  *	Leaves the map in question locked for read; return
2126  *	values are guaranteed until a vm_map_lookup_done
2127  *	call is performed.  Note that the map argument
2128  *	is in/out; the returned map must be used in
2129  *	the call to vm_map_lookup_done.
2130  *
2131  *	A handle (out_entry) is returned for use in
2132  *	vm_map_lookup_done, to make that fast.
2133  *
2134  *	If a lookup is requested with "write protection"
2135  *	specified, the map may be changed to perform virtual
2136  *	copying operations, although the data referenced will
2137  *	remain the same.
2138  */
2139 int
2140 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2141 	      vm_offset_t vaddr,
2142 	      vm_prot_t fault_typea,
2143 	      vm_map_entry_t *out_entry,	/* OUT */
2144 	      vm_object_t *object,		/* OUT */
2145 	      vm_pindex_t *pindex,		/* OUT */
2146 	      vm_prot_t *out_prot,		/* OUT */
2147 	      boolean_t *wired)			/* OUT */
2148 {
2149 	vm_map_t share_map;
2150 	vm_offset_t share_offset;
2151 	register vm_map_entry_t entry;
2152 	register vm_map_t map = *var_map;
2153 	register vm_prot_t prot;
2154 	register boolean_t su;
2155 	vm_prot_t fault_type = fault_typea;
2156 
2157 RetryLookup:;
2158 
2159 	/*
2160 	 * Lookup the faulting address.
2161 	 */
2162 
2163 	vm_map_lock_read(map);
2164 
2165 #define	RETURN(why) \
2166 		{ \
2167 		vm_map_unlock_read(map); \
2168 		return(why); \
2169 		}
2170 
2171 	/*
2172 	 * If the map has an interesting hint, try it before calling full
2173 	 * blown lookup routine.
2174 	 */
2175 
2176 	entry = map->hint;
2177 
2178 	*out_entry = entry;
2179 
2180 	if ((entry == &map->header) ||
2181 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2182 		vm_map_entry_t tmp_entry;
2183 
2184 		/*
2185 		 * Entry was either not a valid hint, or the vaddr was not
2186 		 * contained in the entry, so do a full lookup.
2187 		 */
2188 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2189 			RETURN(KERN_INVALID_ADDRESS);
2190 
2191 		entry = tmp_entry;
2192 		*out_entry = entry;
2193 	}
2194 
2195 	/*
2196 	 * Handle submaps.
2197 	 */
2198 
2199 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2200 		vm_map_t old_map = map;
2201 
2202 		*var_map = map = entry->object.sub_map;
2203 		vm_map_unlock_read(old_map);
2204 		goto RetryLookup;
2205 	}
2206 
2207 	/*
2208 	 * Check whether this task is allowed to have this page.
2209 	 * Note the special case for MAP_ENTRY_COW
2210 	 * pages with an override.  This is to implement a forced
2211 	 * COW for debuggers.
2212 	 */
2213 
2214 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2215 		prot = entry->max_protection;
2216 	else
2217 		prot = entry->protection;
2218 
2219 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2220 	if ((fault_type & prot) != fault_type) {
2221 		RETURN(KERN_PROTECTION_FAILURE);
2222 	}
2223 
2224 	if (entry->wired_count && (fault_type & VM_PROT_WRITE) &&
2225 			(entry->eflags & MAP_ENTRY_COW) &&
2226 			(fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2227 		RETURN(KERN_PROTECTION_FAILURE);
2228 	}
2229 
2230 	/*
2231 	 * If this page is not pageable, we have to get it for all possible
2232 	 * accesses.
2233 	 */
2234 
2235 	*wired = (entry->wired_count != 0);
2236 	if (*wired)
2237 		prot = fault_type = entry->protection;
2238 
2239 	/*
2240 	 * If we don't already have a VM object, track it down.
2241 	 */
2242 
2243 	su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
2244 	if (su) {
2245 		share_map = map;
2246 		share_offset = vaddr;
2247 	} else {
2248 		vm_map_entry_t share_entry;
2249 
2250 		/*
2251 		 * Compute the sharing map, and offset into it.
2252 		 */
2253 
2254 		share_map = entry->object.share_map;
2255 		share_offset = (vaddr - entry->start) + entry->offset;
2256 
2257 		/*
2258 		 * Look for the backing store object and offset
2259 		 */
2260 
2261 		vm_map_lock_read(share_map);
2262 
2263 		if (!vm_map_lookup_entry(share_map, share_offset,
2264 			&share_entry)) {
2265 			vm_map_unlock_read(share_map);
2266 			RETURN(KERN_INVALID_ADDRESS);
2267 		}
2268 		entry = share_entry;
2269 	}
2270 
2271 	/*
2272 	 * If the entry was copy-on-write, we either shadow the object or demote the permitted access.
2273 	 */
2274 
2275 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2276 		/*
2277 		 * If we want to write the page, we may as well handle that
2278 		 * now since we've got the sharing map locked.
2279 		 *
2280 		 * If we don't need to write the page, we just demote the
2281 		 * permissions allowed.
2282 		 */
2283 
2284 		if (fault_type & VM_PROT_WRITE) {
2285 			/*
2286 			 * Make a new object, and place it in the object
2287 			 * chain.  Note that no new references have appeared
2288 			 * -- one just moved from the share map to the new
2289 			 * object.
2290 			 */
2291 
2292 			if (vm_map_lock_upgrade(share_map)) {
2293 				if (share_map != map)
2294 					vm_map_unlock_read(map);
2295 
2296 				goto RetryLookup;
2297 			}
2298 			vm_object_shadow(
2299 			    &entry->object.vm_object,
2300 			    &entry->offset,
2301 			    atop(entry->end - entry->start));
2302 
2303 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2304 			vm_map_lock_downgrade(share_map);
2305 		} else {
2306 			/*
2307 			 * We're attempting to read a copy-on-write page --
2308 			 * don't allow writes.
2309 			 */
2310 
2311 			prot &= ~VM_PROT_WRITE;
2312 		}
2313 	}
2314 
2315 	/*
2316 	 * Create an object if necessary.
2317 	 */
2318 	if (entry->object.vm_object == NULL) {
2319 
2320 		if (vm_map_lock_upgrade(share_map)) {
2321 			if (share_map != map)
2322 				vm_map_unlock_read(map);
2323 			goto RetryLookup;
2324 		}
2325 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2326 		    atop(entry->end - entry->start));
2327 		entry->offset = 0;
2328 		vm_map_lock_downgrade(share_map);
2329 	}
2330 
2331 	if (entry->object.vm_object->type == OBJT_DEFAULT)
2332 		default_pager_convert_to_swapq(entry->object.vm_object);
2333 	/*
2334 	 * Return the object/offset from this entry.  If the entry was
2335 	 * copy-on-write or empty, it has been fixed up.
2336 	 */
2337 
2338 	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
2339 	*object = entry->object.vm_object;
2340 
2341 	/*
2342 	 * Return the protection allowed for this fault.
2343 	 */
2344 
2345 	*out_prot = prot;
2346 	return (KERN_SUCCESS);
2347 
2348 #undef	RETURN
2349 }
2350 
2351 /*
2352  *	vm_map_lookup_done:
2353  *
2354  *	Releases locks acquired by a vm_map_lookup
2355  *	(according to the handle returned by that lookup).
2356  */
2357 
2358 void
2359 vm_map_lookup_done(map, entry)
2360 	register vm_map_t map;
2361 	vm_map_entry_t entry;
2362 {
2363 	/*
2364 	 * If this entry references a map, unlock it first.
2365 	 */
2366 
2367 	if (entry->eflags & MAP_ENTRY_IS_A_MAP)
2368 		vm_map_unlock_read(entry->object.share_map);
2369 
2370 	/*
2371 	 * Unlock the main-level map
2372 	 */
2373 
2374 	vm_map_unlock_read(map);
2375 }
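
/*
 * Illustrative sketch, not part of this module: vm_map_lookup and
 * vm_map_lookup_done bracket a fault-style lookup.  vm_fault() is
 * the canonical caller; the fragment below is hypothetical.
 */
#ifdef notdef
	vm_map_t map = &curproc->p_vmspace->vm_map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired) != KERN_SUCCESS)
		return (KERN_INVALID_ADDRESS);
	/*
	 * object/pindex stay valid while the returned map is read-locked.
	 */
	vm_map_lookup_done(map, entry);
#endif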
2376 
2377 /*
2378  * Implement uiomove with VM operations.  This code (together with its
2379  * collateral changes) supports every combination of source object
2380  * modification and COW-type operation.
2381  */
2382 int
2383 vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
2384 	vm_map_t mapa;
2385 	vm_object_t srcobject;
2386 	off_t cp;
2387 	int cnta;
2388 	vm_offset_t uaddra;
2389 	int *npages;
2390 {
2391 	vm_map_t map;
2392 	vm_object_t first_object, oldobject, object;
2393 	vm_map_entry_t entry;
2394 	vm_prot_t prot;
2395 	boolean_t wired;
2396 	int tcnt, rv;
2397 	vm_offset_t uaddr, start, end, tend;
2398 	vm_pindex_t first_pindex, osize, oindex;
2399 	off_t ooffset;
2400 	int cnt;
2401 
2402 	if (npages)
2403 		*npages = 0;
2404 
2405 	cnt = cnta;
2406 	uaddr = uaddra;
2407 
2408 	while (cnt > 0) {
2409 		map = mapa;
2410 
2411 		if ((vm_map_lookup(&map, uaddr,
2412 			VM_PROT_READ, &entry, &first_object,
2413 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
2414 			return EFAULT;
2415 		}
2416 
2417 		vm_map_clip_start(map, entry, uaddr);
2418 
2419 		tcnt = cnt;
2420 		tend = uaddr + tcnt;
2421 		if (tend > entry->end) {
2422 			tcnt = entry->end - uaddr;
2423 			tend = entry->end;
2424 		}
2425 
2426 		vm_map_clip_end(map, entry, tend);
2427 
2428 		start = entry->start;
2429 		end = entry->end;
2430 
2431 		osize = atop(tcnt);
2432 
2433 		oindex = OFF_TO_IDX(cp);
2434 		if (npages) {
2435 			vm_pindex_t idx;
2436 			for (idx = 0; idx < osize; idx++) {
2437 				vm_page_t m;
2438 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
2439 					vm_map_lookup_done(map, entry);
2440 					return 0;
2441 				}
2442 				if ((m->flags & PG_BUSY) ||
2443 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
2444 					vm_map_lookup_done(map, entry);
2445 					return 0;
2446 				}
2447 			}
2448 		}
2449 
2450 /*
2451  * If we are changing an existing map entry, just redirect
2452  * the object, and change mappings.
2453  */
2454 		if ((first_object->type == OBJT_VNODE) &&
2455 			((oldobject = entry->object.vm_object) == first_object)) {
2456 
2457 			if ((entry->offset != cp) || (oldobject != srcobject)) {
2458 				/*
2459 				 * Remove old window into the file
2460 				 */
2461 				pmap_remove (map->pmap, uaddr, tend);
2462 
2463 				/*
2464 				 * Force copy-on-write for mmapped regions
2465 				 */
2466 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2467 
2468 				/*
2469 				 * Point the object appropriately
2470 				 */
2471 				if (oldobject != srcobject) {
2472 
2473 				/*
2474 				 * Set the object optimization hint flag
2475 				 */
2476 					srcobject->flags |= OBJ_OPT;
2477 					vm_object_reference(srcobject);
2478 					entry->object.vm_object = srcobject;
2479 
2480 					if (oldobject) {
2481 						vm_object_deallocate(oldobject);
2482 					}
2483 				}
2484 
2485 				entry->offset = cp;
2486 				map->timestamp++;
2487 			} else {
2488 				pmap_remove (map->pmap, uaddr, tend);
2489 			}
2490 
2491 		} else if ((first_object->ref_count == 1) &&
2492 			(first_object->size == osize) &&
2493 			((first_object->type == OBJT_DEFAULT) ||
2494 				(first_object->type == OBJT_SWAP)) ) {
2495 
2496 			oldobject = first_object->backing_object;
2497 
2498 			if ((first_object->backing_object_offset != cp) ||
2499 				(oldobject != srcobject)) {
2500 				/*
2501 				 * Remove old window into the file
2502 				 */
2503 				pmap_remove (map->pmap, uaddr, tend);
2504 
2505 				/*
2506 				 * Remove unneeded old pages
2507 				 */
2508 				if (first_object->resident_page_count) {
2509 					vm_object_page_remove (first_object, 0, 0, 0);
2510 				}
2511 
2512 				/*
2513 				 * Invalidate swap space
2514 				 */
2515 				if (first_object->type == OBJT_SWAP) {
2516 					swap_pager_freespace(first_object,
2517 						OFF_TO_IDX(first_object->paging_offset),
2518 						first_object->size);
2519 				}
2520 
2521 				/*
2522 				 * Force copy-on-write for mmapped regions
2523 				 */
2524 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2525 
2526 				/*
2527 				 * Point the object appropriately
2528 				 */
2529 				if (oldobject != srcobject) {
2530 
2531 				/*
2532 				 * Set the object optimization hint flag
2533 				 */
2534 					srcobject->flags |= OBJ_OPT;
2535 					vm_object_reference(srcobject);
2536 
2537 					if (oldobject) {
2538 						TAILQ_REMOVE(&oldobject->shadow_head,
2539 							first_object, shadow_list);
2540 						oldobject->shadow_count--;
2541 						vm_object_deallocate(oldobject);
2542 					}
2543 
2544 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
2545 						first_object, shadow_list);
2546 					srcobject->shadow_count++;
2547 
2548 					first_object->backing_object = srcobject;
2549 				}
2550 				first_object->backing_object_offset = cp;
2551 				map->timestamp++;
2552 			} else {
2553 				pmap_remove (map->pmap, uaddr, tend);
2554 			}
2555 /*
2556  * Otherwise, we have to do a logical mmap.
2557  */
2558 		} else {
2559 
2560 			srcobject->flags |= OBJ_OPT;
2561 			vm_object_reference(srcobject);
2562 
2563 			pmap_remove (map->pmap, uaddr, tend);
2564 
2565 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2566 			vm_map_lock_upgrade(map);
2567 
2568 			if (entry == &map->header) {
2569 				map->first_free = &map->header;
2570 			} else if (map->first_free->start >= start) {
2571 				map->first_free = entry->prev;
2572 			}
2573 
2574 			SAVE_HINT(map, entry->prev);
2575 			vm_map_entry_delete(map, entry);
2576 
2577 			object = srcobject;
2578 			ooffset = cp;
2579 #if 0
2580 			vm_object_shadow(&object, &ooffset, osize);
2581 #endif
2582 
2583 			rv = vm_map_insert(map, object, ooffset, start, tend,
2584 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
2585 
2586 			if (rv != KERN_SUCCESS)
2587 				panic("vm_uiomove: could not insert new entry: %d", rv);
2588 		}
2589 
2590 /*
2591  * Map the window directly, if it is already in memory
2592  */
2593 		pmap_object_init_pt(map->pmap, uaddr,
2594 			srcobject, oindex, tcnt, 0);
2595 
2596 		map->timestamp++;
2597 		vm_map_unlock(map);
2598 
2599 		cnt -= tcnt;
2600 		uaddr += tcnt;
2601 		cp += tcnt;
2602 		if (npages)
2603 			*npages += osize;
2604 	}
2605 	return 0;
2606 }
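
/*
 * Illustrative sketch, not part of this module: a read(2)-style
 * path could map whole pages of a vnode's VM object into the user
 * address space via vm_uiomove instead of copying them.  All names
 * below (vp_object, foffset, xfersize, uio) are hypothetical.
 */
#ifdef notdef
	int npages, error;

	error = vm_uiomove(&curproc->p_vmspace->vm_map, vp_object,
	    foffset, xfersize,
	    (vm_offset_t) uio->uio_iov->iov_base, &npages);
#endif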
2607 
2608 /*
2609  * local routine to allocate a page for an object.
2610  */
2611 static vm_page_t
2612 vm_freeze_page_alloc(object, pindex)
2613 	vm_object_t object;
2614 	vm_pindex_t pindex;
2615 {
2616 	vm_page_t m;
2617 
2618 	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
2619 		VM_WAIT;
2620 		if ((m = vm_page_lookup(object, pindex)) != NULL)
2621 			return NULL;
2622 	}
2623 
2624 	m->valid = 0;
2625 	m->dirty = 0;
2626 	return m;
2627 }
2628 
2629 /*
2630  * Performs the copy-on-write operations necessary to allow the virtual copies
2631  * into user space to work.  This has to be called for write(2) system calls
2632  * from other processes, file unlinking, and file size shrinkage.
2633  */
2634 void
2635 vm_freeze_copyopts(object, froma, toa)
2636 	vm_object_t object;
2637 	vm_pindex_t froma, toa;
2638 {
2639 	int s;
2640 	vm_object_t robject, robjectn;
2641 	vm_pindex_t idx, from, to;
2642 
2643 	if ((object == NULL) ||
2644 		((object->flags & OBJ_OPT) == 0))
2645 		return;
2646 
2647 	if (object->shadow_count > object->ref_count)
2648 		panic("vm_freeze_copyopts: sc > rc");
2649 
2650 	while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
2651 		vm_pindex_t bo_pindex;
2652 		vm_page_t m_in, m_out;
2653 
2654 		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
2655 
2656 		vm_object_reference(robject);
2657 
2658 		s = splvm();
2659 		while (robject->paging_in_progress) {
2660 			robject->flags |= OBJ_PIPWNT;
2661 			tsleep(robject, PVM, "objfrz", 0);
2662 		}
2663 		splx(s);
2664 
2665 		if (robject->ref_count == 1) {
2666 			vm_object_deallocate(robject);
2667 			continue;
2668 		}
2669 
2670 		robject->paging_in_progress++;
2671 
2672 		for (idx = 0; idx < robject->size; idx++) {
2673 
2674 m_outretry:
2675 			m_out = vm_page_lookup(robject, idx);
2676 			if (m_out && (m_out->flags & PG_BUSY)) {
2677 				s = splvm();
2678 				while (m_out && (m_out->flags & PG_BUSY)) {
2679 					m_out->flags |= PG_WANTED;
2680 					tsleep(m_out, PVM, "pwtfrz", 0);
2681 					splx(s);
2682 					goto m_outretry;
2683 				}
2684 				splx(s);
2685 			}
2686 
2687 			if (m_out == NULL) {
2688 				m_out = vm_freeze_page_alloc(robject, idx);
2689 				if (m_out == NULL)
2690 					goto m_outretry;
2691 			}
2692 
2693 			if (m_out->valid == 0) {
2694 				m_out->flags |= PG_BUSY;
2695 m_inretry:
2696 				m_in = vm_page_lookup(object, bo_pindex + idx);
2697 				if (m_in == NULL) {
2698 					int rv;
2699 					m_in = vm_freeze_page_alloc(object, bo_pindex + idx);
2700 					if (m_in == NULL)
2701 						goto m_inretry;
2702 					rv = vm_pager_get_pages(object, &m_in, 1, 0);
2703 					if (rv != VM_PAGER_OK) {
2704 						printf("vm_freeze_copyopts: cannot read page from file: %x\n", m_in->pindex);
2705 						continue;
2706 					}
2707 				} else if (m_in->busy || (m_in->flags & PG_BUSY)) {
2708 					s = splvm();
2709 					while (m_in && (m_in->busy || (m_in->flags & PG_BUSY))) {
2710 						m_in->flags |= PG_WANTED;
2711 						tsleep(m_in, PVM, "pwtfrz", 0);
2712 						splx(s);
2713 						goto m_inretry;
2714 					}
2715 					splx(s);
2716 					if (m_in == NULL) {
2717 						goto m_inretry;
2718 					}
2719 				}
2720 
2721 				m_in->flags |= PG_BUSY;
2722 				vm_page_protect(m_in, VM_PROT_NONE);
2723 				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
2724 				m_out->valid = VM_PAGE_BITS_ALL;
2725 				m_out->dirty = VM_PAGE_BITS_ALL;
2726 
2727 				vm_page_deactivate(m_out);
2728 				vm_page_deactivate(m_in);
2729 
2730 				PAGE_WAKEUP(m_out);
2731 				PAGE_WAKEUP(m_in);
2732 			}
2733 		}
2734 
2735 		object->shadow_count--;
2736 		object->ref_count--;
2737 		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
2738 		robject->backing_object = NULL;
2739 		robject->backing_object_offset = 0;
2740 
2741 		vm_object_pip_wakeup(robject);
2742 		vm_object_deallocate(robject);
2743 	}
2744 
2745 	object->flags &= ~OBJ_OPT;
2746 }
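
/*
 * Illustrative sketch, not part of this module: per the comment
 * above, a write path that is about to modify an OBJ_OPT object
 * must first break the outstanding virtual copies.  Hypothetical
 * fragment; `object', `offset' and `count' are assumed names.
 */
#ifdef notdef
	vm_freeze_copyopts(object, OFF_TO_IDX(offset),
	    OFF_TO_IDX(offset + count));
#endif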
2747 
2748 #include "opt_ddb.h"
2749 #ifdef DDB
2750 #include <sys/kernel.h>
2751 
2752 #include <ddb/ddb.h>
2753 
2754 /*
2755  *	vm_map_print:	[ debug ]
2756  */
2757 DB_SHOW_COMMAND(map, vm_map_print)
2758 {
2759 	static int nlines;
2760 	/* XXX convert args. */
2761 	register vm_map_t map = (vm_map_t)addr;
2762 	boolean_t full = have_addr;
2763 
2764 	register vm_map_entry_t entry;
2765 
2766 	db_iprintf("%s map 0x%x: pmap=0x%x, nentries=%d, version=%d\n",
2767 	    (map->is_main_map ? "Task" : "Share"),
2768 	    (int) map, (int) (map->pmap), map->nentries,
2769 	    map->timestamp);
2770 	nlines++;
2771 
2772 	if (!full && db_indent)
2773 		return;
2774 
2775 	db_indent += 2;
2776 	for (entry = map->header.next; entry != &map->header;
2777 	    entry = entry->next) {
2778 #if 0
2779 		if (nlines > 18) {
2780 			db_printf("--More--");
2781 			cngetc();
2782 			db_printf("\r");
2783 			nlines = 0;
2784 		}
2785 #endif
2786 
2787 		db_iprintf("map entry 0x%x: start=0x%x, end=0x%x\n",
2788 		    (int) entry, (int) entry->start, (int) entry->end);
2789 		nlines++;
2790 		if (map->is_main_map) {
2791 			static char *inheritance_name[4] =
2792 			{"share", "copy", "none", "donate_copy"};
2793 
2794 			db_iprintf(" prot=%x/%x/%s",
2795 			    entry->protection,
2796 			    entry->max_protection,
2797 			    inheritance_name[entry->inheritance]);
2798 			if (entry->wired_count != 0)
2799 				db_printf(", wired");
2800 		}
2801 		if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
2802 			db_printf(", share=0x%x, offset=0x%x\n",
2803 			    (int) entry->object.share_map,
2804 			    (int) entry->offset);
2805 			nlines++;
2806 			if ((entry->prev == &map->header) ||
2807 			    ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
2808 			    (entry->prev->object.share_map !=
2809 				entry->object.share_map)) {
2810 				db_indent += 2;
2811 				vm_map_print((int)entry->object.share_map,
2812 					     full, 0, (char *)0);
2813 				db_indent -= 2;
2814 			}
2815 		} else {
2816 			db_printf(", object=0x%x, offset=0x%x",
2817 			    (int) entry->object.vm_object,
2818 			    (int) entry->offset);
2819 			if (entry->eflags & MAP_ENTRY_COW)
2820 				db_printf(", copy (%s)",
2821 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
2822 			db_printf("\n");
2823 			nlines++;
2824 
2825 			if ((entry->prev == &map->header) ||
2826 			    (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
2827 			    (entry->prev->object.vm_object !=
2828 				entry->object.vm_object)) {
2829 				db_indent += 2;
2830 				vm_object_print((int)entry->object.vm_object,
2831 						full, 0, (char *)0);
2832 				nlines += 4;
2833 				db_indent -= 2;
2834 			}
2835 		}
2836 	}
2837 	db_indent -= 2;
2838 	if (db_indent == 0)
2839 		nlines = 0;
2840 }
2841 
2842 
2843 DB_SHOW_COMMAND(procvm, procvm)
2844 {
2845 	struct proc *p;
2846 
2847 	if (have_addr) {
2848 		p = (struct proc *) addr;
2849 	} else {
2850 		p = curproc;
2851 	}
2852 
2853 	printf("p = 0x%x, vmspace = 0x%x, map = 0x%x, pmap = 0x%x\n",
2854 		p, p->p_vmspace, &p->p_vmspace->vm_map, &p->p_vmspace->vm_pmap);
2855 
2856 	vm_map_print ((int) &p->p_vmspace->vm_map, 1, 0, NULL);
2857 }
2858 
2859 #endif /* DDB */
2860