xref: /freebsd/sys/vm/vm_map.c (revision a8445737e740901f5f2c8d24c12ef7fc8b00134e)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_map.c,v 1.134 1998/08/24 08:39:37 dfr Exp $
65  */
66 
67 /*
68  *	Virtual memory mapping module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/proc.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <vm/vm_prot.h>
82 #include <vm/vm_inherit.h>
83 #include <sys/lock.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
91 #include <vm/default_pager.h>
92 #include <vm/swap_pager.h>
93 #include <vm/vm_zone.h>
94 
95 static MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
96 
97 /*
98  *	Virtual memory maps provide for the mapping, protection,
99  *	and sharing of virtual memory objects.  In addition,
100  *	this module provides for an efficient virtual copy of
101  *	memory from one map to another.
102  *
103  *	Synchronization is required prior to most operations.
104  *
105  *	Maps consist of an ordered doubly-linked list of simple
106  *	entries; a single hint is used to speed up lookups.
107  *
108  *	In order to properly represent the sharing of virtual
109  *	memory regions among maps, the map structure is bi-level.
110  *	Top-level ("address") maps refer to regions of sharable
111  *	virtual memory.  These regions are implemented as
112  *	("sharing") maps, which then refer to the actual virtual
113  *	memory objects.  When two address maps "share" memory,
114  *	their top-level maps both have references to the same
115  *	sharing map.  When memory is virtual-copied from one
116  *	address map to another, the references in the sharing
117  *	maps are actually copied -- no copying occurs at the
118  *	virtual memory object level.
119  *
120  *	Since portions of maps are specified by start/end addresses,
121  *	which may not align with existing map entries, all
122  *	routines merely "clip" entries to these start/end values.
123  *	[That is, an entry is split into two, bordering at a
124  *	start or end value.]  Note that these clippings may not
125  *	always be necessary (as the two resulting entries are then
126  *	not changed); however, the clipping is done for convenience.
127  *	No attempt is currently made to "glue back together" two
128  *	abutting entries.
129  *
130  *	As mentioned above, virtual copy operations are performed
131  *	by copying VM object references from one sharing map to
132  *	another, and then marking both regions as copy-on-write.
133  *	It is important to note that only one writeable reference
134  *	to a VM object region exists in any map -- this means that
135  *	shadow object creation can be delayed until a write operation
136  *	occurs.
137  */
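
/*
 * An illustrative sketch (kept under #if 0; not part of the build) of
 * walking the entry list described above.  The header entry makes the
 * list circular; the map is assumed to be locked by the caller, and
 * the function name here is hypothetical.
 */
#if 0
static void
vm_map_walk_entries(vm_map_t map)
{
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next)
		printf("entry [0x%lx, 0x%lx)\n",
		    (u_long)entry->start, (u_long)entry->end);
}
#endif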
138 
139 /*
140  *	vm_map_startup:
141  *
142  *	Initialize the vm_map module.  Must be called before
143  *	any other vm_map routines.
144  *
145  *	Map and entry structures are allocated from the general
146  *	purpose memory pool with some exceptions:
147  *
148  *	- The kernel map and kmem submap are allocated statically.
149  *	- Kernel map entries are allocated out of a static pool.
150  *
151  *	These restrictions are necessary since malloc() uses the
152  *	maps and requires map entries.
153  */
154 
155 extern char kstack[];
156 extern int inmprotect;
157 
158 static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
159 static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
160 static struct vm_object kmapentobj, mapentobj, mapobj;
161 #define MAP_ENTRY_INIT	128
162 static struct vm_map_entry map_entry_init[MAX_MAPENT];
163 static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
164 static struct vm_map map_init[MAX_KMAP];
165 
166 static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
167 static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
168 static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
169 static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
170 static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
171 static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
172 static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
173 		vm_map_entry_t));
174 static void vm_map_split __P((vm_map_entry_t));
175 
176 void
177 vm_map_startup()
178 {
179 	mapzone = &mapzone_store;
180 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
181 		map_init, MAX_KMAP);
182 	kmapentzone = &kmapentzone_store;
183 	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
184 		kmap_entry_init, MAX_KMAPENT);
185 	mapentzone = &mapentzone_store;
186 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
187 		map_entry_init, MAX_MAPENT);
188 }
189 
190 /*
191  * Allocate a vmspace structure, including a vm_map and pmap,
192  * and initialize those structures.  The refcnt is set to 1.
193  * The remaining fields must be initialized by the caller.
194  */
195 struct vmspace *
196 vmspace_alloc(min, max)
197 	vm_offset_t min, max;
198 {
199 	struct vmspace *vm;
200 
201 	vm = zalloc(vmspace_zone);
202 	bzero(&vm->vm_map, sizeof vm->vm_map);
203 	vm_map_init(&vm->vm_map, min, max);
204 	pmap_pinit(&vm->vm_pmap);
205 	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
206 	vm->vm_refcnt = 1;
207 	vm->vm_shm = NULL;
208 	return (vm);
209 }
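
/*
 * An illustrative sketch of the intended reference counting, assuming
 * i386-style VM_MIN_ADDRESS/VM_MAXUSER_ADDRESS bounds; vmspace_free()
 * below tears the map down only when the last reference goes away.
 */
#if 0
	struct vmspace *vm;

	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm->vm_refcnt++;	/* a second user of this vmspace */
	vmspace_free(vm);	/* refcnt 2 -> 1, vmspace kept */
	vmspace_free(vm);	/* refcnt 1 -> 0, vmspace destroyed */
#endif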
210 
211 void
212 vm_init2(void) {
213 	zinitna(kmapentzone, &kmapentobj,
214 		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
215 	zinitna(mapentzone, &mapentobj,
216 		NULL, 0, 0, 0, 1);
217 	zinitna(mapzone, &mapobj,
218 		NULL, 0, 0, 0, 1);
219 	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
220 	pmap_init2();
221 	vm_object_init2();
222 }
223 
224 void
225 vmspace_free(vm)
226 	struct vmspace *vm;
227 {
228 
229 	if (vm->vm_refcnt == 0)
230 		panic("vmspace_free: attempt to free already freed vmspace");
231 
232 	if (--vm->vm_refcnt == 0) {
233 
234 		/*
235 		 * Lock the map, to wait out all other references to it.
236 		 * Delete all of the mappings and pages they hold, then call
237 		 * the pmap module to reclaim anything left.
238 		 */
239 		vm_map_lock(&vm->vm_map);
240 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
241 		    vm->vm_map.max_offset);
242 		vm_map_unlock(&vm->vm_map);
243 
244 		pmap_release(&vm->vm_pmap);
245 		zfree(vmspace_zone, vm);
246 	}
247 }
248 
249 /*
250  *	vm_map_create:
251  *
252  *	Creates and returns a new empty VM map with
253  *	the given physical map structure, and having
254  *	the given lower and upper address bounds.
255  */
256 vm_map_t
257 vm_map_create(pmap, min, max)
258 	pmap_t pmap;
259 	vm_offset_t min, max;
260 {
261 	vm_map_t result;
262 
263 	result = zalloc(mapzone);
264 	vm_map_init(result, min, max);
265 	result->pmap = pmap;
266 	return (result);
267 }
268 
269 /*
270  * Initialize an existing vm_map structure
271  * such as that in the vmspace structure.
272  * The pmap is set elsewhere.
273  */
274 void
275 vm_map_init(map, min, max)
276 	struct vm_map *map;
277 	vm_offset_t min, max;
278 {
279 	map->header.next = map->header.prev = &map->header;
280 	map->nentries = 0;
281 	map->size = 0;
282 	map->is_main_map = TRUE;
283 	map->system_map = 0;
284 	map->min_offset = min;
285 	map->max_offset = max;
286 	map->first_free = &map->header;
287 	map->hint = &map->header;
288 	map->timestamp = 0;
289 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
290 }
291 
292 /*
293  *	vm_map_entry_dispose:	[ internal use only ]
294  *
295  *	Inverse of vm_map_entry_create.
296  */
297 static void
298 vm_map_entry_dispose(map, entry)
299 	vm_map_t map;
300 	vm_map_entry_t entry;
301 {
302 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
303 }
304 
305 /*
306  *	vm_map_entry_create:	[ internal use only ]
307  *
308  *	Allocates a VM map entry for insertion.
309  *	No entry fields are filled in.
310  */
311 static vm_map_entry_t
312 vm_map_entry_create(map)
313 	vm_map_t map;
314 {
315 	return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
316 }
317 
318 /*
319  *	vm_map_entry_{un,}link:
320  *
321  *	Insert/remove entries from maps.
322  */
323 #define	vm_map_entry_link(map, after_where, entry) \
324 		{ \
325 		(map)->nentries++; \
326 		(map)->timestamp++; \
327 		(entry)->prev = (after_where); \
328 		(entry)->next = (after_where)->next; \
329 		(entry)->prev->next = (entry); \
330 		(entry)->next->prev = (entry); \
331 		}
332 #define	vm_map_entry_unlink(map, entry) \
333 		{ \
334 		(map)->nentries--; \
335 		(map)->timestamp++; \
336 		(entry)->next->prev = (entry)->prev; \
337 		(entry)->prev->next = (entry)->next; \
338 		}
339 
340 /*
341  *	SAVE_HINT:
342  *
343  *	Saves the specified entry as the hint for
344  *	future lookups.
345  */
346 #define	SAVE_HINT(map,value) \
347 		(map)->hint = (value);
348 
349 /*
350  *	vm_map_lookup_entry:	[ internal use only ]
351  *
352  *	Finds the map entry containing (or
353  *	immediately preceding) the specified address
354  *	in the given map; the entry is returned
355  *	in the "entry" parameter.  The boolean
356  *	result indicates whether the address is
357  *	actually contained in the map.
358  */
359 boolean_t
360 vm_map_lookup_entry(map, address, entry)
361 	vm_map_t map;
362 	vm_offset_t address;
363 	vm_map_entry_t *entry;	/* OUT */
364 {
365 	vm_map_entry_t cur;
366 	vm_map_entry_t last;
367 
368 	/*
369 	 * Start looking either from the head of the list, or from the hint.
370 	 */
371 
372 	cur = map->hint;
373 
374 	if (cur == &map->header)
375 		cur = cur->next;
376 
377 	if (address >= cur->start) {
378 		/*
379 		 * Go from hint to end of list.
380 		 *
381 		 * But first, make a quick check to see if we are already looking
382 		 * at the entry we want (which is usually the case). Note also
383 		 * that we don't need to save the hint here... it is the same
384 		 * hint (unless we are at the header, in which case the hint
385 		 * didn't buy us anything anyway).
386 		 */
387 		last = &map->header;
388 		if ((cur != last) && (cur->end > address)) {
389 			*entry = cur;
390 			return (TRUE);
391 		}
392 	} else {
393 		/*
394 		 * Go from start to hint, *inclusively*
395 		 */
396 		last = cur->next;
397 		cur = map->header.next;
398 	}
399 
400 	/*
401 	 * Search linearly
402 	 */
403 
404 	while (cur != last) {
405 		if (cur->end > address) {
406 			if (address >= cur->start) {
407 				/*
408 				 * Save this lookup for future hints, and
409 				 * return
410 				 */
411 
412 				*entry = cur;
413 				SAVE_HINT(map, cur);
414 				return (TRUE);
415 			}
416 			break;
417 		}
418 		cur = cur->next;
419 	}
420 	*entry = cur->prev;
421 	SAVE_HINT(map, *entry);
422 	return (FALSE);
423 }
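
/*
 * A sketch of the lookup contract, with hypothetical locals: on TRUE,
 * *entry contains the address; on FALSE, *entry immediately precedes
 * the address (and may be the header).
 */
#if 0
	vm_map_entry_t entry;

	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* entry->start <= addr && addr < entry->end */
	} else {
		/*
		 * addr lies in the gap after *entry; a new entry
		 * covering addr would be linked in after it with
		 * vm_map_entry_link(map, entry, new_entry).
		 */
	}
#endif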
424 
425 /*
426  *	vm_map_insert:
427  *
428  *	Inserts the given whole VM object into the target
429  *	map at the specified address range.  The object's
430  *	size should match that of the address range.
431  *
432  *	Requires that the map be locked, and leaves it so.
433  */
434 int
435 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
436 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
437 	      int cow)
438 {
439 	vm_map_entry_t new_entry;
440 	vm_map_entry_t prev_entry;
441 	vm_map_entry_t temp_entry;
442 	vm_object_t prev_object;
443 	u_char protoeflags;
444 
445 	if ((object != NULL) && (cow & MAP_NOFAULT)) {
446 		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
447 	}
448 
449 	/*
450 	 * Check that the start and end points are not bogus.
451 	 */
452 
453 	if ((start < map->min_offset) || (end > map->max_offset) ||
454 	    (start >= end))
455 		return (KERN_INVALID_ADDRESS);
456 
457 	/*
458 	 * Find the entry prior to the proposed starting address; if it's part
459 	 * of an existing entry, this range is bogus.
460 	 */
461 
462 	if (vm_map_lookup_entry(map, start, &temp_entry))
463 		return (KERN_NO_SPACE);
464 
465 	prev_entry = temp_entry;
466 
467 	/*
468 	 * Assert that the next entry doesn't overlap the end point.
469 	 */
470 
471 	if ((prev_entry->next != &map->header) &&
472 	    (prev_entry->next->start < end))
473 		return (KERN_NO_SPACE);
474 
475 	protoeflags = 0;
476 	if (cow & MAP_COPY_NEEDED)
477 		protoeflags |= MAP_ENTRY_NEEDS_COPY;
478 
479 	if (cow & MAP_COPY_ON_WRITE)
480 		protoeflags |= MAP_ENTRY_COW;
481 
482 	if (cow & MAP_NOFAULT)
483 		protoeflags |= MAP_ENTRY_NOFAULT;
484 
485 	/*
486 	 * See if we can avoid creating a new entry by extending one of our
487 	 * neighbors.  Or at least extend the object.
488 	 */
489 
490 	if ((object == NULL) &&
491 	    (prev_entry != &map->header) &&
492 	    (( prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
493 		((prev_entry->object.vm_object == NULL) ||
494 			(prev_entry->object.vm_object->type == OBJT_DEFAULT)) &&
495 	    (prev_entry->end == start) &&
496 	    (prev_entry->wired_count == 0)) {
497 
498 
499 		if ((protoeflags == prev_entry->eflags) &&
500 		    ((cow & MAP_NOFAULT) ||
501 		     vm_object_coalesce(prev_entry->object.vm_object,
502 					OFF_TO_IDX(prev_entry->offset),
503 					(vm_size_t) (prev_entry->end - prev_entry->start),
504 					(vm_size_t) (end - prev_entry->end)))) {
505 
506 			/*
507 			 * Coalesced the two objects.  Can we extend the
508 			 * previous map entry to include the new range?
509 			 */
510 			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
511 			    (prev_entry->protection == prot) &&
512 			    (prev_entry->max_protection == max)) {
513 
514 				map->size += (end - prev_entry->end);
515 				prev_entry->end = end;
516 				if ((cow & MAP_NOFAULT) == 0) {
517 					prev_object = prev_entry->object.vm_object;
518 					default_pager_convert_to_swapq(prev_object);
519 				}
520 				return (KERN_SUCCESS);
521 			}
522 			else {
523 				object = prev_entry->object.vm_object;
524 				offset = prev_entry->offset + (prev_entry->end -
525 							       prev_entry->start);
526 
527 				vm_object_reference(object);
528 			}
529 		}
530 	}
531 
532 	/*
533 	 * Create a new entry
534 	 */
535 
536 	new_entry = vm_map_entry_create(map);
537 	new_entry->start = start;
538 	new_entry->end = end;
539 
540 	new_entry->eflags = protoeflags;
541 	new_entry->object.vm_object = object;
542 	new_entry->offset = offset;
543 	if (object) {
544 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
545 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
546 		} else {
547 			vm_object_set_flag(object, OBJ_ONEMAPPING);
548 		}
549 	}
550 
551 	if (map->is_main_map) {
552 		new_entry->inheritance = VM_INHERIT_DEFAULT;
553 		new_entry->protection = prot;
554 		new_entry->max_protection = max;
555 		new_entry->wired_count = 0;
556 	}
557 	/*
558 	 * Insert the new entry into the list
559 	 */
560 
561 	vm_map_entry_link(map, prev_entry, new_entry);
562 	map->size += new_entry->end - new_entry->start;
563 
564 	/*
565 	 * Update the free space hint
566 	 */
567 	if ((map->first_free == prev_entry) &&
568 		(prev_entry->end >= new_entry->start))
569 		map->first_free = new_entry;
570 
571 	default_pager_convert_to_swapq(object);
572 	return (KERN_SUCCESS);
573 }
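
/*
 * An illustrative consequence of the coalescing logic above: two
 * abutting anonymous insertions with identical protections extend one
 * entry rather than creating two.  Hypothetical context: the map is
 * locked and start is page aligned.
 */
#if 0
	vm_map_insert(map, NULL, 0, start, start + PAGE_SIZE,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_insert(map, NULL, 0, start + PAGE_SIZE,
	    start + 2 * PAGE_SIZE, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* map->nentries has grown by one, not two */
#endif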
574 
575 /*
576  * Find sufficient space for `length' bytes in the given map, starting at
577  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
578  */
579 int
580 vm_map_findspace(map, start, length, addr)
581 	vm_map_t map;
582 	vm_offset_t start;
583 	vm_size_t length;
584 	vm_offset_t *addr;
585 {
586 	vm_map_entry_t entry, next;
587 	vm_offset_t end;
588 
589 	if (start < map->min_offset)
590 		start = map->min_offset;
591 	if (start > map->max_offset)
592 		return (1);
593 
594 	/*
595 	 * Look for the first possible address; if there's already something
596 	 * at this address, we have to start after it.
597 	 */
598 	if (start == map->min_offset) {
599 		if ((entry = map->first_free) != &map->header)
600 			start = entry->end;
601 	} else {
602 		vm_map_entry_t tmp;
603 
604 		if (vm_map_lookup_entry(map, start, &tmp))
605 			start = tmp->end;
606 		entry = tmp;
607 	}
608 
609 	/*
610 	 * Look through the rest of the map, trying to fit a new region in the
611 	 * gap between existing regions, or after the very last region.
612 	 */
613 	for (;; start = (entry = next)->end) {
614 		/*
615 		 * Find the end of the proposed new region.  Be sure we didn't
616 		 * go beyond the end of the map, or wrap around the address;
617 		 * if so, we lose.  Otherwise, if this is the last entry, or
618 		 * if the proposed new region fits before the next entry, we
619 		 * win.
620 		 */
621 		end = start + length;
622 		if (end > map->max_offset || end < start)
623 			return (1);
624 		next = entry->next;
625 		if (next == &map->header || next->start >= end)
626 			break;
627 	}
628 	SAVE_HINT(map, entry);
629 	*addr = start;
630 	if (map == kernel_map) {
631 		vm_offset_t ksize;
632 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
633 			pmap_growkernel(ksize);
634 		}
635 	}
636 	return (0);
637 }
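
/*
 * The usual pairing of vm_map_findspace() with vm_map_insert(), as a
 * sketch: the map lock is held across both calls so the hole found
 * cannot be claimed by anyone else in between.
 */
#if 0
	vm_offset_t addr;

	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
		(void) vm_map_insert(map, NULL, 0, addr, addr + size,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
#endif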
638 
639 /*
640  *	vm_map_find finds an unallocated region in the target address
641  *	map with the given length.  The search is defined to be
642  *	first-fit from the specified address; the region found is
643  *	returned in the same parameter.
644  *
645  */
646 int
647 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
648 	    vm_offset_t *addr,	/* IN/OUT */
649 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
650 	    vm_prot_t max, int cow)
651 {
652 	vm_offset_t start;
653 	int result, s = 0;
654 
655 	start = *addr;
656 
657 	if (map == kmem_map || map == mb_map)
658 		s = splvm();
659 
660 	vm_map_lock(map);
661 	if (find_space) {
662 		if (vm_map_findspace(map, start, length, addr)) {
663 			vm_map_unlock(map);
664 			if (map == kmem_map || map == mb_map)
665 				splx(s);
666 			return (KERN_NO_SPACE);
667 		}
668 		start = *addr;
669 	}
670 	result = vm_map_insert(map, object, offset,
671 		start, start + length, prot, max, cow);
672 	vm_map_unlock(map);
673 
674 	if (map == kmem_map || map == mb_map)
675 		splx(s);
676 
677 	return (result);
678 }
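
/*
 * A sketch of a typical anonymous allocation through vm_map_find();
 * "len" and "map" are hypothetical.  With find_space TRUE, *addr is
 * only a starting hint for the first-fit search.
 */
#if 0
	vm_offset_t addr = 0;
	int rv;

	rv = vm_map_find(map, NULL, 0, &addr, round_page(len),
	    TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (rv != KERN_SUCCESS) {
		/* allocation failed; handle KERN_NO_SPACE etc. */
	}
#endif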
679 
680 /*
681  *	vm_map_simplify_entry:
682  *
683  *	Simplify the given map entry by merging with either neighbor.
684  */
685 void
686 vm_map_simplify_entry(map, entry)
687 	vm_map_t map;
688 	vm_map_entry_t entry;
689 {
690 	vm_map_entry_t next, prev;
691 	vm_size_t prevsize, esize;
692 
693 	if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
694 		return;
695 
696 	prev = entry->prev;
697 	if (prev != &map->header) {
698 		prevsize = prev->end - prev->start;
699 		if ( (prev->end == entry->start) &&
700 		     (prev->object.vm_object == entry->object.vm_object) &&
701 		     (!prev->object.vm_object ||
702 				(prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
703 		     (!prev->object.vm_object ||
704 			(prev->offset + prevsize == entry->offset)) &&
705 		     (prev->eflags == entry->eflags) &&
706 		     (prev->protection == entry->protection) &&
707 		     (prev->max_protection == entry->max_protection) &&
708 		     (prev->inheritance == entry->inheritance) &&
709 		     (prev->wired_count == entry->wired_count)) {
710 			if (map->first_free == prev)
711 				map->first_free = entry;
712 			if (map->hint == prev)
713 				map->hint = entry;
714 			vm_map_entry_unlink(map, prev);
715 			entry->start = prev->start;
716 			entry->offset = prev->offset;
717 			if (prev->object.vm_object)
718 				vm_object_deallocate(prev->object.vm_object);
719 			vm_map_entry_dispose(map, prev);
720 		}
721 	}
722 
723 	next = entry->next;
724 	if (next != &map->header) {
725 		esize = entry->end - entry->start;
726 		if ((entry->end == next->start) &&
727 		    (next->object.vm_object == entry->object.vm_object) &&
728 		    (!next->object.vm_object ||
729 				(next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
730 		     (!entry->object.vm_object ||
731 			(entry->offset + esize == next->offset)) &&
732 		    (next->eflags == entry->eflags) &&
733 		    (next->protection == entry->protection) &&
734 		    (next->max_protection == entry->max_protection) &&
735 		    (next->inheritance == entry->inheritance) &&
736 		    (next->wired_count == entry->wired_count)) {
737 			if (map->first_free == next)
738 				map->first_free = entry;
739 			if (map->hint == next)
740 				map->hint = entry;
741 			vm_map_entry_unlink(map, next);
742 			entry->end = next->end;
743 			if (next->object.vm_object)
744 				vm_object_deallocate(next->object.vm_object);
745 			vm_map_entry_dispose(map, next);
746 		}
747 	}
748 }
749 /*
750  *	vm_map_clip_start:	[ internal use only ]
751  *
752  *	Asserts that the given entry begins at or after
753  *	the specified address; if necessary,
754  *	it splits the entry into two.
755  */
756 #define vm_map_clip_start(map, entry, startaddr) \
757 { \
758 	if (startaddr > entry->start) \
759 		_vm_map_clip_start(map, entry, startaddr); \
760 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
761 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
762 }
763 
764 /*
765  *	This routine is called only when it is known that
766  *	the entry must be split.
767  */
768 static void
769 _vm_map_clip_start(map, entry, start)
770 	vm_map_t map;
771 	vm_map_entry_t entry;
772 	vm_offset_t start;
773 {
774 	vm_map_entry_t new_entry;
775 
776 	/*
777 	 * Split off the front portion -- note that we must insert the new
778 	 * entry BEFORE this one, so that this entry has the specified
779 	 * starting address.
780 	 */
781 
782 	vm_map_simplify_entry(map, entry);
783 
784 	/*
785 	 * If there is no object backing this entry, we might as well create
786 	 * one now.  If we defer it, an object can get created after the map
787 	 * is clipped, and individual objects will be created for the split-up
788 	 * map.  This is a bit of a hack, but is also about the best place to
789 	 * put this improvement.
790 	 */
791 
792 	if (entry->object.vm_object == NULL) {
793 		vm_object_t object;
794 		object = vm_object_allocate(OBJT_DEFAULT,
795 				atop(entry->end - entry->start));
796 		entry->object.vm_object = object;
797 		entry->offset = 0;
798 	}
799 
800 	new_entry = vm_map_entry_create(map);
801 	*new_entry = *entry;
802 
803 	new_entry->end = start;
804 	entry->offset += (start - entry->start);
805 	entry->start = start;
806 
807 	vm_map_entry_link(map, entry->prev, new_entry);
808 
809 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
810 		if (new_entry->object.vm_object->ref_count == 1)
811 			vm_object_set_flag(new_entry->object.vm_object,
812 					   OBJ_ONEMAPPING);
813 		vm_object_reference(new_entry->object.vm_object);
814 	}
815 }
816 
817 /*
818  *	vm_map_clip_end:	[ internal use only ]
819  *
820  *	Asserts that the given entry ends at or before
821  *	the specified address; if necessary,
822  *	it splits the entry into two.
823  */
824 
825 #define vm_map_clip_end(map, entry, endaddr) \
826 { \
827 	if (endaddr < entry->end) \
828 		_vm_map_clip_end(map, entry, endaddr); \
829 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
830 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
831 }
832 
833 /*
834  *	This routine is called only when it is known that
835  *	the entry must be split.
836  */
837 static void
838 _vm_map_clip_end(map, entry, end)
839 	vm_map_t map;
840 	vm_map_entry_t entry;
841 	vm_offset_t end;
842 {
843 	vm_map_entry_t new_entry;
844 
845 	/*
846 	 * If there is no object backing this entry, we might as well create
847 	 * one now.  If we defer it, an object can get created after the map
848 	 * is clipped, and individual objects will be created for the split-up
849 	 * map.  This is a bit of a hack, but is also about the best place to
850 	 * put this improvement.
851 	 */
852 
853 	if (entry->object.vm_object == NULL) {
854 		vm_object_t object;
855 		object = vm_object_allocate(OBJT_DEFAULT,
856 				atop(entry->end - entry->start));
857 		entry->object.vm_object = object;
858 		entry->offset = 0;
859 	}
860 
861 	/*
862 	 * Create a new entry and insert it AFTER the specified entry
863 	 */
864 
865 	new_entry = vm_map_entry_create(map);
866 	*new_entry = *entry;
867 
868 	new_entry->start = entry->end = end;
869 	new_entry->offset += (end - entry->start);
870 
871 	vm_map_entry_link(map, entry, new_entry);
872 
873 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
874 		if (new_entry->object.vm_object->ref_count == 1)
875 			vm_object_set_flag(new_entry->object.vm_object,
876 					   OBJ_ONEMAPPING);
877 		vm_object_reference(new_entry->object.vm_object);
878 	}
879 }
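
/*
 * The canonical clip pattern used throughout this file, sketched with
 * hypothetical bounds s < e falling inside the entry.  Note that
 * _vm_map_clip_start advances entry->offset, keeping the object
 * offsets of the split entries contiguous.
 */
#if 0
	vm_map_clip_start(map, entry, s);	/* entry now starts at s */
	vm_map_clip_end(map, entry, e);		/* entry now ends at e */
	/* the entry now covers exactly [s, e) */
#endif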
880 
881 /*
882  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
883  *
884  *	Asserts that the starting and ending region
885  *	addresses fall within the valid range of the map.
886  */
887 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
888 		{					\
889 		if (start < vm_map_min(map))		\
890 			start = vm_map_min(map);	\
891 		if (end > vm_map_max(map))		\
892 			end = vm_map_max(map);		\
893 		if (start > end)			\
894 			start = end;			\
895 		}
896 
897 /*
898  *	vm_map_submap:		[ kernel use only ]
899  *
900  *	Mark the given range as handled by a subordinate map.
901  *
902  *	This range must have been created with vm_map_find,
903  *	and no other operations may have been performed on this
904  *	range prior to calling vm_map_submap.
905  *
906  *	Only a limited number of operations can be performed
907  *	within this range after calling vm_map_submap:
908  *		vm_fault
909  *	[Don't try vm_map_copy!]
910  *
911  *	To remove a submapping, one must first remove the
912  *	range from the superior map, and then destroy the
913  *	submap (if desired).  [Better yet, don't try it.]
914  */
915 int
916 vm_map_submap(map, start, end, submap)
917 	vm_map_t map;
918 	vm_offset_t start;
919 	vm_offset_t end;
920 	vm_map_t submap;
921 {
922 	vm_map_entry_t entry;
923 	int result = KERN_INVALID_ARGUMENT;
924 
925 	vm_map_lock(map);
926 
927 	VM_MAP_RANGE_CHECK(map, start, end);
928 
929 	if (vm_map_lookup_entry(map, start, &entry)) {
930 		vm_map_clip_start(map, entry, start);
931 	} else
932 		entry = entry->next;
933 
934 	vm_map_clip_end(map, entry, end);
935 
936 	if ((entry->start == start) && (entry->end == end) &&
937 	    ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
938 	    (entry->object.vm_object == NULL)) {
939 		entry->object.sub_map = submap;
940 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
941 		result = KERN_SUCCESS;
942 	}
943 	vm_map_unlock(map);
944 
945 	return (result);
946 }
947 
948 /*
949  *	vm_map_protect:
950  *
951  *	Sets the protection of the specified address
952  *	region in the target map.  If "set_max" is
953  *	specified, the maximum protection is to be set;
954  *	otherwise, only the current protection is affected.
955  */
956 int
957 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
958 	       vm_prot_t new_prot, boolean_t set_max)
959 {
960 	vm_map_entry_t current;
961 	vm_map_entry_t entry;
962 
963 	vm_map_lock(map);
964 
965 	VM_MAP_RANGE_CHECK(map, start, end);
966 
967 	if (vm_map_lookup_entry(map, start, &entry)) {
968 		vm_map_clip_start(map, entry, start);
969 	} else {
970 		entry = entry->next;
971 	}
972 
973 	/*
974 	 * Make a first pass to check for protection violations.
975 	 */
976 
977 	current = entry;
978 	while ((current != &map->header) && (current->start < end)) {
979 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
980 			vm_map_unlock(map);
981 			return (KERN_INVALID_ARGUMENT);
982 		}
983 		if ((new_prot & current->max_protection) != new_prot) {
984 			vm_map_unlock(map);
985 			return (KERN_PROTECTION_FAILURE);
986 		}
987 		current = current->next;
988 	}
989 
990 	/*
991 	 * Go back and fix up protections. [Note that clipping is not
992 	 * necessary the second time.]
993 	 */
994 
995 	current = entry;
996 
997 	while ((current != &map->header) && (current->start < end)) {
998 		vm_prot_t old_prot;
999 
1000 		vm_map_clip_end(map, current, end);
1001 
1002 		old_prot = current->protection;
1003 		if (set_max)
1004 			current->protection =
1005 			    (current->max_protection = new_prot) &
1006 			    old_prot;
1007 		else
1008 			current->protection = new_prot;
1009 
1010 		/*
1011 		 * Update physical map if necessary. Worry about copy-on-write
1012 		 * here -- CHECK THIS XXX
1013 		 */
1014 
1015 		if (current->protection != old_prot) {
1016 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1017 							VM_PROT_ALL)
1018 
1019 			if (current->eflags & MAP_ENTRY_IS_A_MAP) {
1020 				vm_map_entry_t share_entry;
1021 				vm_offset_t share_end;
1022 
1023 				vm_map_lock(current->object.share_map);
1024 				(void) vm_map_lookup_entry(
1025 				    current->object.share_map,
1026 				    current->offset,
1027 				    &share_entry);
1028 				share_end = current->offset +
1029 				    (current->end - current->start);
1030 				while ((share_entry !=
1031 					&current->object.share_map->header) &&
1032 				    (share_entry->start < share_end)) {
1033 
1034 					pmap_protect(map->pmap,
1035 					    (qmax(share_entry->start,
1036 						    current->offset) -
1037 						current->offset +
1038 						current->start),
1039 					    min(share_entry->end,
1040 						share_end) -
1041 					    current->offset +
1042 					    current->start,
1043 					    current->protection &
1044 					    MASK(share_entry));
1045 
1046 					share_entry = share_entry->next;
1047 				}
1048 				vm_map_unlock(current->object.share_map);
1049 			} else
1050 				pmap_protect(map->pmap, current->start,
1051 				    current->end,
1052 				    current->protection & MASK(entry));
1053 #undef	MASK
1054 		}
1055 
1056 		vm_map_simplify_entry(map, current);
1057 
1058 		current = current->next;
1059 	}
1060 
1061 	map->timestamp++;
1062 	vm_map_unlock(map);
1063 	return (KERN_SUCCESS);
1064 }
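
/*
 * A sketch of both vm_map_protect() modes: with set_max FALSE only
 * the current protection changes; with set_max TRUE the new maximum
 * is installed and masked into the current protection as well.
 */
#if 0
	/* make [start, end) read-only, leaving max_protection alone */
	(void) vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
	/* lower the ceiling too; current becomes VM_PROT_READ & old */
	(void) vm_map_protect(map, start, end, VM_PROT_READ, TRUE);
#endif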
1065 
1066 /*
1067  *	vm_map_madvise:
1068  *
1069  * 	This routine traverses a process's map, handling the madvise
1070  *	system call.
1071  */
1072 void
1073 vm_map_madvise(map, pmap, start, end, advise)
1074 	vm_map_t map;
1075 	pmap_t pmap;
1076 	vm_offset_t start, end;
1077 	int advise;
1078 {
1079 	vm_map_entry_t current;
1080 	vm_map_entry_t entry;
1081 
1082 	vm_map_lock(map);
1083 
1084 	VM_MAP_RANGE_CHECK(map, start, end);
1085 
1086 	if (vm_map_lookup_entry(map, start, &entry)) {
1087 		vm_map_clip_start(map, entry, start);
1088 	} else
1089 		entry = entry->next;
1090 
1091 	for(current = entry;
1092 		(current != &map->header) && (current->start < end);
1093 		current = current->next) {
1094 		vm_size_t size;
1095 
1096 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1097 			continue;
1098 		}
1099 
1100 		vm_map_clip_end(map, current, end);
1101 		size = current->end - current->start;
1102 
1103 		/*
1104 		 * Create an object if needed
1105 		 */
1106 		if (current->object.vm_object == NULL) {
1107 			vm_object_t object;
1108 			if ((advise == MADV_FREE) || (advise == MADV_DONTNEED))
1109 				continue;
1110 			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
1111 			current->object.vm_object = object;
1112 			current->offset = 0;
1113 		}
1114 
1115 		switch (advise) {
1116 		case MADV_NORMAL:
1117 			current->object.vm_object->behavior = OBJ_NORMAL;
1118 			break;
1119 		case MADV_SEQUENTIAL:
1120 			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
1121 			break;
1122 		case MADV_RANDOM:
1123 			current->object.vm_object->behavior = OBJ_RANDOM;
1124 			break;
1125 		/*
1126 		 * Right now, we could handle DONTNEED and WILLNEED with common code.
1127 		 * They are mostly the same, except for the potential async reads (NYI).
1128 		 */
1129 		case MADV_FREE:
1130 		case MADV_DONTNEED:
1131 			{
1132 				vm_pindex_t pindex;
1133 				int count;
1134 				pindex = OFF_TO_IDX(current->offset);
1135 				count = OFF_TO_IDX(size);
1136 				/*
1137 				 * MADV_DONTNEED removes the page from all
1138 				 * pmaps, so pmap_remove is not necessary.
1139 				 */
1140 				vm_object_madvise(current->object.vm_object,
1141 					pindex, count, advise);
1142 			}
1143 			break;
1144 
1145 		case MADV_WILLNEED:
1146 			{
1147 				vm_pindex_t pindex;
1148 				int count;
1149 				pindex = OFF_TO_IDX(current->offset);
1150 				count = OFF_TO_IDX(size);
1151 				vm_object_madvise(current->object.vm_object,
1152 					pindex, count, advise);
1153 				pmap_object_init_pt(pmap, current->start,
1154 					current->object.vm_object, pindex,
1155 					(count << PAGE_SHIFT), 0);
1156 			}
1157 			break;
1158 
1159 		default:
1160 			break;
1161 		}
1162 	}
1163 
1164 	map->timestamp++;
1165 	vm_map_simplify_entry(map, entry);
1166 	vm_map_unlock(map);
1167 	return;
1168 }
1169 
1170 
1171 /*
1172  *	vm_map_inherit:
1173  *
1174  *	Sets the inheritance of the specified address
1175  *	range in the target map.  Inheritance
1176  *	affects how the map will be shared with
1177  *	child maps at the time of vm_map_fork.
1178  */
1179 int
1180 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1181 	       vm_inherit_t new_inheritance)
1182 {
1183 	vm_map_entry_t entry;
1184 	vm_map_entry_t temp_entry;
1185 
1186 	switch (new_inheritance) {
1187 	case VM_INHERIT_NONE:
1188 	case VM_INHERIT_COPY:
1189 	case VM_INHERIT_SHARE:
1190 		break;
1191 	default:
1192 		return (KERN_INVALID_ARGUMENT);
1193 	}
1194 
1195 	vm_map_lock(map);
1196 
1197 	VM_MAP_RANGE_CHECK(map, start, end);
1198 
1199 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1200 		entry = temp_entry;
1201 		vm_map_clip_start(map, entry, start);
1202 	} else
1203 		entry = temp_entry->next;
1204 
1205 	while ((entry != &map->header) && (entry->start < end)) {
1206 		vm_map_clip_end(map, entry, end);
1207 
1208 		entry->inheritance = new_inheritance;
1209 
1210 		entry = entry->next;
1211 	}
1212 
1213 	vm_map_simplify_entry(map, temp_entry);
1214 	map->timestamp++;
1215 	vm_map_unlock(map);
1216 	return (KERN_SUCCESS);
1217 }
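
/*
 * A minimal sketch of minherit(2)-style use: after this call a child
 * created at fork time gets its own copy of [start, end) instead of
 * sharing it with the parent.
 */
#if 0
	(void) vm_map_inherit(map, start, end, VM_INHERIT_COPY);
#endif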
1218 
1219 /*
1220  * Implement the semantics of mlock
1221  */
1222 int
1223 vm_map_user_pageable(map, start, end, new_pageable)
1224 	vm_map_t map;
1225 	vm_offset_t start;
1226 	vm_offset_t end;
1227 	boolean_t new_pageable;
1228 {
1229 	vm_map_entry_t entry;
1230 	vm_map_entry_t start_entry;
1231 	vm_offset_t estart;
1232 	int rv;
1233 
1234 	vm_map_lock(map);
1235 	VM_MAP_RANGE_CHECK(map, start, end);
1236 
1237 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1238 		vm_map_unlock(map);
1239 		return (KERN_INVALID_ADDRESS);
1240 	}
1241 
1242 	if (new_pageable) {
1243 
1244 		entry = start_entry;
1245 		vm_map_clip_start(map, entry, start);
1246 
1247 		/*
1248 		 * Now decrement the wiring count for each region. If a region
1249 		 * becomes completely unwired, unwire its physical pages and
1250 		 * mappings.
1251 		 */
1252 		vm_map_set_recursive(map);
1253 
1254 		entry = start_entry;
1255 		while ((entry != &map->header) && (entry->start < end)) {
1256 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1257 				vm_map_clip_end(map, entry, end);
1258 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1259 				entry->wired_count--;
1260 				if (entry->wired_count == 0)
1261 					vm_fault_unwire(map, entry->start, entry->end);
1262 			}
1263 			vm_map_simplify_entry(map,entry);
1264 			entry = entry->next;
1265 		}
1266 		vm_map_clear_recursive(map);
1267 	} else {
1268 
1269 		entry = start_entry;
1270 
1271 		while ((entry != &map->header) && (entry->start < end)) {
1272 
1273 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1274 				entry = entry->next;
1275 				continue;
1276 			}
1277 
1278 			if (entry->wired_count != 0) {
1279 				entry->wired_count++;
1280 				entry->eflags |= MAP_ENTRY_USER_WIRED;
1281 				entry = entry->next;
1282 				continue;
1283 			}
1284 
1285 			/* Here on entry being newly wired */
1286 
1287 			if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1288 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1289 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1290 
1291 					vm_object_shadow(&entry->object.vm_object,
1292 					    &entry->offset,
1293 					    atop(entry->end - entry->start));
1294 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1295 
1296 				} else if (entry->object.vm_object == NULL) {
1297 
1298 					entry->object.vm_object =
1299 					    vm_object_allocate(OBJT_DEFAULT,
1300 						atop(entry->end - entry->start));
1301 					entry->offset = (vm_offset_t) 0;
1302 
1303 				}
1304 				default_pager_convert_to_swapq(entry->object.vm_object);
1305 			}
1306 
1307 			vm_map_clip_start(map, entry, start);
1308 			vm_map_clip_end(map, entry, end);
1309 
1310 			entry->wired_count++;
1311 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1312 			estart = entry->start;
1313 
1314 			/* First we need to allow map modifications */
1315 			vm_map_set_recursive(map);
1316 			vm_map_lock_downgrade(map);
1317 			map->timestamp++;
1318 
1319 			rv = vm_fault_user_wire(map, entry->start, entry->end);
1320 			if (rv) {
1321 
1322 				entry->wired_count--;
1323 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1324 
1325 				vm_map_clear_recursive(map);
1326 				vm_map_unlock(map);
1327 
1328 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
1329 				return rv;
1330 			}
1331 
1332 			vm_map_clear_recursive(map);
1333 			if (vm_map_lock_upgrade(map)) {
1334 				vm_map_lock(map);
1335 				if (vm_map_lookup_entry(map, estart, &entry)
1336 				    == FALSE) {
1337 					vm_map_unlock(map);
1338 					(void) vm_map_user_pageable(map,
1339 								    start,
1340 								    estart,
1341 								    TRUE);
1342 					return (KERN_INVALID_ADDRESS);
1343 				}
1344 			}
1345 			vm_map_simplify_entry(map,entry);
1346 		}
1347 	}
1348 	map->timestamp++;
1349 	vm_map_unlock(map);
1350 	return KERN_SUCCESS;
1351 }
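
/*
 * A sketch of the mlock(2)/munlock(2) pairing this routine backs:
 * new_pageable FALSE wires the range, TRUE unwires it again.
 */
#if 0
	rv = vm_map_user_pageable(map, start, end, FALSE);	/* mlock */
	if (rv == KERN_SUCCESS)
		rv = vm_map_user_pageable(map, start, end, TRUE); /* munlock */
#endif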
1352 
1353 /*
1354  *	vm_map_pageable:
1355  *
1356  *	Sets the pageability of the specified address
1357  *	range in the target map.  Regions specified
1358  *	as not pageable require locked-down physical
1359  *	memory and physical page maps.
1360  *
1361  *	The map must not be locked, but a reference
1362  *	must remain to the map throughout the call.
1363  */
1364 int
1365 vm_map_pageable(map, start, end, new_pageable)
1366 	vm_map_t map;
1367 	vm_offset_t start;
1368 	vm_offset_t end;
1369 	boolean_t new_pageable;
1370 {
1371 	vm_map_entry_t entry;
1372 	vm_map_entry_t start_entry;
1373 	vm_offset_t failed = 0;
1374 	int rv;
1375 
1376 	vm_map_lock(map);
1377 
1378 	VM_MAP_RANGE_CHECK(map, start, end);
1379 
1380 	/*
1381 	 * Only one pageability change may take place at one time, since
1382 	 * vm_fault assumes it will be called only once for each
1383 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
1384 	 * changing the pageability for the entire region.  We do so before
1385 	 * making any changes.
1386 	 */
1387 
1388 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1389 		vm_map_unlock(map);
1390 		return (KERN_INVALID_ADDRESS);
1391 	}
1392 	entry = start_entry;
1393 
1394 	/*
1395 	 * Actions are rather different for wiring and unwiring, so we have
1396 	 * two separate cases.
1397 	 */
1398 
1399 	if (new_pageable) {
1400 
1401 		vm_map_clip_start(map, entry, start);
1402 
1403 		/*
1404 		 * Unwiring.  First ensure that the range to be unwired is
1405 		 * really wired down and that there are no holes.
1406 		 */
1407 		while ((entry != &map->header) && (entry->start < end)) {
1408 
1409 			if (entry->wired_count == 0 ||
1410 			    (entry->end < end &&
1411 				(entry->next == &map->header ||
1412 				    entry->next->start > entry->end))) {
1413 				vm_map_unlock(map);
1414 				return (KERN_INVALID_ARGUMENT);
1415 			}
1416 			entry = entry->next;
1417 		}
1418 
1419 		/*
1420 		 * Now decrement the wiring count for each region. If a region
1421 		 * becomes completely unwired, unwire its physical pages and
1422 		 * mappings.
1423 		 */
1424 		vm_map_set_recursive(map);
1425 
1426 		entry = start_entry;
1427 		while ((entry != &map->header) && (entry->start < end)) {
1428 			vm_map_clip_end(map, entry, end);
1429 
1430 			entry->wired_count--;
1431 			if (entry->wired_count == 0)
1432 				vm_fault_unwire(map, entry->start, entry->end);
1433 
1434 			entry = entry->next;
1435 		}
1436 		vm_map_simplify_entry(map, start_entry);
1437 		vm_map_clear_recursive(map);
1438 	} else {
1439 		/*
1440 		 * Wiring.  We must do this in two passes:
1441 		 *
1442 		 * 1.  Holding the write lock, we create any shadow or zero-fill
1443 		 * objects that need to be created. Then we clip each map
1444 		 * entry to the region to be wired and increment its wiring
1445 		 * count.  We create objects before clipping the map entries
1446 		 * to avoid object proliferation.
1447 		 *
1448 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
1449 		 * fault in the pages for any newly wired area (wired_count is
1450 		 * 1).
1451 		 *
1452 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
1453 		 * deadlock with another process that may have faulted on one
1454 		 * of the pages to be wired (it would mark the page busy,
1455 		 * blocking us, then in turn block on the map lock that we
1456 		 * hold).  Because of problems in the recursive lock package,
1457 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1458 		 * any actions that require the write lock must be done
1459 		 * beforehand.  Because we keep the read lock on the map, the
1460 		 * copy-on-write status of the entries we modify here cannot
1461 		 * change.
1462 		 */
1463 
1464 		/*
1465 		 * Pass 1.
1466 		 */
1467 		while ((entry != &map->header) && (entry->start < end)) {
1468 			if (entry->wired_count == 0) {
1469 
1470 				/*
1471 				 * Perform actions of vm_map_lookup that need
1472 				 * the write lock on the map: create a shadow
1473 				 * object for a copy-on-write region, or an
1474 				 * object for a zero-fill region.
1475 				 *
1476 				 * We don't have to do this for entries that
1477 				 * point to sharing maps, because we won't
1478 				 * hold the lock on the sharing map.
1479 				 */
1480 				if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1481 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1482 					if (copyflag &&
1483 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1484 
1485 						vm_object_shadow(&entry->object.vm_object,
1486 						    &entry->offset,
1487 						    atop(entry->end - entry->start));
1488 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1489 					} else if (entry->object.vm_object == NULL) {
1490 						entry->object.vm_object =
1491 						    vm_object_allocate(OBJT_DEFAULT,
1492 							atop(entry->end - entry->start));
1493 						entry->offset = (vm_offset_t) 0;
1494 					}
1495 					default_pager_convert_to_swapq(entry->object.vm_object);
1496 				}
1497 			}
1498 			vm_map_clip_start(map, entry, start);
1499 			vm_map_clip_end(map, entry, end);
1500 			entry->wired_count++;
1501 
1502 			/*
1503 			 * Check for holes
1504 			 */
1505 			if (entry->end < end &&
1506 			    (entry->next == &map->header ||
1507 				entry->next->start > entry->end)) {
1508 				/*
1509 				 * Found one.  Object creation actions do not
1510 				 * need to be undone, but the wired counts
1511 				 * need to be restored.
1512 				 */
1513 				while (entry != &map->header && entry->end > start) {
1514 					entry->wired_count--;
1515 					entry = entry->prev;
1516 				}
1517 				map->timestamp++;
1518 				vm_map_unlock(map);
1519 				return (KERN_INVALID_ARGUMENT);
1520 			}
1521 			entry = entry->next;
1522 		}
1523 
1524 		/*
1525 		 * Pass 2.
1526 		 */
1527 
1528 		/*
1529 		 * HACK HACK HACK HACK
1530 		 *
1531 		 * If we are wiring in the kernel map or a submap of it,
1532 		 * unlock the map to avoid deadlocks.  We trust that the
1533 		 * kernel is well-behaved, and therefore will not do
1534 		 * anything destructive to this region of the map while
1535 		 * we have it unlocked.  We cannot trust user processes
1536 		 * to do the same.
1537 		 *
1538 		 * HACK HACK HACK HACK
1539 		 */
1540 		if (vm_map_pmap(map) == kernel_pmap) {
1541 			vm_map_unlock(map);	/* trust me ... */
1542 		} else {
1543 			vm_map_set_recursive(map);
1544 			vm_map_lock_downgrade(map);
1545 		}
1546 
1547 		rv = 0;
1548 		entry = start_entry;
1549 		while (entry != &map->header && entry->start < end) {
1550 			/*
1551 			 * If vm_fault_wire fails for any page we need to undo
1552 			 * what has been done.  We decrement the wiring count
1553 			 * for those pages which have not yet been wired (now)
1554 			 * and unwire those that have (later).
1555 			 *
1556 			 * XXX this violates the locking protocol on the map,
1557 			 * needs to be fixed.
1558 			 */
1559 			if (rv)
1560 				entry->wired_count--;
1561 			else if (entry->wired_count == 1) {
1562 				rv = vm_fault_wire(map, entry->start, entry->end);
1563 				if (rv) {
1564 					failed = entry->start;
1565 					entry->wired_count--;
1566 				}
1567 			}
1568 			entry = entry->next;
1569 		}
1570 
1571 		if (vm_map_pmap(map) == kernel_pmap) {
1572 			vm_map_lock(map);
1573 		} else {
1574 			vm_map_clear_recursive(map);
1575 		}
1576 		if (rv) {
1577 			vm_map_unlock(map);
1578 			(void) vm_map_pageable(map, start, failed, TRUE);
1579 			return (rv);
1580 		}
1581 		vm_map_simplify_entry(map, start_entry);
1582 	}
1583 
1584 	vm_map_unlock(map);
1585 
1586 	map->timestamp++;
1587 	return (KERN_SUCCESS);
1588 }
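
/*
 * A sketch of kernel-initiated wiring: as above, FALSE wires and TRUE
 * unwires, and a failed wire is rolled back before returning.  The
 * page-aligned bounds are hypothetical.
 */
#if 0
	rv = vm_map_pageable(map, trunc_page(start), round_page(end),
	    FALSE);					/* wire */
	if (rv == KERN_SUCCESS)
		(void) vm_map_pageable(map, trunc_page(start),
		    round_page(end), TRUE);		/* unwire */
#endif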
1589 
1590 /*
1591  * vm_map_clean
1592  *
1593  * Push any dirty cached pages in the address range to their pager.
1594  * If syncio is TRUE, dirty pages are written synchronously.
1595  * If invalidate is TRUE, any cached pages are freed as well.
1596  *
1597  * Returns an error if any part of the specified range is not mapped.
1598  */
1599 int
1600 vm_map_clean(map, start, end, syncio, invalidate)
1601 	vm_map_t map;
1602 	vm_offset_t start;
1603 	vm_offset_t end;
1604 	boolean_t syncio;
1605 	boolean_t invalidate;
1606 {
1607 	vm_map_entry_t current;
1608 	vm_map_entry_t entry;
1609 	vm_size_t size;
1610 	vm_object_t object;
1611 	vm_ooffset_t offset;
1612 
1613 	vm_map_lock_read(map);
1614 	VM_MAP_RANGE_CHECK(map, start, end);
1615 	if (!vm_map_lookup_entry(map, start, &entry)) {
1616 		vm_map_unlock_read(map);
1617 		return (KERN_INVALID_ADDRESS);
1618 	}
1619 	/*
1620 	 * Make a first pass to check for holes.
1621 	 */
1622 	for (current = entry; current->start < end; current = current->next) {
1623 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1624 			vm_map_unlock_read(map);
1625 			return (KERN_INVALID_ARGUMENT);
1626 		}
1627 		if (end > current->end &&
1628 		    (current->next == &map->header ||
1629 			current->end != current->next->start)) {
1630 			vm_map_unlock_read(map);
1631 			return (KERN_INVALID_ADDRESS);
1632 		}
1633 	}
1634 
1635 	if (invalidate)
1636 		pmap_remove(vm_map_pmap(map), start, end);
1637 	/*
1638 	 * Make a second pass, cleaning/uncaching pages from the indicated
1639 	 * objects as we go.
1640 	 */
1641 	for (current = entry; current->start < end; current = current->next) {
1642 		offset = current->offset + (start - current->start);
1643 		size = (end <= current->end ? end : current->end) - start;
1644 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1645 			vm_map_t smap;
1646 			vm_map_entry_t tentry;
1647 			vm_size_t tsize;
1648 
1649 			smap = current->object.share_map;
1650 			vm_map_lock_read(smap);
1651 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1652 			tsize = tentry->end - offset;
1653 			if (tsize < size)
1654 				size = tsize;
1655 			object = tentry->object.vm_object;
1656 			offset = tentry->offset + (offset - tentry->start);
1657 			vm_map_unlock_read(smap);
1658 		} else {
1659 			object = current->object.vm_object;
1660 		}
1661 		/*
1662 		 * Note that there is absolutely no sense in writing out
1663 		 * anonymous objects, so we track down the vnode object
1664 		 * to write out.
1665 		 * We invalidate (remove) all pages from the address space
1666 		 * anyway, for semantic correctness.
1667 		 */
1668 		while (object->backing_object) {
1669 			offset += object->backing_object_offset;
1670 			object = object->backing_object;
1671 			if (object->size < OFF_TO_IDX( offset + size))
1672 				size = IDX_TO_OFF(object->size) - offset;
1673 		}
1674 		if (object && (object->type == OBJT_VNODE)) {
1675 			/*
1676 			 * Flush pages if writing is allowed. XXX should we continue
1677 			 * on an error?
1678 			 *
1679 			 * XXX Doing async I/O and then removing all the pages from
1680 			 *     the object before it completes is probably a very bad
1681 			 *     idea.
1682 			 */
1683 			if (current->protection & VM_PROT_WRITE) {
1684 				int flags;
1685 				if (object->type == OBJT_VNODE)
1686 					vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
1687 				flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1688 				flags |= invalidate ? OBJPC_INVAL : 0;
1689 				vm_object_page_clean(object,
1690 					OFF_TO_IDX(offset),
1691 					OFF_TO_IDX(offset + size + PAGE_MASK),
1692 					flags);
1693 				if (invalidate) {
1694 					vm_object_pip_wait(object, "objmcl");
1695 					vm_object_page_remove(object,
1696 						OFF_TO_IDX(offset),
1697 						OFF_TO_IDX(offset + size + PAGE_MASK),
1698 						FALSE);
1699 				}
1700 				if (object->type == OBJT_VNODE)
1701 					VOP_UNLOCK(object->handle, 0, curproc);
1702 			}
1703 		}
1704 		start += size;
1705 	}
1706 
1707 	vm_map_unlock_read(map);
1708 	return (KERN_SUCCESS);
1709 }
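
/*
 * A sketch of an msync(2)-style flush via vm_map_clean(): write dirty
 * pages in [start, end) synchronously and drop the cached copies.
 */
#if 0
	rv = vm_map_clean(map, trunc_page(start), round_page(end),
	    TRUE /* syncio */, TRUE /* invalidate */);
#endif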
1710 
1711 /*
1712  *	vm_map_entry_unwire:	[ internal use only ]
1713  *
1714  *	Make the region specified by this entry pageable.
1715  *
1716  *	The map in question should be locked.
1717  *	[This is the reason for this routine's existence.]
1718  */
1719 static void
1720 vm_map_entry_unwire(map, entry)
1721 	vm_map_t map;
1722 	vm_map_entry_t entry;
1723 {
1724 	vm_fault_unwire(map, entry->start, entry->end);
1725 	entry->wired_count = 0;
1726 }
1727 
1728 /*
1729  *	vm_map_entry_delete:	[ internal use only ]
1730  *
1731  *	Deallocate the given entry from the target map.
1732  */
1733 static void
1734 vm_map_entry_delete(map, entry)
1735 	vm_map_t map;
1736 	vm_map_entry_t entry;
1737 {
1738 	vm_map_entry_unlink(map, entry);
1739 	map->size -= entry->end - entry->start;
1740 
1741 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1742 		vm_object_deallocate(entry->object.vm_object);
1743 	}
1744 
1745 	vm_map_entry_dispose(map, entry);
1746 }
1747 
1748 /*
1749  *	vm_map_delete:	[ internal use only ]
1750  *
1751  *	Deallocates the given address range from the target
1752  *	map.
1753  *
1754  *	When called with a sharing map, removes pages from
1755  *	that region from all physical maps.
1756  */
1757 int
1758 vm_map_delete(map, start, end)
1759 	vm_map_t map;
1760 	vm_offset_t start;
1761 	vm_offset_t end;
1762 {
1763 	vm_object_t object;
1764 	vm_map_entry_t entry;
1765 	vm_map_entry_t first_entry;
1766 
1767 	/*
1768 	 * Find the start of the region, and clip it
1769 	 */
1770 
1771 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
1772 		entry = first_entry->next;
1773 		object = entry->object.vm_object;
1774 		if (object && (object->ref_count == 1) && (object->shadow_count == 0))
1775 			vm_object_set_flag(object, OBJ_ONEMAPPING);
1776 	} else {
1777 		entry = first_entry;
1778 		vm_map_clip_start(map, entry, start);
1779 		/*
1780 		 * Fix the lookup hint now, rather than each time though the
1781 		 * loop.
1782 		 */
1783 		SAVE_HINT(map, entry->prev);
1784 	}
1785 
1786 	/*
1787 	 * Save the free space hint
1788 	 */
1789 
1790 	if (entry == &map->header) {
1791 		map->first_free = &map->header;
1792 	} else if (map->first_free->start >= start) {
1793 		map->first_free = entry->prev;
1794 	}
1795 
1796 	/*
1797 	 * Step through all entries in this region
1798 	 */
1799 
1800 	while ((entry != &map->header) && (entry->start < end)) {
1801 		vm_map_entry_t next;
1802 		vm_offset_t s, e;
1803 		vm_pindex_t offidxstart, offidxend, count;
1804 
1805 		vm_map_clip_end(map, entry, end);
1806 
1807 		s = entry->start;
1808 		e = entry->end;
1809 		next = entry->next;
1810 
1811 		offidxstart = OFF_TO_IDX(entry->offset);
1812 		count = OFF_TO_IDX(e - s);
1813 		object = entry->object.vm_object;
1814 
1815 		/*
1816 		 * Unwire before removing addresses from the pmap; otherwise,
1817 		 * unwiring will put the entries back in the pmap.
1818 		 */
1819 		if (entry->wired_count != 0) {
1820 			vm_map_entry_unwire(map, entry);
1821 		}
1822 
1823 		offidxend = offidxstart + count;
1824 		/*
1825 		 * If this is a sharing map, we must remove *all* references
1826 		 * to this data, since we can't find all of the physical maps
1827 		 * which are sharing it.
1828 		 */
1829 
1830 		if ((object == kernel_object) || (object == kmem_object)) {
1831 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
1832 		} else if (!map->is_main_map) {
1833 			vm_object_pmap_remove(object, offidxstart, offidxend);
1834 		} else {
1835 			pmap_remove(map->pmap, s, e);
1836 			if (object &&
1837 				((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) &&
1838 				((object->type == OBJT_SWAP) || (object->type == OBJT_DEFAULT))) {
1839 				vm_object_collapse(object);
1840 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
1841 				if (object->type == OBJT_SWAP) {
1842 					swap_pager_freespace(object, offidxstart, count);
1843 				}
1844 
1845 				if ((offidxend >= object->size) &&
1846 				    (offidxstart < object->size)) {
1847 					object->size = offidxstart;
1848 				}
1849 			}
1850 		}
1851 
1852 		/*
1853 		 * Delete the entry (which may delete the object) only after
1854 		 * removing all pmap entries pointing to its pages.
1855 		 * (Otherwise, its page frames may be reallocated, and any
1856 		 * modify bits will be set in the wrong object!)
1857 		 */
1858 		vm_map_entry_delete(map, entry);
1859 		entry = next;
1860 	}
1861 	return (KERN_SUCCESS);
1862 }
1863 
1864 /*
1865  *	vm_map_remove:
1866  *
1867  *	Remove the given address range from the target map.
1868  *	This is the exported form of vm_map_delete.
1869  */
1870 int
1871 vm_map_remove(map, start, end)
1872 	vm_map_t map;
1873 	vm_offset_t start;
1874 	vm_offset_t end;
1875 {
1876 	int result, s = 0;
1877 
1878 	if (map == kmem_map || map == mb_map)
1879 		s = splvm();
1880 
1881 	vm_map_lock(map);
1882 	VM_MAP_RANGE_CHECK(map, start, end);
1883 	result = vm_map_delete(map, start, end);
1884 	vm_map_unlock(map);
1885 
1886 	if (map == kmem_map || map == mb_map)
1887 		splx(s);
1888 
1889 	return (result);
1890 }
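
/*
 * Editor's note: an illustrative sketch (not compiled in) of typical use
 * of vm_map_remove().  The helper and its arguments are hypothetical;
 * kernel_map, trunc_page() and round_page() are the usual kernel
 * facilities.
 */
#if 0
static void
example_release_range(vm_offset_t addr, vm_size_t size)
{
	/* Drop the mappings and map entries covering the rounded range. */
	(void) vm_map_remove(kernel_map, trunc_page(addr),
	    round_page(addr + size));
}
#endif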
1891 
1892 /*
1893  *	vm_map_check_protection:
1894  *
1895  *	Assert that the target map allows the specified
1896  *	privilege on the entire address region given.
1897  *	The entire region must be allocated.
1898  */
1899 boolean_t
1900 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
1901 			vm_prot_t protection)
1902 {
1903 	vm_map_entry_t entry;
1904 	vm_map_entry_t tmp_entry;
1905 
1906 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1907 		return (FALSE);
1908 	}
1909 	entry = tmp_entry;
1910 
1911 	while (start < end) {
1912 		if (entry == &map->header) {
1913 			return (FALSE);
1914 		}
1915 		/*
1916 		 * No holes allowed!
1917 		 */
1918 
1919 		if (start < entry->start) {
1920 			return (FALSE);
1921 		}
1922 		/*
1923 		 * Check protection associated with entry.
1924 		 */
1925 
1926 		if ((entry->protection & protection) != protection) {
1927 			return (FALSE);
1928 		}
1929 		/* go to next entry */
1930 
1931 		start = entry->end;
1932 		entry = entry->next;
1933 	}
1934 	return (TRUE);
1935 }
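
/*
 * Editor's note: a minimal sketch (not compiled in) of a caller using
 * vm_map_check_protection() to validate a user range before touching it.
 * The helper and its arguments are hypothetical; locking is elided for
 * brevity.
 */
#if 0
static int
example_range_is_readable(struct proc *p, vm_offset_t uaddr, vm_size_t len)
{
	if (!vm_map_check_protection(&p->p_vmspace->vm_map,
	    trunc_page(uaddr), round_page(uaddr + len), VM_PROT_READ))
		return (EFAULT);
	return (0);
}
#endif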
1936 
1937 /*
1938  * Split the pages in a map entry into a new object.  This affords
1939  * easier removal of unused pages, and keeps object inheritance from
1940  * being a negative impact on memory usage.
1941  */
1942 static void
1943 vm_map_split(entry)
1944 	vm_map_entry_t entry;
1945 {
1946 	vm_page_t m;
1947 	vm_object_t orig_object, new_object, source;
1948 	vm_offset_t s, e;
1949 	vm_pindex_t offidxstart, offidxend, idx;
1950 	vm_size_t size;
1951 	vm_ooffset_t offset;
1952 
1953 	orig_object = entry->object.vm_object;
1954 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1955 		return;
1956 	if (orig_object->ref_count <= 1)
1957 		return;
1958 
1959 	offset = entry->offset;
1960 	s = entry->start;
1961 	e = entry->end;
1962 
1963 	offidxstart = OFF_TO_IDX(offset);
1964 	offidxend = offidxstart + OFF_TO_IDX(e - s);
1965 	size = offidxend - offidxstart;
1966 
1967 	new_object = vm_pager_allocate(orig_object->type,
1968 		NULL, size, VM_PROT_ALL, 0LL);
1969 	if (new_object == NULL)
1970 		return;
1971 
1972 	source = orig_object->backing_object;
1973 	if (source != NULL) {
1974 		vm_object_reference(source);	/* Referenced by new_object */
1975 		TAILQ_INSERT_TAIL(&source->shadow_head,
1976 				  new_object, shadow_list);
1977 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
1978 		new_object->backing_object_offset =
1979 			orig_object->backing_object_offset + offidxstart;
1980 		new_object->backing_object = source;
1981 		source->shadow_count++;
1982 		source->generation++;
1983 	}
1984 
1985 	for (idx = 0; idx < size; idx++) {
1988 	retry:
1989 		m = vm_page_lookup(orig_object, offidxstart + idx);
1990 		if (m == NULL)
1991 			continue;
1992 		if (m->flags & PG_BUSY) {
1993 			vm_page_flag_set(m, PG_WANTED);
1994 			tsleep(m, PVM, "spltwt", 0);
1995 			goto retry;
1996 		}
1997 
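		/*
		 * Editor's note: the page stays busy across the protect and
		 * rename below; the wakeup loop after the copy releases it.
		 */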
1998 		vm_page_busy(m);
1999 		vm_page_protect(m, VM_PROT_NONE);
2000 		vm_page_rename(m, new_object, idx);
2001 		m->dirty = VM_PAGE_BITS_ALL;
2002 		vm_page_busy(m);
2003 	}
2004 
2005 	if (orig_object->type == OBJT_SWAP) {
2006 		vm_object_pip_add(orig_object, 1);
2007 		/*
2008 		 * copy orig_object pages into new_object
2009 		 * and destroy unneeded pages in
2010 		 * shadow object.
2011 		 */
2012 		swap_pager_copy(orig_object, OFF_TO_IDX(orig_object->paging_offset),
2013 		    new_object, OFF_TO_IDX(new_object->paging_offset),
2014 		    offidxstart, 0);
2015 		vm_object_pip_wakeup(orig_object);
2016 	}
2017 
2018 	for (idx = 0; idx < size; idx++) {
2019 		m = vm_page_lookup(new_object, idx);
2020 		if (m) {
2021 			vm_page_wakeup(m);
2022 		}
2023 	}
2024 
2025 	entry->object.vm_object = new_object;
2026 	entry->offset = 0LL;
2027 	vm_object_deallocate(orig_object);
2028 }
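
/*
 * Editor's note: a worked instance of the index arithmetic above, assuming
 * 4K pages.  For entry->offset == 0x2000 and a 0x3000-byte entry,
 * offidxstart = OFF_TO_IDX(0x2000) = 2, size = 3 and offidxend = 5, so
 * pages 2 through 4 of orig_object are renamed into new_object as its
 * pages 0 through 2.
 */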
2029 
2030 /*
2031  *	vm_map_copy_entry:
2032  *
2033  *	Copies the contents of the source entry to the destination
2034  *	entry.  The entries *must* be aligned properly.
2035  */
2036 static void
2037 vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2038 	vm_map_t src_map, dst_map;
2039 	vm_map_entry_t src_entry, dst_entry;
2040 {
2041 	vm_object_t src_object;
2042 
2043 	if ((dst_entry->eflags|src_entry->eflags) &
2044 		(MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
2045 		return;
2046 
2047 	if (src_entry->wired_count == 0) {
2048 
2049 		/*
2050 		 * If the source entry is marked needs_copy, it is already
2051 		 * write-protected.
2052 		 */
2053 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2054 			pmap_protect(src_map->pmap,
2055 			    src_entry->start,
2056 			    src_entry->end,
2057 			    src_entry->protection & ~VM_PROT_WRITE);
2058 		}
2059 
2060 		/*
2061 		 * Make a copy of the object.
2062 		 */
2063 		if ((src_object = src_entry->object.vm_object) != NULL) {
2064 
2065 			if ((src_object->handle == NULL) &&
2066 				(src_object->type == OBJT_DEFAULT ||
2067 				 src_object->type == OBJT_SWAP)) {
2068 				vm_object_collapse(src_object);
2069 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2070 					vm_map_split(src_entry);
2071 					src_map->timestamp++;
2072 					src_object = src_entry->object.vm_object;
2073 				}
2074 			}
2075 
2076 			vm_object_reference(src_object);
2077 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2078 			dst_entry->object.vm_object = src_object;
2079 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2080 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2081 			dst_entry->offset = src_entry->offset;
2082 		} else {
2083 			dst_entry->object.vm_object = NULL;
2084 			dst_entry->offset = 0;
2085 		}
2086 
2087 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2088 		    dst_entry->end - dst_entry->start, src_entry->start);
2089 	} else {
2090 		/*
2091 		 * Of course, wired down pages can't be set copy-on-write.
2092 		 * Cause wired pages to be copied into the new map by
2093 		 * simulating faults (the new pages are pageable)
2094 		 */
2095 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2096 	}
2097 }
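
/*
 * Editor's note: in the unwired path above, both entries end up referencing
 * the same object with MAP_ENTRY_NEEDS_COPY set and writes pmap-protected;
 * the actual copy is deferred until a write fault shadows the object (see
 * the MAP_ENTRY_NEEDS_COPY handling in vm_map_lookup() below).
 */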
2098 
2099 /*
2100  * vmspace_fork:
2101  * Create a new process vmspace structure and vm_map
2102  * based on those of an existing process.  The new map
2103  * is based on the old map, according to the inheritance
2104  * values on the regions in that map.
2105  *
2106  * The source map must not be locked.
2107  */
2108 struct vmspace *
2109 vmspace_fork(vm1)
2110 	struct vmspace *vm1;
2111 {
2112 	struct vmspace *vm2;
2113 	vm_map_t old_map = &vm1->vm_map;
2114 	vm_map_t new_map;
2115 	vm_map_entry_t old_entry;
2116 	vm_map_entry_t new_entry;
2117 	pmap_t new_pmap;
2118 	vm_object_t object;
2119 
2120 	vm_map_lock(old_map);
2121 
2122 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2123 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2124 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2125 	new_pmap = &vm2->vm_pmap;	/* XXX */
2126 	new_map = &vm2->vm_map;	/* XXX */
2127 	new_map->timestamp = 1;
2128 
2129 	old_entry = old_map->header.next;
2130 
2131 	while (old_entry != &old_map->header) {
2132 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2133 			panic("vmspace_fork: encountered a submap");
2134 
2135 		switch (old_entry->inheritance) {
2136 		case VM_INHERIT_NONE:
2137 			break;
2138 
2139 		case VM_INHERIT_SHARE:
2140 			/*
2141 			 * Clone the entry, creating the shared object if necessary.
2142 			 */
2143 			object = old_entry->object.vm_object;
2144 			if (object == NULL) {
2145 				object = vm_object_allocate(OBJT_DEFAULT,
2146 					atop(old_entry->end - old_entry->start));
2147 				old_entry->object.vm_object = object;
2148 				old_entry->offset = (vm_offset_t) 0;
2149 			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2150 				vm_object_shadow(&old_entry->object.vm_object,
2151 					&old_entry->offset,
2152 					atop(old_entry->end - old_entry->start));
2153 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2154 				object = old_entry->object.vm_object;
2155 			}
2156 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2157 
2158 			/*
2159 			 * Clone the entry, referencing the sharing map.
2160 			 */
2161 			new_entry = vm_map_entry_create(new_map);
2162 			*new_entry = *old_entry;
2163 			new_entry->wired_count = 0;
2164 			vm_object_reference(object);
2165 
2166 			/*
2167 			 * Insert the entry into the new map -- we know we're
2168 			 * inserting at the end of the new map.
2169 			 */
2170 
2171 			vm_map_entry_link(new_map, new_map->header.prev,
2172 			    new_entry);
2173 
2174 			/*
2175 			 * Update the physical map
2176 			 */
2177 
2178 			pmap_copy(new_map->pmap, old_map->pmap,
2179 			    new_entry->start,
2180 			    (old_entry->end - old_entry->start),
2181 			    old_entry->start);
2182 			break;
2183 
2184 		case VM_INHERIT_COPY:
2185 			/*
2186 			 * Clone the entry and link into the map.
2187 			 */
2188 			new_entry = vm_map_entry_create(new_map);
2189 			*new_entry = *old_entry;
2190 			new_entry->wired_count = 0;
2191 			new_entry->object.vm_object = NULL;
2192 			new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
2193 			vm_map_entry_link(new_map, new_map->header.prev,
2194 			    new_entry);
2195 			vm_map_copy_entry(old_map, new_map, old_entry,
2196 			    new_entry);
2197 			break;
2198 		}
2199 		old_entry = old_entry->next;
2200 	}
2201 
2202 	new_map->size = old_map->size;
2203 	vm_map_unlock(old_map);
2204 	old_map->timestamp++;
2205 
2206 	return (vm2);
2207 }
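
/*
 * Editor's note: an illustrative sketch (not compiled in).  The
 * inheritance values consulted above are set beforehand, e.g. with
 * vm_map_inherit(); a VM_INHERIT_SHARE region is shared with the child,
 * while VM_INHERIT_COPY regions are copied lazily via vm_map_copy_entry().
 * The helper and its arguments are hypothetical.
 */
#if 0
static struct vmspace *
example_fork_with_shared_region(struct proc *p, vm_offset_t start,
    vm_offset_t end)
{
	/* Mark [start, end) to be shared rather than copied on fork. */
	(void) vm_map_inherit(&p->p_vmspace->vm_map, start, end,
	    VM_INHERIT_SHARE);
	return (vmspace_fork(p->p_vmspace));
}
#endif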
2208 
2209 /*
2210  * Unshare the specified VM space for exec.  A new, empty vmspace is
2211  * always created; the old one is released once its other users are done.
2212  */
2213 
2214 void
2215 vmspace_exec(struct proc *p) {
2216 	struct vmspace *oldvmspace = p->p_vmspace;
2217 	struct vmspace *newvmspace;
2218 	vm_map_t map = &p->p_vmspace->vm_map;
2219 
2220 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2221 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2222 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2223 	/*
2224 	 * This code is written this way for prototype purposes.  The
2225 	 * goal is to avoid running down the vmspace here, and instead to
2226 	 * let the other processes that are still using it finally run
2227 	 * it down.  Even though there is little or no chance of blocking
2228 	 * here, it is a good idea to keep this form for future mods.
2229 	 */
2230 	vmspace_free(oldvmspace);
2231 	p->p_vmspace = newvmspace;
2232 	if (p == curproc)
2233 		pmap_activate(p);
2234 }
2235 
2236 /*
2237  * Unshare the specified VM space for forcing COW.  This
2238  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2239  */
2240 
2241 void
2242 vmspace_unshare(struct proc *p) {
2243 	struct vmspace *oldvmspace = p->p_vmspace;
2244 	struct vmspace *newvmspace;
2245 
2246 	if (oldvmspace->vm_refcnt == 1)
2247 		return;
2248 	newvmspace = vmspace_fork(oldvmspace);
2249 	vmspace_free(oldvmspace);
2250 	p->p_vmspace = newvmspace;
2251 	if (p == curproc)
2252 		pmap_activate(p);
2253 }
2254 
2255 
2256 /*
2257  *	vm_map_lookup:
2258  *
2259  *	Finds the VM object, offset, and
2260  *	protection for a given virtual address in the
2261  *	specified map, assuming a page fault of the
2262  *	type specified.
2263  *
2264  *	Leaves the map in question locked for read; return
2265  *	values are guaranteed until a vm_map_lookup_done
2266  *	call is performed.  Note that the map argument
2267  *	is in/out; the returned map must be used in
2268  *	the call to vm_map_lookup_done.
2269  *
2270  *	A handle (out_entry) is returned for use in
2271  *	vm_map_lookup_done, to make that fast.
2272  *
2273  *	If a lookup is requested with "write protection"
2274  *	specified, the map may be changed to perform virtual
2275  *	copying operations, although the data referenced will
2276  *	remain the same.
2277  */
2278 int
2279 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2280 	      vm_offset_t vaddr,
2281 	      vm_prot_t fault_typea,
2282 	      vm_map_entry_t *out_entry,	/* OUT */
2283 	      vm_object_t *object,		/* OUT */
2284 	      vm_pindex_t *pindex,		/* OUT */
2285 	      vm_prot_t *out_prot,		/* OUT */
2286 	      boolean_t *wired)			/* OUT */
2287 {
2288 	vm_map_t share_map;
2289 	vm_offset_t share_offset;
2290 	vm_map_entry_t entry;
2291 	vm_map_t map = *var_map;
2292 	vm_prot_t prot;
2293 	boolean_t su;
2294 	vm_prot_t fault_type = fault_typea;
2295 
2296 RetryLookup:;
2297 
2298 	/*
2299 	 * Lookup the faulting address.
2300 	 */
2301 
2302 	vm_map_lock_read(map);
2303 
2304 #define	RETURN(why) \
2305 		{ \
2306 		vm_map_unlock_read(map); \
2307 		return(why); \
2308 		}
2309 
2310 	/*
2311 	 * If the map has an interesting hint, try it before calling the
2312 	 * full-blown lookup routine.
2313 	 */
2314 
2315 	entry = map->hint;
2316 
2317 	*out_entry = entry;
2318 
2319 	if ((entry == &map->header) ||
2320 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2321 		vm_map_entry_t tmp_entry;
2322 
2323 		/*
2324 		 * Entry was either not a valid hint, or the vaddr was not
2325 		 * contained in the entry, so do a full lookup.
2326 		 */
2327 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2328 			RETURN(KERN_INVALID_ADDRESS);
2329 
2330 		entry = tmp_entry;
2331 		*out_entry = entry;
2332 	}
2333 
2334 	/*
2335 	 * Handle submaps.
2336 	 */
2337 
2338 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2339 		vm_map_t old_map = map;
2340 
2341 		*var_map = map = entry->object.sub_map;
2342 		vm_map_unlock_read(old_map);
2343 		goto RetryLookup;
2344 	}
2345 
2346 	/*
2347 	 * Check whether this task is allowed to have this page.
2348 	 * Note the special case for MAP_ENTRY_COW
2349 	 * pages with an override.  This is to implement a forced
2350 	 * COW for debuggers.
2351 	 */
2352 
2353 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2354 		prot = entry->max_protection;
2355 	else
2356 		prot = entry->protection;
2357 
2358 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2359 	if ((fault_type & prot) != fault_type) {
2360 		RETURN(KERN_PROTECTION_FAILURE);
2361 	}
2362 
2363 	if (entry->wired_count && (fault_type & VM_PROT_WRITE) &&
2364 	    (entry->eflags & MAP_ENTRY_COW) &&
2365 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2366 		RETURN(KERN_PROTECTION_FAILURE);
2367 	}
2368 
2369 	/*
2370 	 * If this page is not pageable, we have to get it for all possible
2371 	 * accesses.
2372 	 */
2373 
2374 	*wired = (entry->wired_count != 0);
2375 	if (*wired)
2376 		prot = fault_type = entry->protection;
2377 
2378 	/*
2379 	 * If we don't already have a VM object, track it down.
2380 	 */
2381 
2382 	su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
2383 	if (su) {
2384 		share_map = map;
2385 		share_offset = vaddr;
2386 	} else {
2387 		vm_map_entry_t share_entry;
2388 
2389 		/*
2390 		 * Compute the sharing map, and offset into it.
2391 		 */
2392 
2393 		share_map = entry->object.share_map;
2394 		share_offset = (vaddr - entry->start) + entry->offset;
2395 
2396 		/*
2397 		 * Look for the backing store object and offset
2398 		 */
2399 
2400 		vm_map_lock_read(share_map);
2401 
2402 		if (!vm_map_lookup_entry(share_map, share_offset,
2403 			&share_entry)) {
2404 			vm_map_unlock_read(share_map);
2405 			RETURN(KERN_INVALID_ADDRESS);
2406 		}
2407 		entry = share_entry;
2408 	}
2409 
2410 	/*
2411 	 * If the entry was copy-on-write, we either copy it now or demote access:
2412 	 */
2413 
2414 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2415 		/*
2416 		 * If we want to write the page, we may as well handle that
2417 		 * now since we've got the sharing map locked.
2418 		 *
2419 		 * If we don't need to write the page, we just demote the
2420 		 * permissions allowed.
2421 		 */
2422 
2423 		if (fault_type & VM_PROT_WRITE) {
2424 			/*
2425 			 * Make a new object, and place it in the object
2426 			 * chain.  Note that no new references have appeared
2427 			 * -- one just moved from the share map to the new
2428 			 * object.
2429 			 */
2430 
2431 			if (vm_map_lock_upgrade(share_map)) {
2432 				if (share_map != map)
2433 					vm_map_unlock_read(map);
2434 
2435 				goto RetryLookup;
2436 			}
2437 			vm_object_shadow(
2438 			    &entry->object.vm_object,
2439 			    &entry->offset,
2440 			    atop(entry->end - entry->start));
2441 
2442 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2443 			vm_map_lock_downgrade(share_map);
2444 		} else {
2445 			/*
2446 			 * We're attempting to read a copy-on-write page --
2447 			 * don't allow writes.
2448 			 */
2449 
2450 			prot &= ~VM_PROT_WRITE;
2451 		}
2452 	}
2453 
2454 	/*
2455 	 * Create an object if necessary.
2456 	 */
2457 	if (entry->object.vm_object == NULL) {
2458 
2459 		if (vm_map_lock_upgrade(share_map)) {
2460 			if (share_map != map)
2461 				vm_map_unlock_read(map);
2462 			goto RetryLookup;
2463 		}
2464 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2465 		    atop(entry->end - entry->start));
2466 		entry->offset = 0;
2467 		vm_map_lock_downgrade(share_map);
2468 	}
2469 
2470 	if (entry->object.vm_object->type == OBJT_DEFAULT)
2471 		default_pager_convert_to_swapq(entry->object.vm_object);
2472 	/*
2473 	 * Return the object/offset from this entry.  If the entry was
2474 	 * copy-on-write or empty, it has been fixed up.
2475 	 */
2476 
2477 	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
2478 	*object = entry->object.vm_object;
2479 
2480 	/*
2481 	 * Return the protection that this access is being granted.
2482 	 */
2483 
2484 	*out_prot = prot;
2485 	return (KERN_SUCCESS);
2486 
2487 #undef	RETURN
2488 }
2489 
2490 /*
2491  *	vm_map_lookup_done:
2492  *
2493  *	Releases locks acquired by a vm_map_lookup
2494  *	(according to the handle returned by that lookup).
2495  */
2496 
2497 void
2498 vm_map_lookup_done(map, entry)
2499 	vm_map_t map;
2500 	vm_map_entry_t entry;
2501 {
2502 	/*
2503 	 * If this entry references a map, unlock it first.
2504 	 */
2505 
2506 	if (entry->eflags & MAP_ENTRY_IS_A_MAP)
2507 		vm_map_unlock_read(entry->object.share_map);
2508 
2509 	/*
2510 	 * Unlock the main-level map
2511 	 */
2512 
2513 	vm_map_unlock_read(map);
2514 }
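
/*
 * Editor's note: a minimal sketch (not compiled in) of the lookup/done
 * protocol described above, modeled on a fault handler.  All names are
 * hypothetical; note that the map handed to vm_map_lookup_done() must be
 * the (possibly updated) one returned through var_map.
 */
#if 0
static int
example_resolve(vm_map_t map, vm_offset_t va)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... use (object, pindex); the map stays read-locked here ... */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif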
2515 
2516 /*
2517  * Implement uiomove with VM operations.  This handles (and its collateral
2518  * changes support) every combination of source object modification and
2519  * COW-type operation.
2520  */
2521 int
2522 vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
2523 	vm_map_t mapa;
2524 	vm_object_t srcobject;
2525 	off_t cp;
2526 	int cnta;
2527 	vm_offset_t uaddra;
2528 	int *npages;
2529 {
2530 	vm_map_t map;
2531 	vm_object_t first_object, oldobject, object;
2532 	vm_map_entry_t entry;
2533 	vm_prot_t prot;
2534 	boolean_t wired;
2535 	int tcnt, rv;
2536 	vm_offset_t uaddr, start, end, tend;
2537 	vm_pindex_t first_pindex, osize, oindex;
2538 	off_t ooffset;
2539 	int cnt;
2540 
2541 	if (npages)
2542 		*npages = 0;
2543 
2544 	cnt = cnta;
2545 	uaddr = uaddra;
2546 
2547 	while (cnt > 0) {
2548 		map = mapa;
2549 
2550 		if ((vm_map_lookup(&map, uaddr,
2551 			VM_PROT_READ, &entry, &first_object,
2552 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
2553 			return EFAULT;
2554 		}
2555 
2556 		vm_map_clip_start(map, entry, uaddr);
2557 
2558 		tcnt = cnt;
2559 		tend = uaddr + tcnt;
2560 		if (tend > entry->end) {
2561 			tcnt = entry->end - uaddr;
2562 			tend = entry->end;
2563 		}
2564 
2565 		vm_map_clip_end(map, entry, tend);
2566 
2567 		start = entry->start;
2568 		end = entry->end;
2569 
2570 		osize = atop(tcnt);
2571 
2572 		oindex = OFF_TO_IDX(cp);
2573 		if (npages) {
2574 			vm_pindex_t idx;
2575 			for (idx = 0; idx < osize; idx++) {
2576 				vm_page_t m;
2577 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
2578 					vm_map_lookup_done(map, entry);
2579 					return 0;
2580 				}
2581 				if ((m->flags & PG_BUSY) ||
2582 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
2583 					vm_map_lookup_done(map, entry);
2584 					return 0;
2585 				}
2586 			}
2587 		}
2588 
2589 /*
2590  * If we are changing an existing map entry, just redirect
2591  * the object, and change mappings.
2592  */
2593 		if ((first_object->type == OBJT_VNODE) &&
2594 			((oldobject = entry->object.vm_object) == first_object)) {
2595 
2596 			if ((entry->offset != cp) || (oldobject != srcobject)) {
2597 				/*
2598 				 * Remove the old window into the file.
2599 				 */
2600 				pmap_remove(map->pmap, uaddr, tend);
2601 
2602 				/*
2603 				 * Force copy-on-write for mmapped regions.
2604 				 */
2605 				vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
2606 
2607 				/*
2608 				 * Point the object appropriately.
2609 				 */
2610 				if (oldobject != srcobject) {
2611 
2612 					/*
2613 					 * Set the object optimization hint flag.
2614 					 */
2615 					vm_object_set_flag(srcobject, OBJ_OPT);
2616 					vm_object_reference(srcobject);
2617 					entry->object.vm_object = srcobject;
2618 
2619 					if (oldobject) {
2620 						vm_object_deallocate(oldobject);
2621 					}
2622 				}
2623 
2624 				entry->offset = cp;
2625 				map->timestamp++;
2626 			} else {
2627 				pmap_remove(map->pmap, uaddr, tend);
2628 			}
2629 
2630 		} else if ((first_object->ref_count == 1) &&
2631 			(first_object->size == osize) &&
2632 			((first_object->type == OBJT_DEFAULT) ||
2633 				(first_object->type == OBJT_SWAP)) ) {
2634 
2635 			oldobject = first_object->backing_object;
2636 
2637 			if ((first_object->backing_object_offset != cp) ||
2638 				(oldobject != srcobject)) {
2639 				/*
2640 				 * Remove the old window into the file.
2641 				 */
2642 				pmap_remove(map->pmap, uaddr, tend);
2643 
2644 				/*
2645 				 * Remove unneeded old pages
2646 				 */
2647 				if (first_object->resident_page_count) {
2648 					vm_object_page_remove(first_object, 0, 0, 0);
2649 				}
2650 
2651 				/*
2652 				 * Invalidate swap space
2653 				 */
2654 				if (first_object->type == OBJT_SWAP) {
2655 					swap_pager_freespace(first_object,
2656 						OFF_TO_IDX(first_object->paging_offset),
2657 						first_object->size);
2658 				}
2659 
2660 				/*
2661 				 * Force copy-on-write for mmapped regions.
2662 				 */
2663 				vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
2664 
2665 				/*
2666 				 * Point the object appropriately.
2667 				 */
2668 				if (oldobject != srcobject) {
2669 
2670 					/*
2671 					 * Set the object optimization hint flag.
2672 					 */
2673 					vm_object_set_flag(srcobject, OBJ_OPT);
2674 					vm_object_reference(srcobject);
2675 
2676 					if (oldobject) {
2677 						TAILQ_REMOVE(&oldobject->shadow_head,
2678 							first_object, shadow_list);
2679 						oldobject->shadow_count--;
2680 						vm_object_deallocate(oldobject);
2681 					}
2682 
2683 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
2684 						first_object, shadow_list);
2685 					srcobject->shadow_count++;
2686 
2687 					first_object->backing_object = srcobject;
2688 				}
2689 				first_object->backing_object_offset = cp;
2690 				map->timestamp++;
2691 			} else {
2692 				pmap_remove(map->pmap, uaddr, tend);
2693 			}
2694 /*
2695  * Otherwise, we have to do a logical mmap.
2696  */
2697 		} else {
2698 
2699 			vm_object_set_flag(srcobject, OBJ_OPT);
2700 			vm_object_reference(srcobject);
2701 
2702 			pmap_remove(map->pmap, uaddr, tend);
2703 
2704 			vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
2705 			vm_map_lock_upgrade(map);
2706 
2707 			if (entry == &map->header) {
2708 				map->first_free = &map->header;
2709 			} else if (map->first_free->start >= start) {
2710 				map->first_free = entry->prev;
2711 			}
2712 
2713 			SAVE_HINT(map, entry->prev);
2714 			vm_map_entry_delete(map, entry);
2715 
2716 			object = srcobject;
2717 			ooffset = cp;
2718 #if 0
2719 			vm_object_shadow(&object, &ooffset, osize);
2720 #endif
2721 
2722 			rv = vm_map_insert(map, object, ooffset, start, tend,
2723 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
2724 
2725 			if (rv != KERN_SUCCESS)
2726 				panic("vm_uiomove: could not insert new entry: %d", rv);
2727 		}
2728 
2729 /*
2730  * Map the window directly, if it is already in memory
2731  */
2732 		pmap_object_init_pt(map->pmap, uaddr,
2733 			srcobject, oindex, tcnt, 0);
2734 
2735 		map->timestamp++;
2736 		vm_map_unlock(map);
2737 
2738 		cnt -= tcnt;
2739 		uaddr += tcnt;
2740 		cp += tcnt;
2741 		if (npages)
2742 			*npages += osize;
2743 	}
2744 	return 0;
2745 }
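
/*
 * Editor's note: an illustrative sketch (not compiled in) of a zero-copy
 * style caller of vm_uiomove(), mapping "cnt" bytes of a file object at a
 * user buffer instead of copying.  Every name here is hypothetical.
 */
#if 0
static int
example_map_window(struct proc *p, vm_object_t fileobj, off_t foff,
    vm_offset_t ubase, int cnt)
{
	int npages;

	return (vm_uiomove(&p->p_vmspace->vm_map, fileobj, foff, cnt,
	    ubase, &npages));
}
#endif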
2746 
2747 /*
2748  * Performs the copy_on_write operations necessary to allow the virtual copies
2749  * into user space to work.  This has to be called for write(2) system calls
2750  * from other processes, file unlinking, and file size shrinkage.
2751  */
2752 void
2753 vm_freeze_copyopts(object, froma, toa)
2754 	vm_object_t object;
2755 	vm_pindex_t froma, toa;
2756 {
2757 	int rv;
2758 	vm_object_t robject;
2759 	vm_pindex_t idx;
2760 
2761 	if ((object == NULL) ||
2762 		((object->flags & OBJ_OPT) == 0))
2763 		return;
2764 
2765 	if (object->shadow_count > object->ref_count)
2766 		panic("vm_freeze_copyopts: sc > rc");
2767 
2768 	while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
2769 		vm_pindex_t bo_pindex;
2770 		vm_page_t m_in, m_out;
2771 
2772 		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
2773 
2774 		vm_object_reference(robject);
2775 
2776 		vm_object_pip_wait(robject, "objfrz");
2777 
2778 		if (robject->ref_count == 1) {
2779 			vm_object_deallocate(robject);
2780 			continue;
2781 		}
2782 
2783 		vm_object_pip_add(robject, 1);
2784 
2785 		for (idx = 0; idx < robject->size; idx++) {
2786 
2787 m_outretry:
2788 			m_out = vm_page_grab(robject, idx,
2789 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2790 
2791 			if (m_out->valid == 0) {
2792 m_inretry:
2793 				m_in = vm_page_grab(object, bo_pindex + idx,
2794 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2795 				if (m_in->valid == 0) {
2796 					rv = vm_pager_get_pages(object, &m_in, 1, 0);
2797 					if (rv != VM_PAGER_OK) {
2798 						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (u_long)m_in->pindex);
2799 						continue;
2800 					}
2801 					vm_page_deactivate(m_in);
2802 				}
2803 
2804 				vm_page_protect(m_in, VM_PROT_NONE);
2805 				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
2806 				m_out->valid = m_in->valid;
2807 				m_out->dirty = VM_PAGE_BITS_ALL;
2808 
2809 				vm_page_activate(m_out);
2810 
2811 				vm_page_wakeup(m_in);
2812 			}
2813 			vm_page_wakeup(m_out);
2814 		}
2815 
2816 		object->shadow_count--;
2817 		object->ref_count--;
2818 		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
2819 		robject->backing_object = NULL;
2820 		robject->backing_object_offset = 0;
2821 
2822 		vm_object_pip_wakeup(robject);
2823 		vm_object_deallocate(robject);
2824 	}
2825 
2826 	vm_object_clear_flag(object, OBJ_OPT);
2827 }
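
/*
 * Editor's note: an illustrative sketch (not compiled in) of the call the
 * comment above describes: before data in "object" changes (a write(2)
 * from another process, unlink, or truncation), freeze any outstanding
 * virtual copies.  The helper and its arguments are hypothetical.
 */
#if 0
static void
example_freeze_before_modify(vm_object_t object, off_t offset, off_t size)
{
	if (object->flags & OBJ_OPT)
		vm_freeze_copyopts(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size));
}
#endif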
2828 
2829 #include "opt_ddb.h"
2830 #ifdef DDB
2831 #include <sys/kernel.h>
2832 
2833 #include <ddb/ddb.h>
2834 
2835 /*
2836  *	vm_map_print:	[ debug ]
2837  */
2838 DB_SHOW_COMMAND(map, vm_map_print)
2839 {
2840 	static int nlines;
2841 	/* XXX convert args. */
2842 	vm_map_t map = (vm_map_t)addr;
2843 	boolean_t full = have_addr;
2844 
2845 	vm_map_entry_t entry;
2846 
2847 	db_iprintf("%s map %p: pmap=%p, nentries=%d, version=%u\n",
2848 	    (map->is_main_map ? "Task" : "Share"), (void *)map,
2849 	    (void *)map->pmap, map->nentries, map->timestamp);
2850 	nlines++;
2851 
2852 	if (!full && db_indent)
2853 		return;
2854 
2855 	db_indent += 2;
2856 	for (entry = map->header.next; entry != &map->header;
2857 	    entry = entry->next) {
2858 #if 0
2859 		if (nlines > 18) {
2860 			db_printf("--More--");
2861 			cngetc();
2862 			db_printf("\r");
2863 			nlines = 0;
2864 		}
2865 #endif
2866 
2867 		db_iprintf("map entry %p: start=%p, end=%p\n",
2868 		    (void *)entry, (void *)entry->start, (void *)entry->end);
2869 		nlines++;
2870 		if (map->is_main_map) {
2871 			static char *inheritance_name[4] =
2872 			{"share", "copy", "none", "donate_copy"};
2873 
2874 			db_iprintf(" prot=%x/%x/%s",
2875 			    entry->protection,
2876 			    entry->max_protection,
2877 			    inheritance_name[entry->inheritance]);
2878 			if (entry->wired_count != 0)
2879 				db_printf(", wired");
2880 		}
2881 		if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
2882 			/* XXX no %qd in kernel.  Truncate entry->offset. */
2883 			db_printf(", share=%p, offset=0x%lx\n",
2884 			    (void *)entry->object.share_map,
2885 			    (long)entry->offset);
2886 			nlines++;
2887 			if ((entry->prev == &map->header) ||
2888 			    ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
2889 			    (entry->prev->object.share_map !=
2890 				entry->object.share_map)) {
2891 				db_indent += 2;
2892 				vm_map_print((db_expr_t)(intptr_t)
2893 					     entry->object.share_map,
2894 					     full, 0, (char *)0);
2895 				db_indent -= 2;
2896 			}
2897 		} else {
2898 			/* XXX no %qd in kernel.  Truncate entry->offset. */
2899 			db_printf(", object=%p, offset=0x%lx",
2900 			    (void *)entry->object.vm_object,
2901 			    (long)entry->offset);
2902 			if (entry->eflags & MAP_ENTRY_COW)
2903 				db_printf(", copy (%s)",
2904 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
2905 			db_printf("\n");
2906 			nlines++;
2907 
2908 			if ((entry->prev == &map->header) ||
2909 			    (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
2910 			    (entry->prev->object.vm_object !=
2911 				entry->object.vm_object)) {
2912 				db_indent += 2;
2913 				vm_object_print((db_expr_t)(intptr_t)
2914 						entry->object.vm_object,
2915 						full, 0, (char *)0);
2916 				nlines += 4;
2917 				db_indent -= 2;
2918 			}
2919 		}
2920 	}
2921 	db_indent -= 2;
2922 	if (db_indent == 0)
2923 		nlines = 0;
2924 }
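
/*
 * Editor's note: from DDB this is invoked as "show map <address>"; giving
 * an address both selects the map and sets "full", enabling the recursive
 * object and sub-map printing above.
 */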
2925 
2926 
2927 DB_SHOW_COMMAND(procvm, procvm)
2928 {
2929 	struct proc *p;
2930 
2931 	if (have_addr) {
2932 		p = (struct proc *) addr;
2933 	} else {
2934 		p = curproc;
2935 	}
2936 
2937 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
2938 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
2939 	    (void *)&p->p_vmspace->vm_pmap);
2940 
2941 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
2942 }
2943 
2944 #endif /* DDB */
2945