xref: /freebsd/sys/vm/vm_map.c (revision 77a0943ded95b9e6438f7db70c4a28e4d93946d4)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD$
65  */
66 
67 /*
68  *	Virtual memory mapping module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc.h>
74 #include <sys/vmmeter.h>
75 #include <sys/mman.h>
76 #include <sys/vnode.h>
77 #include <sys/resourcevar.h>
78 
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <sys/lock.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_object.h>
86 #include <vm/vm_pager.h>
87 #include <vm/vm_kern.h>
88 #include <vm/vm_extern.h>
89 #include <vm/swap_pager.h>
90 #include <vm/vm_zone.h>
91 
92 /*
93  *	Virtual memory maps provide for the mapping, protection,
94  *	and sharing of virtual memory objects.  In addition,
95  *	this module provides for an efficient virtual copy of
96  *	memory from one map to another.
97  *
98  *	Synchronization is required prior to most operations.
99  *
100  *	Maps consist of an ordered doubly-linked list of simple
101  *	entries; a single hint is used to speed up lookups.
102  *
103  *	Since portions of maps are specified by start/end addresses,
104  *	which may not align with existing map entries, all
105  *	routines merely "clip" entries to these start/end values.
106  *	[That is, an entry is split into two, bordering at a
107  *	start or end value.]  Note that these clippings may not
108  *	always be necessary (as the two resulting entries are then
109  *	not changed); however, the clipping is done for convenience.
110  *
111  *	As mentioned above, virtual copy operations are performed
112  *	by copying VM object references from one map to
113  *	another, and then marking both regions as copy-on-write.
114  */
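
/*
 *	A minimal sketch of the canonical range operation built from the
 *	primitives below (lookup, clip, iterate); "map", "start" and "end"
 *	are placeholders.  This is the pattern vm_map_protect(),
 *	vm_map_inherit() and similar routines follow:
 *
 *		vm_map_entry_t entry;
 *
 *		vm_map_lock(map);
 *		VM_MAP_RANGE_CHECK(map, start, end);
 *		if (vm_map_lookup_entry(map, start, &entry))
 *			vm_map_clip_start(map, entry, start);
 *		else
 *			entry = entry->next;
 *		while ((entry != &map->header) && (entry->start < end)) {
 *			vm_map_clip_end(map, entry, end);
 *			... operate on [entry->start, entry->end) ...
 *			vm_map_simplify_entry(map, entry);
 *			entry = entry->next;
 *		}
 *		vm_map_unlock(map);
 */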
115 
116 /*
117  *	vm_map_startup:
118  *
119  *	Initialize the vm_map module.  Must be called before
120  *	any other vm_map routines.
121  *
122  *	Map and entry structures are allocated from the general
123  *	purpose memory pool with some exceptions:
124  *
125  *	- The kernel map and kmem submap are allocated statically.
126  *	- Kernel map entries are allocated out of a static pool.
127  *
128  *	These restrictions are necessary since malloc() uses the
129  *	maps and requires map entries.
130  */
131 
132 static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
133 static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
134 static struct vm_object kmapentobj, mapentobj, mapobj;
135 
136 static struct vm_map_entry map_entry_init[MAX_MAPENT];
137 static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
138 static struct vm_map map_init[MAX_KMAP];
139 
140 static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
141 static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
142 static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
143 static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
144 static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
145 static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
146 static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
147 		vm_map_entry_t));
148 static void vm_map_split __P((vm_map_entry_t));
149 
150 void
151 vm_map_startup()
152 {
153 	mapzone = &mapzone_store;
154 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
155 		map_init, MAX_KMAP);
156 	kmapentzone = &kmapentzone_store;
157 	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
158 		kmap_entry_init, MAX_KMAPENT);
159 	mapentzone = &mapentzone_store;
160 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
161 		map_entry_init, MAX_MAPENT);
162 }
163 
164 /*
165  * Allocate a vmspace structure, including a vm_map and pmap,
166  * and initialize those structures.  The refcnt is set to 1.
167  * The remaining fields must be initialized by the caller.
168  */
169 struct vmspace *
170 vmspace_alloc(min, max)
171 	vm_offset_t min, max;
172 {
173 	struct vmspace *vm;
174 
175 	vm = zalloc(vmspace_zone);
176 	vm_map_init(&vm->vm_map, min, max);
177 	pmap_pinit(vmspace_pmap(vm));
178 	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
179 	vm->vm_refcnt = 1;
180 	vm->vm_shm = NULL;
181 	return (vm);
182 }
183 
184 void
185 vm_init2(void) {
186 	zinitna(kmapentzone, &kmapentobj,
187 		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
188 	zinitna(mapentzone, &mapentobj,
189 		NULL, 0, 0, 0, 1);
190 	zinitna(mapzone, &mapobj,
191 		NULL, 0, 0, 0, 1);
192 	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
193 	pmap_init2();
194 	vm_object_init2();
195 }
196 
197 void
198 vmspace_free(vm)
199 	struct vmspace *vm;
200 {
201 
202 	if (vm->vm_refcnt == 0)
203 		panic("vmspace_free: attempt to free already freed vmspace");
204 
205 	if (--vm->vm_refcnt == 0) {
206 
207 		/*
208 		 * Lock the map, to wait out all other references to it.
209 		 * Delete all of the mappings and pages they hold, then call
210 		 * the pmap module to reclaim anything left.
211 		 */
212 		vm_map_lock(&vm->vm_map);
213 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
214 		    vm->vm_map.max_offset);
215 		vm_map_unlock(&vm->vm_map);
216 
217 		pmap_release(vmspace_pmap(vm));
218 		vm_map_destroy(&vm->vm_map);
219 		zfree(vmspace_zone, vm);
220 	}
221 }
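
/*
 *	Sketch of the reference-count discipline for a vmspace; "min" and
 *	"max" stand for caller-supplied address bounds, and the second
 *	reference is only an example (e.g. a vmspace shared with a child):
 *
 *		struct vmspace *vm;
 *
 *		vm = vmspace_alloc(min, max);	(vm_refcnt == 1)
 *		vm->vm_refcnt++;		(shared with another consumer)
 *		...
 *		vmspace_free(vm);		(2 -> 1, nothing torn down)
 *		vmspace_free(vm);		(1 -> 0, map deleted, pmap released)
 */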
222 
223 /*
224  *	vm_map_create:
225  *
226  *	Creates and returns a new empty VM map with
227  *	the given physical map structure, and having
228  *	the given lower and upper address bounds.
229  */
230 vm_map_t
231 vm_map_create(pmap, min, max)
232 	pmap_t pmap;
233 	vm_offset_t min, max;
234 {
235 	vm_map_t result;
236 
237 	result = zalloc(mapzone);
238 	vm_map_init(result, min, max);
239 	result->pmap = pmap;
240 	return (result);
241 }
242 
243 /*
244  * Initialize an existing vm_map structure
245  * such as that in the vmspace structure.
246  * The pmap is set elsewhere.
247  */
248 void
249 vm_map_init(map, min, max)
250 	struct vm_map *map;
251 	vm_offset_t min, max;
252 {
253 	map->header.next = map->header.prev = &map->header;
254 	map->nentries = 0;
255 	map->size = 0;
256 	map->system_map = 0;
257 	map->min_offset = min;
258 	map->max_offset = max;
259 	map->first_free = &map->header;
260 	map->hint = &map->header;
261 	map->timestamp = 0;
262 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
263 }
264 
265 void
266 vm_map_destroy(map)
267 	struct vm_map *map;
268 {
269 	lockdestroy(&map->lock);
270 }
271 
272 /*
273  *	vm_map_entry_dispose:	[ internal use only ]
274  *
275  *	Inverse of vm_map_entry_create.
276  */
277 static void
278 vm_map_entry_dispose(map, entry)
279 	vm_map_t map;
280 	vm_map_entry_t entry;
281 {
282 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
283 }
284 
285 /*
286  *	vm_map_entry_create:	[ internal use only ]
287  *
288  *	Allocates a VM map entry for insertion.
289  *	No entry fields are filled in.  This routine is
290  *	No entry fields are filled in.
291 static vm_map_entry_t
292 vm_map_entry_create(map)
293 	vm_map_t map;
294 {
295 	vm_map_entry_t new_entry;
296 
297 	new_entry = zalloc((map->system_map || !mapentzone) ?
298 		kmapentzone : mapentzone);
299 	if (new_entry == NULL)
300 	    panic("vm_map_entry_create: kernel resources exhausted");
301 	return(new_entry);
302 }
303 
304 /*
305  *	vm_map_entry_{un,}link:
306  *
307  *	Insert/remove entries from maps.
308  */
309 static __inline void
310 vm_map_entry_link(vm_map_t map,
311 		  vm_map_entry_t after_where,
312 		  vm_map_entry_t entry)
313 {
314 	map->nentries++;
315 	entry->prev = after_where;
316 	entry->next = after_where->next;
317 	entry->next->prev = entry;
318 	after_where->next = entry;
319 }
320 
321 static __inline void
322 vm_map_entry_unlink(vm_map_t map,
323 		    vm_map_entry_t entry)
324 {
325 	vm_map_entry_t prev = entry->prev;
326 	vm_map_entry_t next = entry->next;
327 
328 	next->prev = prev;
329 	prev->next = next;
330 	map->nentries--;
331 }
332 
333 /*
334  *	SAVE_HINT:
335  *
336  *	Saves the specified entry as the hint for
337  *	future lookups.
338  */
339 #define	SAVE_HINT(map,value) \
340 		(map)->hint = (value);
341 
342 /*
343  *	vm_map_lookup_entry:	[ internal use only ]
344  *
345  *	Finds the map entry containing (or
346  *	immediately preceding) the specified address
347  *	in the given map; the entry is returned
348  *	in the "entry" parameter.  The boolean
349  *	result indicates whether the address is
350  *	actually contained in the map.
351  */
352 boolean_t
353 vm_map_lookup_entry(map, address, entry)
354 	vm_map_t map;
355 	vm_offset_t address;
356 	vm_map_entry_t *entry;	/* OUT */
357 {
358 	vm_map_entry_t cur;
359 	vm_map_entry_t last;
360 
361 	/*
362 	 * Start looking either from the head of the list, or from the hint.
363 	 */
364 
365 	cur = map->hint;
366 
367 	if (cur == &map->header)
368 		cur = cur->next;
369 
370 	if (address >= cur->start) {
371 		/*
372 		 * Go from hint to end of list.
373 		 *
374 		 * But first, make a quick check to see if we are already looking
375 		 * at the entry we want (which is usually the case). Note also
376 		 * that we don't need to save the hint here... it is the same
377 		 * hint (unless we are at the header, in which case the hint
378 		 * didn't buy us anything anyway).
379 		 */
380 		last = &map->header;
381 		if ((cur != last) && (cur->end > address)) {
382 			*entry = cur;
383 			return (TRUE);
384 		}
385 	} else {
386 		/*
387 		 * Go from start to hint, *inclusively*
388 		 */
389 		last = cur->next;
390 		cur = map->header.next;
391 	}
392 
393 	/*
394 	 * Search linearly
395 	 */
396 
397 	while (cur != last) {
398 		if (cur->end > address) {
399 			if (address >= cur->start) {
400 				/*
401 				 * Save this lookup for future hints, and
402 				 * return
403 				 */
404 
405 				*entry = cur;
406 				SAVE_HINT(map, cur);
407 				return (TRUE);
408 			}
409 			break;
410 		}
411 		cur = cur->next;
412 	}
413 	*entry = cur->prev;
414 	SAVE_HINT(map, *entry);
415 	return (FALSE);
416 }
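
/*
 *	Sketch of a typical lookup under the shared lock; "map" and "addr"
 *	are placeholders:
 *
 *		vm_map_entry_t entry;
 *
 *		vm_map_lock_read(map);
 *		if (vm_map_lookup_entry(map, addr, &entry)) {
 *			... addr lies within [entry->start, entry->end) ...
 *		} else {
 *			... addr is unmapped; entry is the preceding entry,
 *			    possibly &map->header ...
 *		}
 *		vm_map_unlock_read(map);
 */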
417 
418 /*
419  *	vm_map_insert:
420  *
421  *	Inserts the given whole VM object into the target
422  *	map at the specified address range.  The object's
423  *	size should match that of the address range.
424  *
425  *	Requires that the map be locked, and leaves it so.
426  *
427  *	If object is non-NULL, ref count must be bumped by caller
428  *	prior to making call to account for the new entry.
429  */
430 int
431 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
432 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
433 	      int cow)
434 {
435 	vm_map_entry_t new_entry;
436 	vm_map_entry_t prev_entry;
437 	vm_map_entry_t temp_entry;
438 	vm_eflags_t protoeflags;
439 
440 	/*
441 	 * Check that the start and end points are not bogus.
442 	 */
443 
444 	if ((start < map->min_offset) || (end > map->max_offset) ||
445 	    (start >= end))
446 		return (KERN_INVALID_ADDRESS);
447 
448 	/*
449 	 * Find the entry prior to the proposed starting address; if it's part
450 	 * of an existing entry, this range is bogus.
451 	 */
452 
453 	if (vm_map_lookup_entry(map, start, &temp_entry))
454 		return (KERN_NO_SPACE);
455 
456 	prev_entry = temp_entry;
457 
458 	/*
459 	 * Assert that the next entry doesn't overlap the end point.
460 	 */
461 
462 	if ((prev_entry->next != &map->header) &&
463 	    (prev_entry->next->start < end))
464 		return (KERN_NO_SPACE);
465 
466 	protoeflags = 0;
467 
468 	if (cow & MAP_COPY_ON_WRITE)
469 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
470 
471 	if (cow & MAP_NOFAULT) {
472 		protoeflags |= MAP_ENTRY_NOFAULT;
473 
474 		KASSERT(object == NULL,
475 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
476 	}
477 	if (cow & MAP_DISABLE_SYNCER)
478 		protoeflags |= MAP_ENTRY_NOSYNC;
479 	if (cow & MAP_DISABLE_COREDUMP)
480 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
481 
482 	if (object) {
483 		/*
484 		 * When object is non-NULL, it could be shared with another
485 		 * process.  We have to set or clear OBJ_ONEMAPPING
486 		 * appropriately.
487 		 */
488 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
489 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
490 		}
491 	}
492 	else if ((prev_entry != &map->header) &&
493 		 (prev_entry->eflags == protoeflags) &&
494 		 (prev_entry->end == start) &&
495 		 (prev_entry->wired_count == 0) &&
496 		 ((prev_entry->object.vm_object == NULL) ||
497 		  vm_object_coalesce(prev_entry->object.vm_object,
498 				     OFF_TO_IDX(prev_entry->offset),
499 				     (vm_size_t)(prev_entry->end - prev_entry->start),
500 				     (vm_size_t)(end - prev_entry->end)))) {
501 		/*
502 		 * We were able to extend the object.  Determine if we
503 		 * can extend the previous map entry to include the
504 		 * new range as well.
505 		 */
506 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
507 		    (prev_entry->protection == prot) &&
508 		    (prev_entry->max_protection == max)) {
509 			map->size += (end - prev_entry->end);
510 			prev_entry->end = end;
511 			return (KERN_SUCCESS);
512 		}
513 
514 		/*
515 		 * If we can extend the object but cannot extend the
516 		 * map entry, we have to create a new map entry.  We
517 		 * must bump the ref count on the extended object to
518 		 * account for it.
519 		 */
520 		object = prev_entry->object.vm_object;
521 		offset = prev_entry->offset +
522 			(prev_entry->end - prev_entry->start);
523 		vm_object_reference(object);
524 	}
525 
526 	/*
527 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
528 	 * in things like the buffer map where we manage kva but do not manage
529 	 * backing objects.
530 	 */
531 
532 	/*
533 	 * Create a new entry
534 	 */
535 
536 	new_entry = vm_map_entry_create(map);
537 	new_entry->start = start;
538 	new_entry->end = end;
539 
540 	new_entry->eflags = protoeflags;
541 	new_entry->object.vm_object = object;
542 	new_entry->offset = offset;
543 	new_entry->avail_ssize = 0;
544 
545 	new_entry->inheritance = VM_INHERIT_DEFAULT;
546 	new_entry->protection = prot;
547 	new_entry->max_protection = max;
548 	new_entry->wired_count = 0;
549 
550 	/*
551 	 * Insert the new entry into the list
552 	 */
553 
554 	vm_map_entry_link(map, prev_entry, new_entry);
555 	map->size += new_entry->end - new_entry->start;
556 
557 	/*
558 	 * Update the free space hint
559 	 */
560 	if ((map->first_free == prev_entry) &&
561 	    (prev_entry->end >= new_entry->start)) {
562 		map->first_free = new_entry;
563 	}
564 
565 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
566 		pmap_object_init_pt(map->pmap, start,
567 				    object, OFF_TO_IDX(offset), end - start,
568 				    cow & MAP_PREFAULT_PARTIAL);
569 	}
570 
571 	return (KERN_SUCCESS);
572 }
573 
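/*
 *	Sketch of the caller-side reference rule stated above; the names
 *	are placeholders and error handling is reduced to the essentials.
 *	The extra reference taken for the new entry is dropped again if
 *	the insertion fails:
 *
 *		if (object != NULL)
 *			vm_object_reference(object);
 *		vm_map_lock(map);
 *		rv = vm_map_insert(map, object, offset,
 *		    start, start + size, prot, max, 0);
 *		vm_map_unlock(map);
 *		if (rv != KERN_SUCCESS && object != NULL)
 *			vm_object_deallocate(object);
 */
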
574 /*
575  * Find sufficient space for `length' bytes in the given map, starting at
576  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
577  */
578 int
579 vm_map_findspace(map, start, length, addr)
580 	vm_map_t map;
581 	vm_offset_t start;
582 	vm_size_t length;
583 	vm_offset_t *addr;
584 {
585 	vm_map_entry_t entry, next;
586 	vm_offset_t end;
587 
588 	if (start < map->min_offset)
589 		start = map->min_offset;
590 	if (start > map->max_offset)
591 		return (1);
592 
593 	/*
594 	 * Look for the first possible address; if there's already something
595 	 * at this address, we have to start after it.
596 	 */
597 	if (start == map->min_offset) {
598 		if ((entry = map->first_free) != &map->header)
599 			start = entry->end;
600 	} else {
601 		vm_map_entry_t tmp;
602 
603 		if (vm_map_lookup_entry(map, start, &tmp))
604 			start = tmp->end;
605 		entry = tmp;
606 	}
607 
608 	/*
609 	 * Look through the rest of the map, trying to fit a new region in the
610 	 * gap between existing regions, or after the very last region.
611 	 */
612 	for (;; start = (entry = next)->end) {
613 		/*
614 		 * Find the end of the proposed new region.  Be sure we didn't
615 		 * go beyond the end of the map, or wrap around the address;
616 		 * if so, we lose.  Otherwise, if this is the last entry, or
617 		 * if the proposed new region fits before the next entry, we
618 		 * win.
619 		 */
620 		end = start + length;
621 		if (end > map->max_offset || end < start)
622 			return (1);
623 		next = entry->next;
624 		if (next == &map->header || next->start >= end)
625 			break;
626 	}
627 	SAVE_HINT(map, entry);
628 	*addr = start;
629 	if (map == kernel_map) {
630 		vm_offset_t ksize;
631 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
632 			pmap_growkernel(ksize);
633 		}
634 	}
635 	return (0);
636 }
637 
638 /*
639  *	vm_map_find finds an unallocated region in the target address
640  *	map with the given length.  The search is defined to be
641  *	first-fit from the specified address; the region found is
642  *	returned in the same parameter.
643  *
644  *	If object is non-NULL, ref count must be bumped by caller
645  *	prior to making call to account for the new entry.
646  */
647 int
648 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
649 	    vm_offset_t *addr,	/* IN/OUT */
650 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
651 	    vm_prot_t max, int cow)
652 {
653 	vm_offset_t start;
654 	int result, s = 0;
655 
656 	start = *addr;
657 
658 	if (map == kmem_map || map == mb_map)
659 		s = splvm();
660 
661 	vm_map_lock(map);
662 	if (find_space) {
663 		if (vm_map_findspace(map, start, length, addr)) {
664 			vm_map_unlock(map);
665 			if (map == kmem_map || map == mb_map)
666 				splx(s);
667 			return (KERN_NO_SPACE);
668 		}
669 		start = *addr;
670 	}
671 	result = vm_map_insert(map, object, offset,
672 		start, start + length, prot, max, cow);
673 	vm_map_unlock(map);
674 
675 	if (map == kmem_map || map == mb_map)
676 		splx(s);
677 
678 	return (result);
679 }
680 
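/*
 *	Sketch of a first-fit allocation with vm_map_find(); "size" is a
 *	placeholder and the map is locked by vm_map_find() itself:
 *
 *		vm_offset_t addr;
 *
 *		addr = vm_map_min(map);
 *		if (vm_map_find(map, NULL, 0, &addr, size, TRUE,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0) == KERN_SUCCESS) {
 *			... [addr, addr + size) is now reserved ...
 *		}
 */
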
681 /*
682  *	vm_map_simplify_entry:
683  *
684  *	Simplify the given map entry by merging with either neighbor.
685  */
686 void
687 vm_map_simplify_entry(map, entry)
688 	vm_map_t map;
689 	vm_map_entry_t entry;
690 {
691 	vm_map_entry_t next, prev;
692 	vm_size_t prevsize, esize;
693 
694 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
695 		return;
696 
697 	prev = entry->prev;
698 	if (prev != &map->header) {
699 		prevsize = prev->end - prev->start;
700 		if ( (prev->end == entry->start) &&
701 		     (prev->object.vm_object == entry->object.vm_object) &&
702 		     (!prev->object.vm_object ||
703 			(prev->offset + prevsize == entry->offset)) &&
704 		     (prev->eflags == entry->eflags) &&
705 		     (prev->protection == entry->protection) &&
706 		     (prev->max_protection == entry->max_protection) &&
707 		     (prev->inheritance == entry->inheritance) &&
708 		     (prev->wired_count == entry->wired_count)) {
709 			if (map->first_free == prev)
710 				map->first_free = entry;
711 			if (map->hint == prev)
712 				map->hint = entry;
713 			vm_map_entry_unlink(map, prev);
714 			entry->start = prev->start;
715 			entry->offset = prev->offset;
716 			if (prev->object.vm_object)
717 				vm_object_deallocate(prev->object.vm_object);
718 			vm_map_entry_dispose(map, prev);
719 		}
720 	}
721 
722 	next = entry->next;
723 	if (next != &map->header) {
724 		esize = entry->end - entry->start;
725 		if ((entry->end == next->start) &&
726 		    (next->object.vm_object == entry->object.vm_object) &&
727 		     (!entry->object.vm_object ||
728 			(entry->offset + esize == next->offset)) &&
729 		    (next->eflags == entry->eflags) &&
730 		    (next->protection == entry->protection) &&
731 		    (next->max_protection == entry->max_protection) &&
732 		    (next->inheritance == entry->inheritance) &&
733 		    (next->wired_count == entry->wired_count)) {
734 			if (map->first_free == next)
735 				map->first_free = entry;
736 			if (map->hint == next)
737 				map->hint = entry;
738 			vm_map_entry_unlink(map, next);
739 			entry->end = next->end;
740 			if (next->object.vm_object)
741 				vm_object_deallocate(next->object.vm_object);
742 			vm_map_entry_dispose(map, next);
743 	        }
744 	}
745 }
746 /*
747  *	vm_map_clip_start:	[ internal use only ]
748  *
749  *	Asserts that the given entry begins at or after
750  *	the specified address; if necessary,
751  *	it splits the entry into two.
752  */
753 #define vm_map_clip_start(map, entry, startaddr) \
754 { \
755 	if (startaddr > entry->start) \
756 		_vm_map_clip_start(map, entry, startaddr); \
757 }
758 
759 /*
760  *	This routine is called only when it is known that
761  *	the entry must be split.
762  */
763 static void
764 _vm_map_clip_start(map, entry, start)
765 	vm_map_t map;
766 	vm_map_entry_t entry;
767 	vm_offset_t start;
768 {
769 	vm_map_entry_t new_entry;
770 
771 	/*
772 	 * Split off the front portion -- note that we must insert the new
773 	 * entry BEFORE this one, so that this entry has the specified
774 	 * starting address.
775 	 */
776 
777 	vm_map_simplify_entry(map, entry);
778 
779 	/*
780 	 * If there is no object backing this entry, we might as well create
781 	 * one now.  If we defer it, an object can get created after the map
782 	 * is clipped, and individual objects will be created for the split-up
783 	 * map.  This is a bit of a hack, but is also about the best place to
784 	 * put this improvement.
785 	 */
786 
787 	if (entry->object.vm_object == NULL) {
788 		vm_object_t object;
789 		object = vm_object_allocate(OBJT_DEFAULT,
790 				atop(entry->end - entry->start));
791 		entry->object.vm_object = object;
792 		entry->offset = 0;
793 	}
794 
795 	new_entry = vm_map_entry_create(map);
796 	*new_entry = *entry;
797 
798 	new_entry->end = start;
799 	entry->offset += (start - entry->start);
800 	entry->start = start;
801 
802 	vm_map_entry_link(map, entry->prev, new_entry);
803 
804 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
805 		vm_object_reference(new_entry->object.vm_object);
806 	}
807 }
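
/*
 *	Worked example of the split performed above: given an entry
 *	covering [0x2000, 0x6000) with offset 0 into its object, clipping
 *	at 0x3000 yields
 *
 *		new_entry:  [0x2000, 0x3000), offset 0       (linked in before)
 *		entry:      [0x3000, 0x6000), offset 0x1000
 *
 *	i.e. the original entry keeps the tail and its offset advances by
 *	(start - old start); the backing object gains one reference for
 *	the new entry.
 */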
808 
809 /*
810  *	vm_map_clip_end:	[ internal use only ]
811  *
812  *	Asserts that the given entry ends at or before
813  *	the specified address; if necessary,
814  *	it splits the entry into two.
815  */
816 
817 #define vm_map_clip_end(map, entry, endaddr) \
818 { \
819 	if (endaddr < entry->end) \
820 		_vm_map_clip_end(map, entry, endaddr); \
821 }
822 
823 /*
824  *	This routine is called only when it is known that
825  *	the entry must be split.
826  */
827 static void
828 _vm_map_clip_end(map, entry, end)
829 	vm_map_t map;
830 	vm_map_entry_t entry;
831 	vm_offset_t end;
832 {
833 	vm_map_entry_t new_entry;
834 
835 	/*
836 	 * If there is no object backing this entry, we might as well create
837 	 * one now.  If we defer it, an object can get created after the map
838 	 * is clipped, and individual objects will be created for the split-up
839 	 * map.  This is a bit of a hack, but is also about the best place to
840 	 * put this improvement.
841 	 */
842 
843 	if (entry->object.vm_object == NULL) {
844 		vm_object_t object;
845 		object = vm_object_allocate(OBJT_DEFAULT,
846 				atop(entry->end - entry->start));
847 		entry->object.vm_object = object;
848 		entry->offset = 0;
849 	}
850 
851 	/*
852 	 * Create a new entry and insert it AFTER the specified entry
853 	 */
854 
855 	new_entry = vm_map_entry_create(map);
856 	*new_entry = *entry;
857 
858 	new_entry->start = entry->end = end;
859 	new_entry->offset += (end - entry->start);
860 
861 	vm_map_entry_link(map, entry, new_entry);
862 
863 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
864 		vm_object_reference(new_entry->object.vm_object);
865 	}
866 }
867 
868 /*
869  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
870  *
871  *	Asserts that the starting and ending region
872  *	addresses fall within the valid range of the map.
873  */
874 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
875 		{					\
876 		if (start < vm_map_min(map))		\
877 			start = vm_map_min(map);	\
878 		if (end > vm_map_max(map))		\
879 			end = vm_map_max(map);		\
880 		if (start > end)			\
881 			start = end;			\
882 		}
883 
884 /*
885  *	vm_map_submap:		[ kernel use only ]
886  *
887  *	Mark the given range as handled by a subordinate map.
888  *
889  *	This range must have been created with vm_map_find,
890  *	and no other operations may have been performed on this
891  *	range prior to calling vm_map_submap.
892  *
893  *	Only a limited number of operations can be performed
894  *	within this range after calling vm_map_submap:
895  *		vm_fault
896  *	[Don't try vm_map_copy!]
897  *
898  *	To remove a submapping, one must first remove the
899  *	range from the superior map, and then destroy the
900  *	submap (if desired).  [Better yet, don't try it.]
901  */
902 int
903 vm_map_submap(map, start, end, submap)
904 	vm_map_t map;
905 	vm_offset_t start;
906 	vm_offset_t end;
907 	vm_map_t submap;
908 {
909 	vm_map_entry_t entry;
910 	int result = KERN_INVALID_ARGUMENT;
911 
912 	vm_map_lock(map);
913 
914 	VM_MAP_RANGE_CHECK(map, start, end);
915 
916 	if (vm_map_lookup_entry(map, start, &entry)) {
917 		vm_map_clip_start(map, entry, start);
918 	} else
919 		entry = entry->next;
920 
921 	vm_map_clip_end(map, entry, end);
922 
923 	if ((entry->start == start) && (entry->end == end) &&
924 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
925 	    (entry->object.vm_object == NULL)) {
926 		entry->object.sub_map = submap;
927 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
928 		result = KERN_SUCCESS;
929 	}
930 	vm_map_unlock(map);
931 
932 	return (result);
933 }
934 
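/*
 *	Sketch of building a submap, in the spirit of kmem_suballoc();
 *	"parent", "start" and "size" are placeholders:
 *
 *		(void) vm_map_find(parent, NULL, 0, &start, size, TRUE,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0);
 *		submap = vm_map_create(vm_map_pmap(parent),
 *		    start, start + size);
 *		(void) vm_map_submap(parent, start, start + size, submap);
 */
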
935 /*
936  *	vm_map_protect:
937  *
938  *	Sets the protection of the specified address
939  *	region in the target map.  If "set_max" is
940  *	specified, the maximum protection is to be set;
941  *	otherwise, only the current protection is affected.
942  */
943 int
944 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
945 	       vm_prot_t new_prot, boolean_t set_max)
946 {
947 	vm_map_entry_t current;
948 	vm_map_entry_t entry;
949 
950 	vm_map_lock(map);
951 
952 	VM_MAP_RANGE_CHECK(map, start, end);
953 
954 	if (vm_map_lookup_entry(map, start, &entry)) {
955 		vm_map_clip_start(map, entry, start);
956 	} else {
957 		entry = entry->next;
958 	}
959 
960 	/*
961 	 * Make a first pass to check for protection violations.
962 	 */
963 
964 	current = entry;
965 	while ((current != &map->header) && (current->start < end)) {
966 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
967 			vm_map_unlock(map);
968 			return (KERN_INVALID_ARGUMENT);
969 		}
970 		if ((new_prot & current->max_protection) != new_prot) {
971 			vm_map_unlock(map);
972 			return (KERN_PROTECTION_FAILURE);
973 		}
974 		current = current->next;
975 	}
976 
977 	/*
978 	 * Go back and fix up protections. [Note that clipping is not
979 	 * necessary the second time.]
980 	 */
981 
982 	current = entry;
983 
984 	while ((current != &map->header) && (current->start < end)) {
985 		vm_prot_t old_prot;
986 
987 		vm_map_clip_end(map, current, end);
988 
989 		old_prot = current->protection;
990 		if (set_max)
991 			current->protection =
992 			    (current->max_protection = new_prot) &
993 			    old_prot;
994 		else
995 			current->protection = new_prot;
996 
997 		/*
998 		 * Update physical map if necessary. Worry about copy-on-write
999 		 * here -- CHECK THIS XXX
1000 		 */
1001 
1002 		if (current->protection != old_prot) {
1003 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1004 							VM_PROT_ALL)
1005 
1006 			pmap_protect(map->pmap, current->start,
1007 			    current->end,
1008 			    current->protection & MASK(current));
1009 #undef	MASK
1010 		}
1011 
1012 		vm_map_simplify_entry(map, current);
1013 
1014 		current = current->next;
1015 	}
1016 
1017 	vm_map_unlock(map);
1018 	return (KERN_SUCCESS);
1019 }
1020 
1021 /*
1022  *	vm_map_madvise:
1023  *
1024  *	This routine traverses a process's map, handling the madvise
1025  *	system call.  Advisories are classified as either those affecting
1026  *	the vm_map_entry structure, or those affecting the underlying
1027  *	objects.
1028  */
1029 
1030 int
1031 vm_map_madvise(map, start, end, behav)
1032 	vm_map_t map;
1033 	vm_offset_t start, end;
1034 	int behav;
1035 {
1036 	vm_map_entry_t current, entry;
1037 	int modify_map = 0;
1038 
1039 	/*
1040 	 * Some madvise calls directly modify the vm_map_entry, in which case
1041 	 * we need to use an exclusive lock on the map and we need to perform
1042 	 * various clipping operations.  Otherwise we only need a read-lock
1043 	 * on the map.
1044 	 */
1045 
1046 	switch(behav) {
1047 	case MADV_NORMAL:
1048 	case MADV_SEQUENTIAL:
1049 	case MADV_RANDOM:
1050 	case MADV_NOSYNC:
1051 	case MADV_AUTOSYNC:
1052 	case MADV_NOCORE:
1053 	case MADV_CORE:
1054 		modify_map = 1;
1055 		vm_map_lock(map);
1056 		break;
1057 	case MADV_WILLNEED:
1058 	case MADV_DONTNEED:
1059 	case MADV_FREE:
1060 		vm_map_lock_read(map);
1061 		break;
1062 	default:
1063 		return (KERN_INVALID_ARGUMENT);
1064 	}
1065 
1066 	/*
1067 	 * Locate starting entry and clip if necessary.
1068 	 */
1069 
1070 	VM_MAP_RANGE_CHECK(map, start, end);
1071 
1072 	if (vm_map_lookup_entry(map, start, &entry)) {
1073 		if (modify_map)
1074 			vm_map_clip_start(map, entry, start);
1075 	} else {
1076 		entry = entry->next;
1077 	}
1078 
1079 	if (modify_map) {
1080 		/*
1081 		 * madvise behaviors that are implemented in the vm_map_entry.
1082 		 *
1083 		 * We clip the vm_map_entry so that behavioral changes are
1084 		 * limited to the specified address range.
1085 		 */
1086 		for (current = entry;
1087 		     (current != &map->header) && (current->start < end);
1088 		     current = current->next
1089 		) {
1090 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1091 				continue;
1092 
1093 			vm_map_clip_end(map, current, end);
1094 
1095 			switch (behav) {
1096 			case MADV_NORMAL:
1097 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1098 				break;
1099 			case MADV_SEQUENTIAL:
1100 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1101 				break;
1102 			case MADV_RANDOM:
1103 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1104 				break;
1105 			case MADV_NOSYNC:
1106 				current->eflags |= MAP_ENTRY_NOSYNC;
1107 				break;
1108 			case MADV_AUTOSYNC:
1109 				current->eflags &= ~MAP_ENTRY_NOSYNC;
1110 				break;
1111 			case MADV_NOCORE:
1112 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
1113 				break;
1114 			case MADV_CORE:
1115 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1116 				break;
1117 			default:
1118 				break;
1119 			}
1120 			vm_map_simplify_entry(map, current);
1121 		}
1122 		vm_map_unlock(map);
1123 	} else {
1124 		vm_pindex_t pindex;
1125 		int count;
1126 
1127 		/*
1128 		 * madvise behaviors that are implemented in the underlying
1129 		 * vm_object.
1130 		 *
1131 		 * Since we don't clip the vm_map_entry, we have to clip
1132 		 * the vm_object pindex and count.
1133 		 */
1134 		for (current = entry;
1135 		     (current != &map->header) && (current->start < end);
1136 		     current = current->next
1137 		) {
1138 			vm_offset_t useStart;
1139 
1140 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1141 				continue;
1142 
1143 			pindex = OFF_TO_IDX(current->offset);
1144 			count = atop(current->end - current->start);
1145 			useStart = current->start;
1146 
1147 			if (current->start < start) {
1148 				pindex += atop(start - current->start);
1149 				count -= atop(start - current->start);
1150 				useStart = start;
1151 			}
1152 			if (current->end > end)
1153 				count -= atop(current->end - end);
1154 
1155 			if (count <= 0)
1156 				continue;
1157 
1158 			vm_object_madvise(current->object.vm_object,
1159 					  pindex, count, behav);
1160 			if (behav == MADV_WILLNEED) {
1161 				pmap_object_init_pt(
1162 				    map->pmap,
1163 				    useStart,
1164 				    current->object.vm_object,
1165 				    pindex,
1166 				    (count << PAGE_SHIFT),
1167 				    0
1168 				);
1169 			}
1170 		}
1171 		vm_map_unlock_read(map);
1172 	}
1173 	return(0);
1174 }
1175 
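/*
 *	Sketch of the two classes of advice as seen by a caller; "map",
 *	"start" and "end" are placeholders:
 *
 *		(void) vm_map_madvise(map, start, end, MADV_NOSYNC);
 *		(void) vm_map_madvise(map, start, end, MADV_WILLNEED);
 *
 *	The first call modifies the map entries themselves and therefore
 *	takes the exclusive map lock; the second only touches the backing
 *	objects and gets by with the read lock, as implemented above.
 */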
1176 
1177 /*
1178  *	vm_map_inherit:
1179  *
1180  *	Sets the inheritance of the specified address
1181  *	range in the target map.  Inheritance
1182  *	affects how the map will be shared with
1183  *	child maps at the time of vm_map_fork.
1184  */
1185 int
1186 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1187 	       vm_inherit_t new_inheritance)
1188 {
1189 	vm_map_entry_t entry;
1190 	vm_map_entry_t temp_entry;
1191 
1192 	switch (new_inheritance) {
1193 	case VM_INHERIT_NONE:
1194 	case VM_INHERIT_COPY:
1195 	case VM_INHERIT_SHARE:
1196 		break;
1197 	default:
1198 		return (KERN_INVALID_ARGUMENT);
1199 	}
1200 
1201 	vm_map_lock(map);
1202 
1203 	VM_MAP_RANGE_CHECK(map, start, end);
1204 
1205 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1206 		entry = temp_entry;
1207 		vm_map_clip_start(map, entry, start);
1208 	} else
1209 		entry = temp_entry->next;
1210 
1211 	while ((entry != &map->header) && (entry->start < end)) {
1212 		vm_map_clip_end(map, entry, end);
1213 
1214 		entry->inheritance = new_inheritance;
1215 
1216 		vm_map_simplify_entry(map, entry);
1217 
1218 		entry = entry->next;
1219 	}
1220 
1221 	vm_map_unlock(map);
1222 	return (KERN_SUCCESS);
1223 }
1224 
1225 /*
1226  * Implement the semantics of mlock
1227  */
1228 int
1229 vm_map_user_pageable(map, start, end, new_pageable)
1230 	vm_map_t map;
1231 	vm_offset_t start;
1232 	vm_offset_t end;
1233 	boolean_t new_pageable;
1234 {
1235 	vm_map_entry_t entry;
1236 	vm_map_entry_t start_entry;
1237 	vm_offset_t estart;
1238 	int rv;
1239 
1240 	vm_map_lock(map);
1241 	VM_MAP_RANGE_CHECK(map, start, end);
1242 
1243 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1244 		vm_map_unlock(map);
1245 		return (KERN_INVALID_ADDRESS);
1246 	}
1247 
1248 	if (new_pageable) {
1249 
1250 		entry = start_entry;
1251 		vm_map_clip_start(map, entry, start);
1252 
1253 		/*
1254 		 * Now decrement the wiring count for each region. If a region
1255 		 * becomes completely unwired, unwire its physical pages and
1256 		 * mappings.
1257 		 */
1258 		while ((entry != &map->header) && (entry->start < end)) {
1259 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1260 				vm_map_clip_end(map, entry, end);
1261 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1262 				entry->wired_count--;
1263 				if (entry->wired_count == 0)
1264 					vm_fault_unwire(map, entry->start, entry->end);
1265 			}
1266 			vm_map_simplify_entry(map,entry);
1267 			entry = entry->next;
1268 		}
1269 	} else {
1270 
1271 		entry = start_entry;
1272 
1273 		while ((entry != &map->header) && (entry->start < end)) {
1274 
1275 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1276 				entry = entry->next;
1277 				continue;
1278 			}
1279 
1280 			if (entry->wired_count != 0) {
1281 				entry->wired_count++;
1282 				entry->eflags |= MAP_ENTRY_USER_WIRED;
1283 				entry = entry->next;
1284 				continue;
1285 			}
1286 
1287 			/* Here on entry being newly wired */
1288 
1289 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1290 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1291 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1292 
1293 					vm_object_shadow(&entry->object.vm_object,
1294 					    &entry->offset,
1295 					    atop(entry->end - entry->start));
1296 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1297 
1298 				} else if (entry->object.vm_object == NULL) {
1299 
1300 					entry->object.vm_object =
1301 					    vm_object_allocate(OBJT_DEFAULT,
1302 						atop(entry->end - entry->start));
1303 					entry->offset = (vm_offset_t) 0;
1304 
1305 				}
1306 			}
1307 
1308 			vm_map_clip_start(map, entry, start);
1309 			vm_map_clip_end(map, entry, end);
1310 
1311 			entry->wired_count++;
1312 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1313 			estart = entry->start;
1314 
1315 			/* First we need to allow map modifications */
1316 			vm_map_set_recursive(map);
1317 			vm_map_lock_downgrade(map);
1318 			map->timestamp++;
1319 
1320 			rv = vm_fault_user_wire(map, entry->start, entry->end);
1321 			if (rv) {
1322 
1323 				entry->wired_count--;
1324 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1325 
1326 				vm_map_clear_recursive(map);
1327 				vm_map_unlock(map);
1328 
1329 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
1330 				return rv;
1331 			}
1332 
1333 			vm_map_clear_recursive(map);
1334 			if (vm_map_lock_upgrade(map)) {
1335 				vm_map_lock(map);
1336 				if (vm_map_lookup_entry(map, estart, &entry)
1337 				    == FALSE) {
1338 					vm_map_unlock(map);
1339 					(void) vm_map_user_pageable(map,
1340 								    start,
1341 								    estart,
1342 								    TRUE);
1343 					return (KERN_INVALID_ADDRESS);
1344 				}
1345 			}
1346 			vm_map_simplify_entry(map,entry);
1347 		}
1348 	}
1349 	map->timestamp++;
1350 	vm_map_unlock(map);
1351 	return KERN_SUCCESS;
1352 }
1353 
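/*
 *	Sketch of how an mlock()-style caller is expected to use this
 *	routine (page-aligned bounds assumed):
 *
 *		rv = vm_map_user_pageable(&p->p_vmspace->vm_map,
 *		    trunc_page(addr), round_page(addr + len), FALSE);
 *
 *	with TRUE in place of FALSE to undo the wiring (munlock()).
 */
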
1354 /*
1355  *	vm_map_pageable:
1356  *
1357  *	Sets the pageability of the specified address
1358  *	range in the target map.  Regions specified
1359  *	as not pageable require locked-down physical
1360  *	memory and physical page maps.
1361  *
1362  *	The map must not be locked, but a reference
1363  *	must remain to the map throughout the call.
1364  */
1365 int
1366 vm_map_pageable(map, start, end, new_pageable)
1367 	vm_map_t map;
1368 	vm_offset_t start;
1369 	vm_offset_t end;
1370 	boolean_t new_pageable;
1371 {
1372 	vm_map_entry_t entry;
1373 	vm_map_entry_t start_entry;
1374 	vm_offset_t failed = 0;
1375 	int rv;
1376 
1377 	vm_map_lock(map);
1378 
1379 	VM_MAP_RANGE_CHECK(map, start, end);
1380 
1381 	/*
1382 	 * Only one pageability change may take place at one time, since
1383 	 * vm_fault assumes it will be called only once for each
1384 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
1385 	 * changing the pageability for the entire region.  We do so before
1386 	 * making any changes.
1387 	 */
1388 
1389 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1390 		vm_map_unlock(map);
1391 		return (KERN_INVALID_ADDRESS);
1392 	}
1393 	entry = start_entry;
1394 
1395 	/*
1396 	 * Actions are rather different for wiring and unwiring, so we have
1397 	 * two separate cases.
1398 	 */
1399 
1400 	if (new_pageable) {
1401 
1402 		vm_map_clip_start(map, entry, start);
1403 
1404 		/*
1405 		 * Unwiring.  First ensure that the range to be unwired is
1406 		 * really wired down and that there are no holes.
1407 		 */
1408 		while ((entry != &map->header) && (entry->start < end)) {
1409 
1410 			if (entry->wired_count == 0 ||
1411 			    (entry->end < end &&
1412 				(entry->next == &map->header ||
1413 				    entry->next->start > entry->end))) {
1414 				vm_map_unlock(map);
1415 				return (KERN_INVALID_ARGUMENT);
1416 			}
1417 			entry = entry->next;
1418 		}
1419 
1420 		/*
1421 		 * Now decrement the wiring count for each region. If a region
1422 		 * becomes completely unwired, unwire its physical pages and
1423 		 * mappings.
1424 		 */
1425 		entry = start_entry;
1426 		while ((entry != &map->header) && (entry->start < end)) {
1427 			vm_map_clip_end(map, entry, end);
1428 
1429 			entry->wired_count--;
1430 			if (entry->wired_count == 0)
1431 				vm_fault_unwire(map, entry->start, entry->end);
1432 
1433 			vm_map_simplify_entry(map, entry);
1434 
1435 			entry = entry->next;
1436 		}
1437 	} else {
1438 		/*
1439 		 * Wiring.  We must do this in two passes:
1440 		 *
1441 		 * 1.  Holding the write lock, we create any shadow or zero-fill
1442 		 * objects that need to be created. Then we clip each map
1443 		 * entry to the region to be wired and increment its wiring
1444 		 * count.  We create objects before clipping the map entries
1445 		 * to avoid object proliferation.
1446 		 *
1447 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
1448 		 * fault in the pages for any newly wired area (wired_count is
1449 		 * 1).
1450 		 *
1451 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
1452 		 * deadlock with another process that may have faulted on one
1453 		 * of the pages to be wired (it would mark the page busy,
1454 		 * blocking us, then in turn block on the map lock that we
1455 		 * hold).  Because of problems in the recursive lock package,
1456 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1457 		 * any actions that require the write lock must be done
1458 		 * beforehand.  Because we keep the read lock on the map, the
1459 		 * copy-on-write status of the entries we modify here cannot
1460 		 * change.
1461 		 */
1462 
1463 		/*
1464 		 * Pass 1.
1465 		 */
1466 		while ((entry != &map->header) && (entry->start < end)) {
1467 			if (entry->wired_count == 0) {
1468 
1469 				/*
1470 				 * Perform actions of vm_map_lookup that need
1471 				 * the write lock on the map: create a shadow
1472 				 * object for a copy-on-write region, or an
1473 				 * object for a zero-fill region.
1474 				 *
1475 				 * We don't have to do this for entries that
1476 				 * point to sub maps, because we won't
1477 				 * hold the lock on the sub map.
1478 				 */
1479 				if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1480 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1481 					if (copyflag &&
1482 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1483 
1484 						vm_object_shadow(&entry->object.vm_object,
1485 						    &entry->offset,
1486 						    atop(entry->end - entry->start));
1487 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1488 					} else if (entry->object.vm_object == NULL) {
1489 						entry->object.vm_object =
1490 						    vm_object_allocate(OBJT_DEFAULT,
1491 							atop(entry->end - entry->start));
1492 						entry->offset = (vm_offset_t) 0;
1493 					}
1494 				}
1495 			}
1496 			vm_map_clip_start(map, entry, start);
1497 			vm_map_clip_end(map, entry, end);
1498 			entry->wired_count++;
1499 
1500 			/*
1501 			 * Check for holes
1502 			 */
1503 			if (entry->end < end &&
1504 			    (entry->next == &map->header ||
1505 				entry->next->start > entry->end)) {
1506 				/*
1507 				 * Found one.  Object creation actions do not
1508 				 * need to be undone, but the wired counts
1509 				 * need to be restored.
1510 				 */
1511 				while (entry != &map->header && entry->end > start) {
1512 					entry->wired_count--;
1513 					entry = entry->prev;
1514 				}
1515 				vm_map_unlock(map);
1516 				return (KERN_INVALID_ARGUMENT);
1517 			}
1518 			entry = entry->next;
1519 		}
1520 
1521 		/*
1522 		 * Pass 2.
1523 		 */
1524 
1525 		/*
1526 		 * HACK HACK HACK HACK
1527 		 *
1528 		 * If we are wiring in the kernel map or a submap of it,
1529 		 * unlock the map to avoid deadlocks.  We trust that the
1530 		 * kernel is well-behaved, and therefore will not do
1531 		 * anything destructive to this region of the map while
1532 		 * we have it unlocked.  We cannot trust user processes
1533 		 * to do the same.
1534 		 *
1535 		 * HACK HACK HACK HACK
1536 		 */
1537 		if (vm_map_pmap(map) == kernel_pmap) {
1538 			vm_map_unlock(map);	/* trust me ... */
1539 		} else {
1540 			vm_map_lock_downgrade(map);
1541 		}
1542 
1543 		rv = 0;
1544 		entry = start_entry;
1545 		while (entry != &map->header && entry->start < end) {
1546 			/*
1547 			 * If vm_fault_wire fails for any page we need to undo
1548 			 * what has been done.  We decrement the wiring count
1549 			 * for those pages which have not yet been wired (now)
1550 			 * and unwire those that have (later).
1551 			 *
1552 			 * XXX this violates the locking protocol on the map,
1553 			 * needs to be fixed.
1554 			 */
1555 			if (rv)
1556 				entry->wired_count--;
1557 			else if (entry->wired_count == 1) {
1558 				rv = vm_fault_wire(map, entry->start, entry->end);
1559 				if (rv) {
1560 					failed = entry->start;
1561 					entry->wired_count--;
1562 				}
1563 			}
1564 			entry = entry->next;
1565 		}
1566 
1567 		if (vm_map_pmap(map) == kernel_pmap) {
1568 			vm_map_lock(map);
1569 		}
1570 		if (rv) {
1571 			vm_map_unlock(map);
1572 			(void) vm_map_pageable(map, start, failed, TRUE);
1573 			return (rv);
1574 		}
1575 		vm_map_simplify_entry(map, start_entry);
1576 	}
1577 
1578 	vm_map_unlock(map);
1579 
1580 	return (KERN_SUCCESS);
1581 }
1582 
1583 /*
1584  * vm_map_clean
1585  *
1586  * Push any dirty cached pages in the address range to their pager.
1587  * If syncio is TRUE, dirty pages are written synchronously.
1588  * If invalidate is TRUE, any cached pages are freed as well.
1589  *
1590  * Returns an error if any part of the specified range is not mapped.
1591  */
1592 int
1593 vm_map_clean(map, start, end, syncio, invalidate)
1594 	vm_map_t map;
1595 	vm_offset_t start;
1596 	vm_offset_t end;
1597 	boolean_t syncio;
1598 	boolean_t invalidate;
1599 {
1600 	vm_map_entry_t current;
1601 	vm_map_entry_t entry;
1602 	vm_size_t size;
1603 	vm_object_t object;
1604 	vm_ooffset_t offset;
1605 
1606 	vm_map_lock_read(map);
1607 	VM_MAP_RANGE_CHECK(map, start, end);
1608 	if (!vm_map_lookup_entry(map, start, &entry)) {
1609 		vm_map_unlock_read(map);
1610 		return (KERN_INVALID_ADDRESS);
1611 	}
1612 	/*
1613 	 * Make a first pass to check for holes.
1614 	 */
1615 	for (current = entry; current->start < end; current = current->next) {
1616 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1617 			vm_map_unlock_read(map);
1618 			return (KERN_INVALID_ARGUMENT);
1619 		}
1620 		if (end > current->end &&
1621 		    (current->next == &map->header ||
1622 			current->end != current->next->start)) {
1623 			vm_map_unlock_read(map);
1624 			return (KERN_INVALID_ADDRESS);
1625 		}
1626 	}
1627 
1628 	if (invalidate)
1629 		pmap_remove(vm_map_pmap(map), start, end);
1630 	/*
1631 	 * Make a second pass, cleaning/uncaching pages from the indicated
1632 	 * objects as we go.
1633 	 */
1634 	for (current = entry; current->start < end; current = current->next) {
1635 		offset = current->offset + (start - current->start);
1636 		size = (end <= current->end ? end : current->end) - start;
1637 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1638 			vm_map_t smap;
1639 			vm_map_entry_t tentry;
1640 			vm_size_t tsize;
1641 
1642 			smap = current->object.sub_map;
1643 			vm_map_lock_read(smap);
1644 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1645 			tsize = tentry->end - offset;
1646 			if (tsize < size)
1647 				size = tsize;
1648 			object = tentry->object.vm_object;
1649 			offset = tentry->offset + (offset - tentry->start);
1650 			vm_map_unlock_read(smap);
1651 		} else {
1652 			object = current->object.vm_object;
1653 		}
1654 		/*
1655 		 * Note that there is absolutely no sense in writing out
1656 		 * anonymous objects, so we track down the vnode object
1657 		 * to write out.
1658 		 * We invalidate (remove) all pages from the address space
1659 		 * anyway, for semantic correctness.
1660 		 */
1661 		while (object->backing_object) {
1662 			object = object->backing_object;
1663 			offset += object->backing_object_offset;
1664 			if (object->size < OFF_TO_IDX( offset + size))
1665 				size = IDX_TO_OFF(object->size) - offset;
1666 		}
1667 		if (object && (object->type == OBJT_VNODE) &&
1668 		    (current->protection & VM_PROT_WRITE)) {
1669 			/*
1670 			 * Flush pages if writing is allowed, invalidate them
1671 			 * if invalidation requested.  Pages undergoing I/O
1672 			 * will be ignored by vm_object_page_remove().
1673 			 *
1674 			 * We cannot lock the vnode and then wait for paging
1675 			 * to complete without deadlocking against vm_fault.
1676 			 * Instead we simply call vm_object_page_remove() and
1677 			 * allow it to block internally on a page-by-page
1678 			 * basis when it encounters pages undergoing async
1679 			 * I/O.
1680 			 */
1681 			int flags;
1682 
1683 			vm_object_reference(object);
1684 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
1685 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1686 			flags |= invalidate ? OBJPC_INVAL : 0;
1687 			vm_object_page_clean(object,
1688 			    OFF_TO_IDX(offset),
1689 			    OFF_TO_IDX(offset + size + PAGE_MASK),
1690 			    flags);
1691 			if (invalidate) {
1692 				/*vm_object_pip_wait(object, "objmcl");*/
1693 				vm_object_page_remove(object,
1694 				    OFF_TO_IDX(offset),
1695 				    OFF_TO_IDX(offset + size + PAGE_MASK),
1696 				    FALSE);
1697 			}
1698 			VOP_UNLOCK(object->handle, 0, curproc);
1699 			vm_object_deallocate(object);
1700 		}
1701 		start += size;
1702 	}
1703 
1704 	vm_map_unlock_read(map);
1705 	return (KERN_SUCCESS);
1706 }
1707 
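/*
 *	Sketch of an msync()-style caller; the flag mapping below is an
 *	assumption and the bounds are taken to be page aligned:
 *
 *		syncio     = (flags & MS_ASYNC) == 0;
 *		invalidate = (flags & MS_INVALIDATE) != 0;
 *		rv = vm_map_clean(&p->p_vmspace->vm_map, addr, addr + len,
 *		    syncio, invalidate);
 */
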
1708 /*
1709  *	vm_map_entry_unwire:	[ internal use only ]
1710  *
1711  *	Make the region specified by this entry pageable.
1712  *
1713  *	The map in question should be locked.
1714  *	[This is the reason for this routine's existence.]
1715  */
1716 static void
1717 vm_map_entry_unwire(map, entry)
1718 	vm_map_t map;
1719 	vm_map_entry_t entry;
1720 {
1721 	vm_fault_unwire(map, entry->start, entry->end);
1722 	entry->wired_count = 0;
1723 }
1724 
1725 /*
1726  *	vm_map_entry_delete:	[ internal use only ]
1727  *
1728  *	Deallocate the given entry from the target map.
1729  */
1730 static void
1731 vm_map_entry_delete(map, entry)
1732 	vm_map_t map;
1733 	vm_map_entry_t entry;
1734 {
1735 	vm_map_entry_unlink(map, entry);
1736 	map->size -= entry->end - entry->start;
1737 
1738 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1739 		vm_object_deallocate(entry->object.vm_object);
1740 	}
1741 
1742 	vm_map_entry_dispose(map, entry);
1743 }
1744 
1745 /*
1746  *	vm_map_delete:	[ internal use only ]
1747  *
1748  *	Deallocates the given address range from the target
1749  *	map.
1750  */
1751 int
1752 vm_map_delete(map, start, end)
1753 	vm_map_t map;
1754 	vm_offset_t start;
1755 	vm_offset_t end;
1756 {
1757 	vm_object_t object;
1758 	vm_map_entry_t entry;
1759 	vm_map_entry_t first_entry;
1760 
1761 	/*
1762 	 * Find the start of the region, and clip it
1763 	 */
1764 
1765 	if (!vm_map_lookup_entry(map, start, &first_entry))
1766 		entry = first_entry->next;
1767 	else {
1768 		entry = first_entry;
1769 		vm_map_clip_start(map, entry, start);
1770 		/*
1771 		 * Fix the lookup hint now, rather than each time though the
1772 		 * Fix the lookup hint now, rather than each time through the
1773 		 */
1774 		SAVE_HINT(map, entry->prev);
1775 	}
1776 
1777 	/*
1778 	 * Save the free space hint
1779 	 */
1780 
1781 	if (entry == &map->header) {
1782 		map->first_free = &map->header;
1783 	} else if (map->first_free->start >= start) {
1784 		map->first_free = entry->prev;
1785 	}
1786 
1787 	/*
1788 	 * Step through all entries in this region
1789 	 */
1790 
1791 	while ((entry != &map->header) && (entry->start < end)) {
1792 		vm_map_entry_t next;
1793 		vm_offset_t s, e;
1794 		vm_pindex_t offidxstart, offidxend, count;
1795 
1796 		vm_map_clip_end(map, entry, end);
1797 
1798 		s = entry->start;
1799 		e = entry->end;
1800 		next = entry->next;
1801 
1802 		offidxstart = OFF_TO_IDX(entry->offset);
1803 		count = OFF_TO_IDX(e - s);
1804 		object = entry->object.vm_object;
1805 
1806 		/*
1807 		 * Unwire before removing addresses from the pmap; otherwise,
1808 		 * unwiring will put the entries back in the pmap.
1809 		 */
1810 		if (entry->wired_count != 0) {
1811 			vm_map_entry_unwire(map, entry);
1812 		}
1813 
1814 		offidxend = offidxstart + count;
1815 
1816 		if ((object == kernel_object) || (object == kmem_object)) {
1817 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
1818 		} else {
1819 			pmap_remove(map->pmap, s, e);
1820 			if (object != NULL &&
1821 			    object->ref_count != 1 &&
1822 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
1823 			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
1824 				vm_object_collapse(object);
1825 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
1826 				if (object->type == OBJT_SWAP) {
1827 					swap_pager_freespace(object, offidxstart, count);
1828 				}
1829 				if (offidxend >= object->size &&
1830 				    offidxstart < object->size) {
1831 					object->size = offidxstart;
1832 				}
1833 			}
1834 		}
1835 
1836 		/*
1837 		 * Delete the entry (which may delete the object) only after
1838 		 * removing all pmap entries pointing to its pages.
1839 		 * (Otherwise, its page frames may be reallocated, and any
1840 		 * modify bits will be set in the wrong object!)
1841 		 */
1842 		vm_map_entry_delete(map, entry);
1843 		entry = next;
1844 	}
1845 	return (KERN_SUCCESS);
1846 }
1847 
1848 /*
1849  *	vm_map_remove:
1850  *
1851  *	Remove the given address range from the target map.
1852  *	This is the exported form of vm_map_delete.
1853  */
1854 int
1855 vm_map_remove(map, start, end)
1856 	vm_map_t map;
1857 	vm_offset_t start;
1858 	vm_offset_t end;
1859 {
1860 	int result, s = 0;
1861 
1862 	if (map == kmem_map || map == mb_map)
1863 		s = splvm();
1864 
1865 	vm_map_lock(map);
1866 	VM_MAP_RANGE_CHECK(map, start, end);
1867 	result = vm_map_delete(map, start, end);
1868 	vm_map_unlock(map);
1869 
1870 	if (map == kmem_map || map == mb_map)
1871 		splx(s);
1872 
1873 	return (result);
1874 }
1875 
1876 /*
1877  *	vm_map_check_protection:
1878  *
1879  *	Assert that the target map allows the specified
1880  *	privilege on the entire address region given.
1881  *	The entire region must be allocated.
1882  */
1883 boolean_t
1884 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
1885 			vm_prot_t protection)
1886 {
1887 	vm_map_entry_t entry;
1888 	vm_map_entry_t tmp_entry;
1889 
1890 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1891 		return (FALSE);
1892 	}
1893 	entry = tmp_entry;
1894 
1895 	while (start < end) {
1896 		if (entry == &map->header) {
1897 			return (FALSE);
1898 		}
1899 		/*
1900 		 * No holes allowed!
1901 		 */
1902 
1903 		if (start < entry->start) {
1904 			return (FALSE);
1905 		}
1906 		/*
1907 		 * Check protection associated with entry.
1908 		 */
1909 
1910 		if ((entry->protection & protection) != protection) {
1911 			return (FALSE);
1912 		}
1913 		/* go to next entry */
1914 
1915 		start = entry->end;
1916 		entry = entry->next;
1917 	}
1918 	return (TRUE);
1919 }
1920 
1921 /*
1922  * Split the pages in a map entry into a new object.  This affords
1923  * easier removal of unused pages, and keeps object inheritance from
1924  * having a negative impact on memory usage.
1925  */
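/*
 * The split below works by allocating a fresh object of the same type,
 * hooking it up as another shadow of the original object's backing object,
 * renaming the resident pages of the entry's range into it, and (for
 * swap-backed objects) copying the corresponding swap blocks, so the entry
 * ends up with a private object covering exactly its own range.
 */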
1926 static void
1927 vm_map_split(entry)
1928 	vm_map_entry_t entry;
1929 {
1930 	vm_page_t m;
1931 	vm_object_t orig_object, new_object, source;
1932 	vm_offset_t s, e;
1933 	vm_pindex_t offidxstart, offidxend, idx;
1934 	vm_size_t size;
1935 	vm_ooffset_t offset;
1936 
1937 	orig_object = entry->object.vm_object;
1938 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1939 		return;
1940 	if (orig_object->ref_count <= 1)
1941 		return;
1942 
1943 	offset = entry->offset;
1944 	s = entry->start;
1945 	e = entry->end;
1946 
1947 	offidxstart = OFF_TO_IDX(offset);
1948 	offidxend = offidxstart + OFF_TO_IDX(e - s);
1949 	size = offidxend - offidxstart;
1950 
1951 	new_object = vm_pager_allocate(orig_object->type,
1952 		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
1953 	if (new_object == NULL)
1954 		return;
1955 
1956 	source = orig_object->backing_object;
1957 	if (source != NULL) {
1958 		vm_object_reference(source);	/* Referenced by new_object */
1959 		TAILQ_INSERT_TAIL(&source->shadow_head,
1960 				  new_object, shadow_list);
1961 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
1962 		new_object->backing_object_offset =
1963 			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
1964 		new_object->backing_object = source;
1965 		source->shadow_count++;
1966 		source->generation++;
1967 	}
1968 
1969 	for (idx = 0; idx < size; idx++) {
1970 		vm_page_t m;
1971 
1972 	retry:
1973 		m = vm_page_lookup(orig_object, offidxstart + idx);
1974 		if (m == NULL)
1975 			continue;
1976 
1977 		/*
1978 		 * We must wait for pending I/O to complete before we can
1979 		 * rename the page.
1980 		 *
1981 		 * We do not have to VM_PROT_NONE the page as mappings should
1982 		 * not be changed by this operation.
1983 		 */
1984 		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
1985 			goto retry;
1986 
1987 		vm_page_busy(m);
1988 		vm_page_rename(m, new_object, idx);
1989 		/* page automatically made dirty by rename and cache handled */
1990 		vm_page_busy(m);
1991 	}
1992 
1993 	if (orig_object->type == OBJT_SWAP) {
1994 		vm_object_pip_add(orig_object, 1);
1995 		/*
1996 		 * copy orig_object pages into new_object
1997 		 * and destroy unneeded pages in
1998 		 * shadow object.
1999 		 */
2000 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
2001 		vm_object_pip_wakeup(orig_object);
2002 	}
2003 
2004 	for (idx = 0; idx < size; idx++) {
2005 		m = vm_page_lookup(new_object, idx);
2006 		if (m) {
2007 			vm_page_wakeup(m);
2008 		}
2009 	}
2010 
2011 	entry->object.vm_object = new_object;
2012 	entry->offset = 0LL;
2013 	vm_object_deallocate(orig_object);
2014 }
2015 
2016 /*
2017  *	vm_map_copy_entry:
2018  *
2019  *	Copies the contents of the source entry to the destination
2020  *	entry.  The entries *must* be aligned properly.
2021  */
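/*
 * For unwired source entries the copy is made lazily: the source range is
 * write-protected in the pmap, both entries are marked COW/NEEDS_COPY, and
 * they share the same object until a write fault forces a real copy.  Wired
 * entries cannot be made copy-on-write, so they are copied immediately via
 * vm_fault_copy_entry().
 */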
2022 static void
2023 vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2024 	vm_map_t src_map, dst_map;
2025 	vm_map_entry_t src_entry, dst_entry;
2026 {
2027 	vm_object_t src_object;
2028 
2029 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2030 		return;
2031 
2032 	if (src_entry->wired_count == 0) {
2033 
2034 		/*
2035 		 * If the source entry is marked needs_copy, it is already
2036 		 * write-protected.
2037 		 */
2038 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2039 			pmap_protect(src_map->pmap,
2040 			    src_entry->start,
2041 			    src_entry->end,
2042 			    src_entry->protection & ~VM_PROT_WRITE);
2043 		}
2044 
2045 		/*
2046 		 * Make a copy of the object.
2047 		 */
2048 		if ((src_object = src_entry->object.vm_object) != NULL) {
2049 
2050 			if ((src_object->handle == NULL) &&
2051 				(src_object->type == OBJT_DEFAULT ||
2052 				 src_object->type == OBJT_SWAP)) {
2053 				vm_object_collapse(src_object);
2054 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2055 					vm_map_split(src_entry);
2056 					src_object = src_entry->object.vm_object;
2057 				}
2058 			}
2059 
2060 			vm_object_reference(src_object);
2061 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2062 			dst_entry->object.vm_object = src_object;
2063 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2064 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2065 			dst_entry->offset = src_entry->offset;
2066 		} else {
2067 			dst_entry->object.vm_object = NULL;
2068 			dst_entry->offset = 0;
2069 		}
2070 
2071 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2072 		    dst_entry->end - dst_entry->start, src_entry->start);
2073 	} else {
2074 		/*
2075 		 * Of course, wired-down pages can't be set copy-on-write.
2076 		 * Cause wired pages to be copied into the new map by
2077 		 * simulating faults (the new pages are pageable).
2078 		 */
2079 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2080 	}
2081 }
2082 
2083 /*
2084  * vmspace_fork:
2085  * Create a new process vmspace structure and vm_map
2086  * based on those of an existing process.  The new map
2087  * is based on the old map, according to the inheritance
2088  * values on the regions in that map.
2089  *
2090  * The source map must not be locked.
2091  */
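/*
 * Illustrative call site (assumed, from the fork path):
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 */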
2092 struct vmspace *
2093 vmspace_fork(vm1)
2094 	struct vmspace *vm1;
2095 {
2096 	struct vmspace *vm2;
2097 	vm_map_t old_map = &vm1->vm_map;
2098 	vm_map_t new_map;
2099 	vm_map_entry_t old_entry;
2100 	vm_map_entry_t new_entry;
2101 	vm_object_t object;
2102 
2103 	vm_map_lock(old_map);
2104 
2105 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2106 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2107 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2108 	new_map = &vm2->vm_map;	/* XXX */
2109 	new_map->timestamp = 1;
2110 
2111 	old_entry = old_map->header.next;
2112 
2113 	while (old_entry != &old_map->header) {
2114 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2115 			panic("vm_map_fork: encountered a submap");
2116 
2117 		switch (old_entry->inheritance) {
2118 		case VM_INHERIT_NONE:
2119 			break;
2120 
2121 		case VM_INHERIT_SHARE:
2122 			/*
2123 			 * Clone the entry, creating the shared object if necessary.
2124 			 */
2125 			object = old_entry->object.vm_object;
2126 			if (object == NULL) {
2127 				object = vm_object_allocate(OBJT_DEFAULT,
2128 					atop(old_entry->end - old_entry->start));
2129 				old_entry->object.vm_object = object;
2130 				old_entry->offset = (vm_offset_t) 0;
2131 			}
2132 
2133 			/*
2134 			 * Add the reference before calling vm_object_shadow
2135 			 * to ensure that a shadow object is created.
2136 			 */
2137 			vm_object_reference(object);
2138 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2139 				vm_object_shadow(&old_entry->object.vm_object,
2140 					&old_entry->offset,
2141 					atop(old_entry->end - old_entry->start));
2142 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2143 				object = old_entry->object.vm_object;
2144 			}
2145 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2146 
2147 			/*
2148 			 * Clone the entry, referencing the shared object.
2149 			 */
2150 			new_entry = vm_map_entry_create(new_map);
2151 			*new_entry = *old_entry;
2152 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2153 			new_entry->wired_count = 0;
2154 
2155 			/*
2156 			 * Insert the entry into the new map -- we know we're
2157 			 * inserting at the end of the new map.
2158 			 */
2159 
2160 			vm_map_entry_link(new_map, new_map->header.prev,
2161 			    new_entry);
2162 
2163 			/*
2164 			 * Update the physical map
2165 			 */
2166 
2167 			pmap_copy(new_map->pmap, old_map->pmap,
2168 			    new_entry->start,
2169 			    (old_entry->end - old_entry->start),
2170 			    old_entry->start);
2171 			break;
2172 
2173 		case VM_INHERIT_COPY:
2174 			/*
2175 			 * Clone the entry and link into the map.
2176 			 */
2177 			new_entry = vm_map_entry_create(new_map);
2178 			*new_entry = *old_entry;
2179 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2180 			new_entry->wired_count = 0;
2181 			new_entry->object.vm_object = NULL;
2182 			vm_map_entry_link(new_map, new_map->header.prev,
2183 			    new_entry);
2184 			vm_map_copy_entry(old_map, new_map, old_entry,
2185 			    new_entry);
2186 			break;
2187 		}
2188 		old_entry = old_entry->next;
2189 	}
2190 
2191 	new_map->size = old_map->size;
2192 	vm_map_unlock(old_map);
2193 
2194 	return (vm2);
2195 }
2196 
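/*
 *	vm_map_stack:
 *
 *	Set up a grow-down stack in the range [addrbos, addrbos + max_ssize).
 *	Only the top init_ssize bytes (at most SGROWSIZ) are mapped initially;
 *	the remainder is recorded in the new entry's avail_ssize and is mapped
 *	on demand by vm_map_growstack().
 */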
2197 int
2198 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2199 	      vm_prot_t prot, vm_prot_t max, int cow)
2200 {
2201 	vm_map_entry_t prev_entry;
2202 	vm_map_entry_t new_stack_entry;
2203 	vm_size_t      init_ssize;
2204 	int            rv;
2205 
2206 	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
2207 		return (KERN_NO_SPACE);
2208 
2209 	if (max_ssize < SGROWSIZ)
2210 		init_ssize = max_ssize;
2211 	else
2212 		init_ssize = SGROWSIZ;
2213 
2214 	vm_map_lock(map);
2215 
2216 	/* If addr is already mapped, no go */
2217 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2218 		vm_map_unlock(map);
2219 		return (KERN_NO_SPACE);
2220 	}
2221 
2222 	/* If we can't accommodate max_ssize in the current mapping,
2223 	 * no go.  However, we need to be aware that subsequent user
2224 	 * mappings might map into the space we have reserved for
2225 	 * stack, and currently this space is not protected.
2226 	 *
2227 	 * Hopefully we will at least detect this condition
2228 	 * when we try to grow the stack.
2229 	 */
2230 	if ((prev_entry->next != &map->header) &&
2231 	    (prev_entry->next->start < addrbos + max_ssize)) {
2232 		vm_map_unlock(map);
2233 		return (KERN_NO_SPACE);
2234 	}
2235 
2236 	/* We initially map a stack of only init_ssize.  We will
2237 	 * grow as needed later.  Since this is to be a grow
2238 	 * down stack, we map at the top of the range.
2239 	 *
2240 	 * Note: we would normally expect prot and max to be
2241 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
2242 	 * eliminate these as input parameters, and just
2243 	 * pass these values here in the insert call.
2244 	 */
2245 	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
2246 	                   addrbos + max_ssize, prot, max, cow);
2247 
2248 	/* Now set the avail_ssize amount */
2249 	if (rv == KERN_SUCCESS){
2250 		if (prev_entry != &map->header)
2251 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
2252 		new_stack_entry = prev_entry->next;
2253 		if (new_stack_entry->end   != addrbos + max_ssize ||
2254 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
2255 			panic ("Bad entry start/end for new stack entry");
2256 		else
2257 			new_stack_entry->avail_ssize = max_ssize - init_ssize;
2258 	}
2259 
2260 	vm_map_unlock(map);
2261 	return (rv);
2262 }
2263 
2264 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2265  * desired address is already mapped, or if we successfully grow
2266  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2267  * stack range (this is strange, but preserves compatibility with
2268  * the grow function in vm_machdep.c).
2269  */
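/*
 * The growth itself computes the minimum amount needed to cover addr,
 * rounds it up to a multiple of SGROWSIZ, clamps it to the entry's
 * avail_ssize and (for the main process stack) to RLIMIT_STACK, and then
 * inserts a new VM_PROT_ALL entry immediately below the existing stack
 * entry.
 */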
2270 int
2271 vm_map_growstack (struct proc *p, vm_offset_t addr)
2272 {
2273 	vm_map_entry_t prev_entry;
2274 	vm_map_entry_t stack_entry;
2275 	vm_map_entry_t new_stack_entry;
2276 	struct vmspace *vm = p->p_vmspace;
2277 	vm_map_t map = &vm->vm_map;
2278 	vm_offset_t    end;
2279 	int      grow_amount;
2280 	int      rv;
2281 	int      is_procstack;
2282 Retry:
2283 	vm_map_lock_read(map);
2284 
2285 	/* If addr is already in the entry range, no need to grow. */
2286 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
2287 		vm_map_unlock_read(map);
2288 		return (KERN_SUCCESS);
2289 	}
2290 
2291 	if ((stack_entry = prev_entry->next) == &map->header) {
2292 		vm_map_unlock_read(map);
2293 		return (KERN_SUCCESS);
2294 	}
2295 	if (prev_entry == &map->header)
2296 		end = stack_entry->start - stack_entry->avail_ssize;
2297 	else
2298 		end = prev_entry->end;
2299 
2300 	/* This next test mimics the old grow function in vm_machdep.c.
2301 	 * It really doesn't quite make sense, but we do it anyway
2302 	 * for compatibility.
2303 	 *
2304 	 * If the stack is not growable, return success.  This signals the
2305 	 * caller to proceed as it normally would with normal VM.
2306 	 */
2307 	if (stack_entry->avail_ssize < 1 ||
2308 	    addr >= stack_entry->start ||
2309 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
2310 		vm_map_unlock_read(map);
2311 		return (KERN_SUCCESS);
2312 	}
2313 
2314 	/* Find the minimum grow amount */
2315 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
2316 	if (grow_amount > stack_entry->avail_ssize) {
2317 		vm_map_unlock_read(map);
2318 		return (KERN_NO_SPACE);
2319 	}
2320 
2321 	/* If there is no longer enough space between the entries,
2322 	 * fail and adjust the available space.  Note: this
2323 	 * should only happen if the user has mapped into the
2324 	 * stack area after the stack was created, and is
2325 	 * probably an error.
2326 	 *
2327 	 * This also effectively destroys any guard page the user
2328 	 * might have intended by limiting the stack size.
2329 	 */
2330 	if (grow_amount > stack_entry->start - end) {
2331 		if (vm_map_lock_upgrade(map))
2332 			goto Retry;
2333 
2334 		stack_entry->avail_ssize = stack_entry->start - end;
2335 
2336 		vm_map_unlock(map);
2337 		return (KERN_NO_SPACE);
2338 	}
2339 
2340 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
2341 
2342 	/* If this is the main process stack, see if we're over the
2343 	 * stack limit.
2344 	 */
2345 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2346 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2347 		vm_map_unlock_read(map);
2348 		return (KERN_NO_SPACE);
2349 	}
2350 
2351 	/* Round up the grow amount to a multiple of SGROWSIZ */
2352 	grow_amount = roundup (grow_amount, SGROWSIZ);
2353 	if (grow_amount > stack_entry->avail_ssize) {
2354 		grow_amount = stack_entry->avail_ssize;
2355 	}
2356 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2357 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2358 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2359 		              ctob(vm->vm_ssize);
2360 	}
2361 
2362 	if (vm_map_lock_upgrade(map))
2363 		goto Retry;
2364 
2365 	/* Get the preliminary new entry start value */
2366 	addr = stack_entry->start - grow_amount;
2367 
2368 	/* If this puts us into the previous entry, cut back our growth
2369 	 * to the available space.  Also, see the note above.
2370 	 */
2371 	if (addr < end) {
2372 		stack_entry->avail_ssize = stack_entry->start - end;
2373 		addr = end;
2374 	}
2375 
2376 	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2377 			   VM_PROT_ALL,
2378 			   VM_PROT_ALL,
2379 			   0);
2380 
2381 	/* Adjust the available stack space by the amount we grew. */
2382 	if (rv == KERN_SUCCESS) {
2383 		if (prev_entry != &map->header)
2384 			vm_map_clip_end(map, prev_entry, addr);
2385 		new_stack_entry = prev_entry->next;
2386 		if (new_stack_entry->end   != stack_entry->start  ||
2387 		    new_stack_entry->start != addr)
2388 			panic ("Bad stack grow start/end in new stack entry");
2389 		else {
2390 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
2391 							(new_stack_entry->end -
2392 							 new_stack_entry->start);
2393 			if (is_procstack)
2394 				vm->vm_ssize += btoc(new_stack_entry->end -
2395 						     new_stack_entry->start);
2396 		}
2397 	}
2398 
2399 	vm_map_unlock(map);
2400 	return (rv);
2401 
2402 }
2403 
2404 /*
2405  * Unshare the specified VM space for exec.  If other processes are
2406  * mapped to it, then create a new one.  The new vmspace starts out empty.
2407  */
2408 
2409 void
2410 vmspace_exec(struct proc *p) {
2411 	struct vmspace *oldvmspace = p->p_vmspace;
2412 	struct vmspace *newvmspace;
2413 	vm_map_t map = &p->p_vmspace->vm_map;
2414 
2415 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2416 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2417 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2418 	/*
2419 	 * This code is written like this for prototype purposes.  The
2420 	 * goal is to avoid running down the vmspace here, but to let the
2421 	 * other processes that are still using the vmspace finally run
2422 	 * it down.  Even though there is little or no chance of blocking
2423 	 * here, it is a good idea to keep this form for future mods.
2424 	 */
2425 	vmspace_free(oldvmspace);
2426 	p->p_vmspace = newvmspace;
2427 	pmap_pinit2(vmspace_pmap(newvmspace));
2428 	if (p == curproc)
2429 		pmap_activate(p);
2430 }
2431 
2432 /*
2433  * Unshare the specified VM space for forcing COW.  This
2434  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2435  */
2436 
2437 void
2438 vmspace_unshare(struct proc *p) {
2439 	struct vmspace *oldvmspace = p->p_vmspace;
2440 	struct vmspace *newvmspace;
2441 
2442 	if (oldvmspace->vm_refcnt == 1)
2443 		return;
2444 	newvmspace = vmspace_fork(oldvmspace);
2445 	vmspace_free(oldvmspace);
2446 	p->p_vmspace = newvmspace;
2447 	pmap_pinit2(vmspace_pmap(newvmspace));
2448 	if (p == curproc)
2449 		pmap_activate(p);
2450 }
2451 
2452 
2453 /*
2454  *	vm_map_lookup:
2455  *
2456  *	Finds the VM object, offset, and
2457  *	protection for a given virtual address in the
2458  *	specified map, assuming a page fault of the
2459  *	type specified.
2460  *
2461  *	Leaves the map in question locked for read; return
2462  *	values are guaranteed until a vm_map_lookup_done
2463  *	call is performed.  Note that the map argument
2464  *	is in/out; the returned map must be used in
2465  *	the call to vm_map_lookup_done.
2466  *
2467  *	A handle (out_entry) is returned for use in
2468  *	vm_map_lookup_done, to make that fast.
2469  *
2470  *	If a lookup is requested with "write protection"
2471  *	specified, the map may be changed to perform virtual
2472  *	copying operations, although the data referenced will
2473  *	remain the same.
2474  */
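/*
 * Sketch of the expected calling pattern (illustrative; roughly how the
 * page fault path uses it):
 *
 *	result = vm_map_lookup(&map, vaddr, fault_type,
 *	    &entry, &object, &pindex, &prot, &wired);
 *	if (result != KERN_SUCCESS)
 *		return (result);
 *	... use object + pindex, honoring prot ...
 *	vm_map_lookup_done(map, entry);
 */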
2475 int
2476 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2477 	      vm_offset_t vaddr,
2478 	      vm_prot_t fault_typea,
2479 	      vm_map_entry_t *out_entry,	/* OUT */
2480 	      vm_object_t *object,		/* OUT */
2481 	      vm_pindex_t *pindex,		/* OUT */
2482 	      vm_prot_t *out_prot,		/* OUT */
2483 	      boolean_t *wired)			/* OUT */
2484 {
2485 	vm_map_entry_t entry;
2486 	vm_map_t map = *var_map;
2487 	vm_prot_t prot;
2488 	vm_prot_t fault_type = fault_typea;
2489 
2490 RetryLookup:;
2491 
2492 	/*
2493 	 * Lookup the faulting address.
2494 	 */
2495 
2496 	vm_map_lock_read(map);
2497 
2498 #define	RETURN(why) \
2499 		{ \
2500 		vm_map_unlock_read(map); \
2501 		return(why); \
2502 		}
2503 
2504 	/*
2505 	 * If the map has an interesting hint, try it before calling the
2506 	 * full-blown lookup routine.
2507 	 */
2508 
2509 	entry = map->hint;
2510 
2511 	*out_entry = entry;
2512 
2513 	if ((entry == &map->header) ||
2514 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2515 		vm_map_entry_t tmp_entry;
2516 
2517 		/*
2518 		 * Entry was either not a valid hint, or the vaddr was not
2519 		 * contained in the entry, so do a full lookup.
2520 		 */
2521 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2522 			RETURN(KERN_INVALID_ADDRESS);
2523 
2524 		entry = tmp_entry;
2525 		*out_entry = entry;
2526 	}
2527 
2528 	/*
2529 	 * Handle submaps.
2530 	 */
2531 
2532 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2533 		vm_map_t old_map = map;
2534 
2535 		*var_map = map = entry->object.sub_map;
2536 		vm_map_unlock_read(old_map);
2537 		goto RetryLookup;
2538 	}
2539 
2540 	/*
2541 	 * Check whether this task is allowed to have this page.
2542 	 * Note the special case for MAP_ENTRY_COW
2543 	 * pages with an override.  This is to implement a forced
2544 	 * COW for debuggers.
2545 	 */
2546 
2547 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2548 		prot = entry->max_protection;
2549 	else
2550 		prot = entry->protection;
2551 
2552 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2553 	if ((fault_type & prot) != fault_type) {
2554 		RETURN(KERN_PROTECTION_FAILURE);
2555 	}
2556 
2557 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
2558 	    (entry->eflags & MAP_ENTRY_COW) &&
2559 	    (fault_type & VM_PROT_WRITE) &&
2560 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2561 		RETURN(KERN_PROTECTION_FAILURE);
2562 	}
2563 
2564 	/*
2565 	 * If this page is not pageable, we have to get it for all possible
2566 	 * accesses.
2567 	 */
2568 
2569 	*wired = (entry->wired_count != 0);
2570 	if (*wired)
2571 		prot = fault_type = entry->protection;
2572 
2573 	/*
2574 	 * If the entry was copy-on-write, we either resolve the copy now or drop write access.
2575 	 */
2576 
2577 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2578 		/*
2579 		 * If we want to write the page, we may as well handle that
2580 		 * now since we've got the map locked.
2581 		 *
2582 		 * If we don't need to write the page, we just demote the
2583 		 * permissions allowed.
2584 		 */
2585 
2586 		if (fault_type & VM_PROT_WRITE) {
2587 			/*
2588 			 * Make a new object, and place it in the object
2589 			 * chain.  Note that no new references have appeared
2590 			 * -- one just moved from the map to the new
2591 			 * object.
2592 			 */
2593 
2594 			if (vm_map_lock_upgrade(map))
2595 				goto RetryLookup;
2596 
2597 			vm_object_shadow(
2598 			    &entry->object.vm_object,
2599 			    &entry->offset,
2600 			    atop(entry->end - entry->start));
2601 
2602 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2603 			vm_map_lock_downgrade(map);
2604 		} else {
2605 			/*
2606 			 * We're attempting to read a copy-on-write page --
2607 			 * don't allow writes.
2608 			 */
2609 
2610 			prot &= ~VM_PROT_WRITE;
2611 		}
2612 	}
2613 
2614 	/*
2615 	 * Create an object if necessary.
2616 	 */
2617 	if (entry->object.vm_object == NULL) {
2618 		if (vm_map_lock_upgrade(map))
2619 			goto RetryLookup;
2620 
2621 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2622 		    atop(entry->end - entry->start));
2623 		entry->offset = 0;
2624 		vm_map_lock_downgrade(map);
2625 	}
2626 
2627 	/*
2628 	 * Return the object/offset from this entry.  If the entry was
2629 	 * copy-on-write or empty, it has been fixed up.
2630 	 */
2631 
2632 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
2633 	*object = entry->object.vm_object;
2634 
2635 	/*
2636 	 * Return the protection computed for this access.
2637 	 */
2638 
2639 	*out_prot = prot;
2640 	return (KERN_SUCCESS);
2641 
2642 #undef	RETURN
2643 }
2644 
2645 /*
2646  *	vm_map_lookup_done:
2647  *
2648  *	Releases locks acquired by a vm_map_lookup
2649  *	(according to the handle returned by that lookup).
2650  */
2651 
2652 void
2653 vm_map_lookup_done(map, entry)
2654 	vm_map_t map;
2655 	vm_map_entry_t entry;
2656 {
2657 	/*
2658 	 * Unlock the main-level map
2659 	 */
2660 
2661 	vm_map_unlock_read(map);
2662 }
2663 
2664 /*
2665  * Implement uiomove with VM operations.  This, together with its collateral
2666  * changes, supports every combination of source object modification and
2667  * COW-type operation.
2668  */
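/*
 * Parameters, as used below: mapa is the destination map, srcobject and cp
 * name the source object and the byte offset within it, cnta is the byte
 * count, uaddra is the destination user address, and *npages (when not
 * NULL) accumulates the number of source pages that were mapped directly.
 */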
2669 int
2670 vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
2671 	vm_map_t mapa;
2672 	vm_object_t srcobject;
2673 	off_t cp;
2674 	int cnta;
2675 	vm_offset_t uaddra;
2676 	int *npages;
2677 {
2678 	vm_map_t map;
2679 	vm_object_t first_object, oldobject, object;
2680 	vm_map_entry_t entry;
2681 	vm_prot_t prot;
2682 	boolean_t wired;
2683 	int tcnt, rv;
2684 	vm_offset_t uaddr, start, end, tend;
2685 	vm_pindex_t first_pindex, osize, oindex;
2686 	off_t ooffset;
2687 	int cnt;
2688 
2689 	if (npages)
2690 		*npages = 0;
2691 
2692 	cnt = cnta;
2693 	uaddr = uaddra;
2694 
2695 	while (cnt > 0) {
2696 		map = mapa;
2697 
2698 		if ((vm_map_lookup(&map, uaddr,
2699 			VM_PROT_READ, &entry, &first_object,
2700 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
2701 			return EFAULT;
2702 		}
2703 
2704 		vm_map_clip_start(map, entry, uaddr);
2705 
2706 		tcnt = cnt;
2707 		tend = uaddr + tcnt;
2708 		if (tend > entry->end) {
2709 			tcnt = entry->end - uaddr;
2710 			tend = entry->end;
2711 		}
2712 
2713 		vm_map_clip_end(map, entry, tend);
2714 
2715 		start = entry->start;
2716 		end = entry->end;
2717 
2718 		osize = atop(tcnt);
2719 
2720 		oindex = OFF_TO_IDX(cp);
2721 		if (npages) {
2722 			vm_pindex_t idx;
2723 			for (idx = 0; idx < osize; idx++) {
2724 				vm_page_t m;
2725 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
2726 					vm_map_lookup_done(map, entry);
2727 					return 0;
2728 				}
2729 				/*
2730 				 * disallow busy or invalid pages, but allow
2731 				 * m->busy pages if they are entirely valid.
2732 				 */
2733 				if ((m->flags & PG_BUSY) ||
2734 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
2735 					vm_map_lookup_done(map, entry);
2736 					return 0;
2737 				}
2738 			}
2739 		}
2740 
2741 /*
2742  * If we are changing an existing map entry, just redirect
2743  * the object, and change mappings.
2744  */
2745 		if ((first_object->type == OBJT_VNODE) &&
2746 			((oldobject = entry->object.vm_object) == first_object)) {
2747 
2748 			if ((entry->offset != cp) || (oldobject != srcobject)) {
2749 				/*
2750 				 * Remove old window into the file
2751 				 */
2752 				pmap_remove (map->pmap, uaddr, tend);
2753 
2754 				/*
2755 				 * Force copy on write for mmapped regions
2756 				 */
2757 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2758 
2759 				/*
2760 				 * Point the object appropriately
2761 				 */
2762 				if (oldobject != srcobject) {
2763 
2764 				/*
2765 				 * Set the object optimization hint flag
2766 				 */
2767 					vm_object_set_flag(srcobject, OBJ_OPT);
2768 					vm_object_reference(srcobject);
2769 					entry->object.vm_object = srcobject;
2770 
2771 					if (oldobject) {
2772 						vm_object_deallocate(oldobject);
2773 					}
2774 				}
2775 
2776 				entry->offset = cp;
2777 				map->timestamp++;
2778 			} else {
2779 				pmap_remove (map->pmap, uaddr, tend);
2780 			}
2781 
2782 		} else if ((first_object->ref_count == 1) &&
2783 			(first_object->size == osize) &&
2784 			((first_object->type == OBJT_DEFAULT) ||
2785 				(first_object->type == OBJT_SWAP)) ) {
2786 
2787 			oldobject = first_object->backing_object;
2788 
2789 			if ((first_object->backing_object_offset != cp) ||
2790 				(oldobject != srcobject)) {
2791 				/*
2792 				 * Remove old window into the file
2793 				 */
2794 				pmap_remove (map->pmap, uaddr, tend);
2795 
2796 				/*
2797 				 * Remove unneeded old pages
2798 				 */
2799 				vm_object_page_remove(first_object, 0, 0, 0);
2800 
2801 				/*
2802 				 * Invalidate swap space
2803 				 */
2804 				if (first_object->type == OBJT_SWAP) {
2805 					swap_pager_freespace(first_object,
2806 						0,
2807 						first_object->size);
2808 				}
2809 
2810 				/*
2811 				 * Force copy on write for mmapped regions
2812 				 */
2813 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2814 
2815 				/*
2816 				 * Point the object appropriately
2817 				 */
2818 				if (oldobject != srcobject) {
2819 
2820 				/*
2821 				 * Set the object optimization hint flag
2822 				 */
2823 					vm_object_set_flag(srcobject, OBJ_OPT);
2824 					vm_object_reference(srcobject);
2825 
2826 					if (oldobject) {
2827 						TAILQ_REMOVE(&oldobject->shadow_head,
2828 							first_object, shadow_list);
2829 						oldobject->shadow_count--;
2830 						/* XXX bump generation? */
2831 						vm_object_deallocate(oldobject);
2832 					}
2833 
2834 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
2835 						first_object, shadow_list);
2836 					srcobject->shadow_count++;
2837 					/* XXX bump generation? */
2838 
2839 					first_object->backing_object = srcobject;
2840 				}
2841 				first_object->backing_object_offset = cp;
2842 				map->timestamp++;
2843 			} else {
2844 				pmap_remove (map->pmap, uaddr, tend);
2845 			}
2846 /*
2847  * Otherwise, we have to do a logical mmap.
2848  */
2849 		} else {
2850 
2851 			vm_object_set_flag(srcobject, OBJ_OPT);
2852 			vm_object_reference(srcobject);
2853 
2854 			pmap_remove (map->pmap, uaddr, tend);
2855 
2856 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2857 			vm_map_lock_upgrade(map);
2858 
2859 			if (entry == &map->header) {
2860 				map->first_free = &map->header;
2861 			} else if (map->first_free->start >= start) {
2862 				map->first_free = entry->prev;
2863 			}
2864 
2865 			SAVE_HINT(map, entry->prev);
2866 			vm_map_entry_delete(map, entry);
2867 
2868 			object = srcobject;
2869 			ooffset = cp;
2870 
2871 			rv = vm_map_insert(map, object, ooffset, start, tend,
2872 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
2873 
2874 			if (rv != KERN_SUCCESS)
2875 				panic("vm_uiomove: could not insert new entry: %d", rv);
2876 		}
2877 
2878 /*
2879  * Map the window directly, if it is already in memory
2880  */
2881 		pmap_object_init_pt(map->pmap, uaddr,
2882 			srcobject, oindex, tcnt, 0);
2883 
2884 		map->timestamp++;
2885 		vm_map_unlock(map);
2886 
2887 		cnt -= tcnt;
2888 		uaddr += tcnt;
2889 		cp += tcnt;
2890 		if (npages)
2891 			*npages += osize;
2892 	}
2893 	return 0;
2894 }
2895 
2896 /*
2897  * Performs the copy-on-write operations necessary to allow the virtual copies
2898  * into user space to work.  This has to be called for write(2) system calls
2899  * from other processes, file unlinking, and file size shrinkage.
2900  */
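/*
 * This is done by walking every object that currently shadows the given
 * object, filling each shadow's invalid pages with copies of the
 * corresponding source pages, and then detaching the shadow from its
 * backing object so that later changes to the source are no longer visible
 * through it.
 */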
2901 void
2902 vm_freeze_copyopts(object, froma, toa)
2903 	vm_object_t object;
2904 	vm_pindex_t froma, toa;
2905 {
2906 	int rv;
2907 	vm_object_t robject;
2908 	vm_pindex_t idx;
2909 
2910 	if ((object == NULL) ||
2911 		((object->flags & OBJ_OPT) == 0))
2912 		return;
2913 
2914 	if (object->shadow_count > object->ref_count)
2915 		panic("vm_freeze_copyopts: sc > rc");
2916 
2917 	while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
2918 		vm_pindex_t bo_pindex;
2919 		vm_page_t m_in, m_out;
2920 
2921 		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
2922 
2923 		vm_object_reference(robject);
2924 
2925 		vm_object_pip_wait(robject, "objfrz");
2926 
2927 		if (robject->ref_count == 1) {
2928 			vm_object_deallocate(robject);
2929 			continue;
2930 		}
2931 
2932 		vm_object_pip_add(robject, 1);
2933 
2934 		for (idx = 0; idx < robject->size; idx++) {
2935 
2936 			m_out = vm_page_grab(robject, idx,
2937 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2938 
2939 			if (m_out->valid == 0) {
2940 				m_in = vm_page_grab(object, bo_pindex + idx,
2941 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2942 				if (m_in->valid == 0) {
2943 					rv = vm_pager_get_pages(object, &m_in, 1, 0);
2944 					if (rv != VM_PAGER_OK) {
2945 						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
2946 						continue;
2947 					}
2948 					vm_page_deactivate(m_in);
2949 				}
2950 
2951 				vm_page_protect(m_in, VM_PROT_NONE);
2952 				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
2953 				m_out->valid = m_in->valid;
2954 				vm_page_dirty(m_out);
2955 				vm_page_activate(m_out);
2956 				vm_page_wakeup(m_in);
2957 			}
2958 			vm_page_wakeup(m_out);
2959 		}
2960 
2961 		object->shadow_count--;
2962 		object->ref_count--;
2963 		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
2964 		robject->backing_object = NULL;
2965 		robject->backing_object_offset = 0;
2966 
2967 		vm_object_pip_wakeup(robject);
2968 		vm_object_deallocate(robject);
2969 	}
2970 
2971 	vm_object_clear_flag(object, OBJ_OPT);
2972 }
2973 
2974 #include "opt_ddb.h"
2975 #ifdef DDB
2976 #include <sys/kernel.h>
2977 
2978 #include <ddb/ddb.h>
2979 
2980 /*
2981  *	vm_map_print:	[ debug ]
2982  */
2983 DB_SHOW_COMMAND(map, vm_map_print)
2984 {
2985 	static int nlines;
2986 	/* XXX convert args. */
2987 	vm_map_t map = (vm_map_t)addr;
2988 	boolean_t full = have_addr;
2989 
2990 	vm_map_entry_t entry;
2991 
2992 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
2993 	    (void *)map,
2994 	    (void *)map->pmap, map->nentries, map->timestamp);
2995 	nlines++;
2996 
2997 	if (!full && db_indent)
2998 		return;
2999 
3000 	db_indent += 2;
3001 	for (entry = map->header.next; entry != &map->header;
3002 	    entry = entry->next) {
3003 		db_iprintf("map entry %p: start=%p, end=%p\n",
3004 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3005 		nlines++;
3006 		{
3007 			static char *inheritance_name[4] =
3008 			{"share", "copy", "none", "donate_copy"};
3009 
3010 			db_iprintf(" prot=%x/%x/%s",
3011 			    entry->protection,
3012 			    entry->max_protection,
3013 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3014 			if (entry->wired_count != 0)
3015 				db_printf(", wired");
3016 		}
3017 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3018 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3019 			db_printf(", share=%p, offset=0x%lx\n",
3020 			    (void *)entry->object.sub_map,
3021 			    (long)entry->offset);
3022 			nlines++;
3023 			if ((entry->prev == &map->header) ||
3024 			    (entry->prev->object.sub_map !=
3025 				entry->object.sub_map)) {
3026 				db_indent += 2;
3027 				vm_map_print((db_expr_t)(intptr_t)
3028 					     entry->object.sub_map,
3029 					     full, 0, (char *)0);
3030 				db_indent -= 2;
3031 			}
3032 		} else {
3033 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3034 			db_printf(", object=%p, offset=0x%lx",
3035 			    (void *)entry->object.vm_object,
3036 			    (long)entry->offset);
3037 			if (entry->eflags & MAP_ENTRY_COW)
3038 				db_printf(", copy (%s)",
3039 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3040 			db_printf("\n");
3041 			nlines++;
3042 
3043 			if ((entry->prev == &map->header) ||
3044 			    (entry->prev->object.vm_object !=
3045 				entry->object.vm_object)) {
3046 				db_indent += 2;
3047 				vm_object_print((db_expr_t)(intptr_t)
3048 						entry->object.vm_object,
3049 						full, 0, (char *)0);
3050 				nlines += 4;
3051 				db_indent -= 2;
3052 			}
3053 		}
3054 	}
3055 	db_indent -= 2;
3056 	if (db_indent == 0)
3057 		nlines = 0;
3058 }
3059 
3060 
3061 DB_SHOW_COMMAND(procvm, procvm)
3062 {
3063 	struct proc *p;
3064 
3065 	if (have_addr) {
3066 		p = (struct proc *) addr;
3067 	} else {
3068 		p = curproc;
3069 	}
3070 
3071 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3072 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3073 	    (void *)vmspace_pmap(p->p_vmspace));
3074 
3075 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3076 }
3077 
3078 #endif /* DDB */
3079