xref: /freebsd/sys/vm/vm_map.c (revision e972780a114bdef5234e4c83b1908c69a899dc5a)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_map.c,v 1.162 1999/05/16 05:07:31 alc Exp $
65  */
66 
67 /*
68  *	Virtual memory mapping module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/proc.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #include <sys/resourcevar.h>
79 
80 #include <vm/vm.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_prot.h>
83 #include <vm/vm_inherit.h>
84 #include <sys/lock.h>
85 #include <vm/pmap.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_pager.h>
90 #include <vm/vm_kern.h>
91 #include <vm/vm_extern.h>
92 #include <vm/default_pager.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_zone.h>
95 
96 /*
97  *	Virtual memory maps provide for the mapping, protection,
98  *	and sharing of virtual memory objects.  In addition,
99  *	this module provides for an efficient virtual copy of
100  *	memory from one map to another.
101  *
102  *	Synchronization is required prior to most operations.
103  *
104  *	Maps consist of an ordered doubly-linked list of simple
105  *	entries; a single hint is used to speed up lookups.
106  *
107  *	Since portions of maps are specified by start/end addresses,
108  *	which may not align with existing map entries, all
109  *	routines merely "clip" entries to these start/end values.
110  *	[That is, an entry is split into two, bordering at a
111  *	start or end value.]  Note that these clippings may not
112  *	always be necessary (as the two resulting entries are then
113  *	not changed); however, the clipping is done for convenience.
114  *
115  *	As mentioned above, virtual copy operations are performed
116  *	by copying VM object references from one map to
117  *	another, and then marking both regions as copy-on-write.
118  */
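
/*
 *	Illustrative sketch of the clipping rule described above (the
 *	addresses are hypothetical, not taken from this file): if an
 *	operation on [0x2000, 0x3000) hits an entry covering
 *	[0x1000, 0x5000), the entry is first split at 0x2000 and then at
 *	0x3000, leaving three entries:
 *
 *		[0x1000, 0x2000)  [0x2000, 0x3000)  [0x3000, 0x5000)
 *
 *	Only the middle entry is then modified; the outer two are left
 *	unchanged, which is why a later vm_map_simplify_entry() pass can
 *	often merge them back together.
 */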
119 
120 /*
121  *	vm_map_startup:
122  *
123  *	Initialize the vm_map module.  Must be called before
124  *	any other vm_map routines.
125  *
126  *	Map and entry structures are allocated from the general
127  *	purpose memory pool with some exceptions:
128  *
129  *	- The kernel map and kmem submap are allocated statically.
130  *	- Kernel map entries are allocated out of a static pool.
131  *
132  *	These restrictions are necessary since malloc() uses the
133  *	maps and requires map entries.
134  */
135 
136 extern char kstack[];
137 extern int inmprotect;
138 
139 static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
140 static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
141 static struct vm_object kmapentobj, mapentobj, mapobj;
142 #define MAP_ENTRY_INIT	128
143 static struct vm_map_entry map_entry_init[MAX_MAPENT];
144 static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
145 static struct vm_map map_init[MAX_KMAP];
146 
147 static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
148 static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
149 static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
150 static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
151 static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
152 static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
153 static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
154 		vm_map_entry_t));
155 static void vm_map_split __P((vm_map_entry_t));
156 
157 void
158 vm_map_startup()
159 {
160 	mapzone = &mapzone_store;
161 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
162 		map_init, MAX_KMAP);
163 	kmapentzone = &kmapentzone_store;
164 	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
165 		kmap_entry_init, MAX_KMAPENT);
166 	mapentzone = &mapentzone_store;
167 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
168 		map_entry_init, MAX_MAPENT);
169 }
170 
171 /*
172  * Allocate a vmspace structure, including a vm_map and pmap,
173  * and initialize those structures.  The refcnt is set to 1.
174  * The remaining fields must be initialized by the caller.
175  */
176 struct vmspace *
177 vmspace_alloc(min, max)
178 	vm_offset_t min, max;
179 {
180 	struct vmspace *vm;
181 
182 	vm = zalloc(vmspace_zone);
183 	bzero(&vm->vm_map, sizeof vm->vm_map);
184 	vm_map_init(&vm->vm_map, min, max);
185 	pmap_pinit(vmspace_pmap(vm));
186 	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
187 	vm->vm_refcnt = 1;
188 	vm->vm_shm = NULL;
189 	return (vm);
190 }
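
/*
 *	Usage sketch (illustrative only; VM_MIN_ADDRESS and
 *	VM_MAXUSER_ADDRESS are the usual <machine/vmparam.h> bounds and
 *	are an assumption here, not something this file defines):
 *
 *		struct vmspace *vm;
 *
 *		vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *		...
 *		vmspace_free(vm);
 *
 *	vmspace_alloc() leaves vm_refcnt at 1; every additional sharer is
 *	expected to bump vm_refcnt, and vmspace_free() only tears the
 *	vmspace down when the last reference is dropped.
 */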
191 
192 void
193 vm_init2(void) {
194 	zinitna(kmapentzone, &kmapentobj,
195 		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
196 	zinitna(mapentzone, &mapentobj,
197 		NULL, 0, 0, 0, 1);
198 	zinitna(mapzone, &mapobj,
199 		NULL, 0, 0, 0, 1);
200 	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
201 	pmap_init2();
202 	vm_object_init2();
203 }
204 
205 void
206 vmspace_free(vm)
207 	struct vmspace *vm;
208 {
209 
210 	if (vm->vm_refcnt == 0)
211 		panic("vmspace_free: attempt to free already freed vmspace");
212 
213 	if (--vm->vm_refcnt == 0) {
214 
215 		/*
216 		 * Lock the map, to wait out all other references to it.
217 		 * Delete all of the mappings and pages they hold, then call
218 		 * the pmap module to reclaim anything left.
219 		 */
220 		vm_map_lock(&vm->vm_map);
221 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
222 		    vm->vm_map.max_offset);
223 		vm_map_unlock(&vm->vm_map);
224 
225 		pmap_release(vmspace_pmap(vm));
226 		zfree(vmspace_zone, vm);
227 	}
228 }
229 
230 /*
231  *	vm_map_create:
232  *
233  *	Creates and returns a new empty VM map with
234  *	the given physical map structure, and having
235  *	the given lower and upper address bounds.
236  */
237 vm_map_t
238 vm_map_create(pmap, min, max)
239 	pmap_t pmap;
240 	vm_offset_t min, max;
241 {
242 	vm_map_t result;
243 
244 	result = zalloc(mapzone);
245 	vm_map_init(result, min, max);
246 	result->pmap = pmap;
247 	return (result);
248 }
249 
250 /*
251  * Initialize an existing vm_map structure
252  * such as that in the vmspace structure.
253  * The pmap is set elsewhere.
254  */
255 void
256 vm_map_init(map, min, max)
257 	struct vm_map *map;
258 	vm_offset_t min, max;
259 {
260 	map->header.next = map->header.prev = &map->header;
261 	map->nentries = 0;
262 	map->size = 0;
263 	map->system_map = 0;
264 	map->min_offset = min;
265 	map->max_offset = max;
266 	map->first_free = &map->header;
267 	map->hint = &map->header;
268 	map->timestamp = 0;
269 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
270 }
271 
272 /*
273  *	vm_map_entry_dispose:	[ internal use only ]
274  *
275  *	Inverse of vm_map_entry_create.
276  */
277 static void
278 vm_map_entry_dispose(map, entry)
279 	vm_map_t map;
280 	vm_map_entry_t entry;
281 {
282 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
283 }
284 
285 /*
286  *	vm_map_entry_create:	[ internal use only ]
287  *
288  *	Allocates a VM map entry for insertion.
289  *	No entry fields are filled in.
290  */
291 static vm_map_entry_t
292 vm_map_entry_create(map)
293 	vm_map_t map;
294 {
295 	return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
296 }
297 
298 /*
299  *	vm_map_entry_{un,}link:
300  *
301  *	Insert/remove entries from maps.
302  */
303 static __inline void
304 vm_map_entry_link(vm_map_t map,
305 		  vm_map_entry_t after_where,
306 		  vm_map_entry_t entry)
307 {
308 	map->nentries++;
309 	entry->prev = after_where;
310 	entry->next = after_where->next;
311 	entry->next->prev = entry;
312 	after_where->next = entry;
313 }
314 
315 static __inline void
316 vm_map_entry_unlink(vm_map_t map,
317 		    vm_map_entry_t entry)
318 {
319 	vm_map_entry_t prev = entry->prev;
320 	vm_map_entry_t next = entry->next;
321 
322 	next->prev = prev;
323 	prev->next = next;
324 	map->nentries--;
325 }
326 
327 /*
328  *	SAVE_HINT:
329  *
330  *	Saves the specified entry as the hint for
331  *	future lookups.
332  */
333 #define	SAVE_HINT(map,value) \
334 		(map)->hint = (value);
335 
336 /*
337  *	vm_map_lookup_entry:	[ internal use only ]
338  *
339  *	Finds the map entry containing (or
340  *	immediately preceding) the specified address
341  *	in the given map; the entry is returned
342  *	in the "entry" parameter.  The boolean
343  *	result indicates whether the address is
344  *	actually contained in the map.
345  */
346 boolean_t
347 vm_map_lookup_entry(map, address, entry)
348 	vm_map_t map;
349 	vm_offset_t address;
350 	vm_map_entry_t *entry;	/* OUT */
351 {
352 	vm_map_entry_t cur;
353 	vm_map_entry_t last;
354 
355 	/*
356 	 * Start looking either from the head of the list, or from the hint.
357 	 */
358 
359 	cur = map->hint;
360 
361 	if (cur == &map->header)
362 		cur = cur->next;
363 
364 	if (address >= cur->start) {
365 		/*
366 		 * Go from hint to end of list.
367 		 *
368 		 * But first, make a quick check to see if we are already looking
369 		 * at the entry we want (which is usually the case). Note also
370 		 * that we don't need to save the hint here... it is the same
371 		 * hint (unless we are at the header, in which case the hint
372 		 * didn't buy us anything anyway).
373 		 */
374 		last = &map->header;
375 		if ((cur != last) && (cur->end > address)) {
376 			*entry = cur;
377 			return (TRUE);
378 		}
379 	} else {
380 		/*
381 		 * Go from start to hint, *inclusively*
382 		 */
383 		last = cur->next;
384 		cur = map->header.next;
385 	}
386 
387 	/*
388 	 * Search linearly
389 	 */
390 
391 	while (cur != last) {
392 		if (cur->end > address) {
393 			if (address >= cur->start) {
394 				/*
395 				 * Save this lookup for future hints, and
396 				 * return
397 				 */
398 
399 				*entry = cur;
400 				SAVE_HINT(map, cur);
401 				return (TRUE);
402 			}
403 			break;
404 		}
405 		cur = cur->next;
406 	}
407 	*entry = cur->prev;
408 	SAVE_HINT(map, *entry);
409 	return (FALSE);
410 }
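
/*
 *	Typical caller pattern, as a sketch (the caller must already hold
 *	the map lock; "addr" is a hypothetical address):
 *
 *		vm_map_entry_t entry;
 *
 *		if (vm_map_lookup_entry(map, addr, &entry)) {
 *			... addr falls within [entry->start, entry->end) ...
 *		} else {
 *			... entry is the entry preceding addr, possibly
 *			    &map->header if addr precedes every entry ...
 *		}
 */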
411 
412 /*
413  *	vm_map_insert:
414  *
415  *	Inserts the given whole VM object into the target
416  *	map at the specified address range.  The object's
417  *	size should match that of the address range.
418  *
419  *	Requires that the map be locked, and leaves it so.
420  *
421  *	If object is non-NULL, ref count must be bumped by caller
422  *	prior to making call to account for the new entry.
423  */
424 int
425 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
426 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
427 	      int cow)
428 {
429 	vm_map_entry_t new_entry;
430 	vm_map_entry_t prev_entry;
431 	vm_map_entry_t temp_entry;
432 	u_char protoeflags;
433 
434 	if ((object != NULL) && (cow & MAP_NOFAULT)) {
435 		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
436 	}
437 
438 	/*
439 	 * Check that the start and end points are not bogus.
440 	 */
441 
442 	if ((start < map->min_offset) || (end > map->max_offset) ||
443 	    (start >= end))
444 		return (KERN_INVALID_ADDRESS);
445 
446 	/*
447 	 * Find the entry prior to the proposed starting address; if it's part
448 	 * of an existing entry, this range is bogus.
449 	 */
450 
451 	if (vm_map_lookup_entry(map, start, &temp_entry))
452 		return (KERN_NO_SPACE);
453 
454 	prev_entry = temp_entry;
455 
456 	/*
457 	 * Assert that the next entry doesn't overlap the end point.
458 	 */
459 
460 	if ((prev_entry->next != &map->header) &&
461 	    (prev_entry->next->start < end))
462 		return (KERN_NO_SPACE);
463 
464 	protoeflags = 0;
465 
466 	if (cow & MAP_COPY_ON_WRITE)
467 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
468 
469 	if (cow & MAP_NOFAULT)
470 		protoeflags |= MAP_ENTRY_NOFAULT;
471 
472 	if (object) {
473 		/*
474 		 * When object is non-NULL, it could be shared with another
475 		 * process.  We have to set or clear OBJ_ONEMAPPING
476 		 * appropriately.
477 		 */
478 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
479 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
480 		} else {
481 			vm_object_set_flag(object, OBJ_ONEMAPPING);
482 		}
483 	} else if (
484 	    (prev_entry != &map->header) &&
485 	    ((prev_entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) &&
486 		((prev_entry->object.vm_object == NULL) ||
487 		 (prev_entry->object.vm_object->type == OBJT_DEFAULT) ||
488 		 (prev_entry->object.vm_object->type == OBJT_SWAP)) &&
489 	    (prev_entry->end == start) &&
490 	    (prev_entry->wired_count == 0)
491 	) {
492 		if ((protoeflags == prev_entry->eflags) &&
493 		    ((cow & MAP_NOFAULT) ||
494 		     vm_object_coalesce(prev_entry->object.vm_object,
495 					OFF_TO_IDX(prev_entry->offset),
496 					(vm_size_t) (prev_entry->end - prev_entry->start),
497 					(vm_size_t) (end - prev_entry->end)))) {
498 
499 			/*
500 			 * We were able to extend the object.  Determine if we
501 			 * can extend the previous map entry to include the
502 			 * new range as well.
503 			 */
504 			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
505 			    (prev_entry->protection == prot) &&
506 			    (prev_entry->max_protection == max)) {
507 
508 				map->size += (end - prev_entry->end);
509 				prev_entry->end = end;
510 				return (KERN_SUCCESS);
511 			}
512 
513 			/*
514 			 * If we can extend the object but cannot extend the
515 			 * map entry, we have to create a new map entry.  We
516 			 * must bump the ref count on the extended object to
517 			 * account for it.
518 			 */
519 			object = prev_entry->object.vm_object;
520 			offset = prev_entry->offset +
521 			    (prev_entry->end - prev_entry->start);
522 			vm_object_reference(object);
523 		}
524 	}
525 
526 	/*
527 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
528 	 * in things like the buffer map where we manage kva but do not manage
529 	 * backing objects.
530 	 */
531 
532 	/*
533 	 * Create a new entry
534 	 */
535 
536 	new_entry = vm_map_entry_create(map);
537 	new_entry->start = start;
538 	new_entry->end = end;
539 
540 	new_entry->eflags = protoeflags;
541 	new_entry->object.vm_object = object;
542 	new_entry->offset = offset;
543 	new_entry->avail_ssize = 0;
544 
545 	new_entry->inheritance = VM_INHERIT_DEFAULT;
546 	new_entry->protection = prot;
547 	new_entry->max_protection = max;
548 	new_entry->wired_count = 0;
549 
550 	/*
551 	 * Insert the new entry into the list
552 	 */
553 
554 	vm_map_entry_link(map, prev_entry, new_entry);
555 	map->size += new_entry->end - new_entry->start;
556 
557 	/*
558 	 * Update the free space hint
559 	 */
560 	if ((map->first_free == prev_entry) &&
561 		(prev_entry->end >= new_entry->start))
562 		map->first_free = new_entry;
563 
564 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL))
565 		pmap_object_init_pt(map->pmap, start,
566 				    object, OFF_TO_IDX(offset), end - start,
567 				    cow & MAP_PREFAULT_PARTIAL);
568 
569 	return (KERN_SUCCESS);
570 }
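
/*
 *	Calling convention sketch (illustrative only; "object", "offset",
 *	"start" and "end" are hypothetical variables, not taken from this
 *	file).  The caller supplies the object reference and holds the
 *	map lock:
 *
 *		if (object != NULL)
 *			vm_object_reference(object);
 *		vm_map_lock(map);
 *		rv = vm_map_insert(map, object, offset, start, end,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0);
 *		vm_map_unlock(map);
 *
 *	If the insert fails, the reference taken above still belongs to
 *	the caller and must be released by it.
 */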
571 
572 int
573 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
574 	      vm_prot_t prot, vm_prot_t max, int cow)
575 {
576 	vm_map_entry_t prev_entry;
577 	vm_map_entry_t new_stack_entry;
578 	vm_size_t      init_ssize;
579 	int            rv;
580 
581 	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
582 		return (KERN_NO_SPACE);
583 
584 	if (max_ssize < SGROWSIZ)
585 		init_ssize = max_ssize;
586 	else
587 		init_ssize = SGROWSIZ;
588 
589 	vm_map_lock(map);
590 
591 	/* If addr is already mapped, no go */
592 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
593 		vm_map_unlock(map);
594 		return (KERN_NO_SPACE);
595 	}
596 
597 	/* If we can't accommodate max_ssize in the current mapping,
598 	 * no go.  However, we need to be aware that subsequent user
599 	 * mappings might map into the space we have reserved for
600 	 * stack, and currently this space is not protected.
601 	 *
602 	 * Hopefully we will at least detect this condition
603 	 * when we try to grow the stack.
604 	 */
605 	if ((prev_entry->next != &map->header) &&
606 	    (prev_entry->next->start < addrbos + max_ssize)) {
607 		vm_map_unlock(map);
608 		return (KERN_NO_SPACE);
609 	}
610 
611 	/* We initially map a stack of only init_ssize.  We will
612 	 * grow as needed later.  Since this is to be a grow
613 	 * down stack, we map at the top of the range.
614 	 *
615 	 * Note: we would normally expect prot and max to be
616 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
617 	 * eliminate these as input parameters, and just
618 	 * pass these values here in the insert call.
619 	 */
620 	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
621 	                   addrbos + max_ssize, prot, max, cow);
622 
623 	/* Now set the avail_ssize amount */
624 	if (rv == KERN_SUCCESS) {
625 		new_stack_entry = prev_entry->next;
626 		if (new_stack_entry->end   != addrbos + max_ssize ||
627 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
628 			panic ("Bad entry start/end for new stack entry");
629 		else
630 			new_stack_entry->avail_ssize = max_ssize - init_ssize;
631 	}
632 
633 	vm_map_unlock(map);
634 	return (rv);
635 }
636 
637 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
638  * desired address is already mapped, or if we successfully grow
639  * the stack.  Also returns KERN_SUCCESS if addr is outside the
640  * stack range (this is strange, but preserves compatibility with
641  * the grow function in vm_machdep.c).
642  */
643 int
644 vm_map_growstack (struct proc *p, vm_offset_t addr)
645 {
646 	vm_map_entry_t prev_entry;
647 	vm_map_entry_t stack_entry;
648 	vm_map_entry_t new_stack_entry;
649 	struct vmspace *vm = p->p_vmspace;
650 	vm_map_t map = &vm->vm_map;
651 	vm_offset_t    end;
652 	int      grow_amount;
653 	int      rv;
654 	int      is_procstack;
655 Retry:
656 	vm_map_lock_read(map);
657 
658 	/* If addr is already in the entry range, no need to grow.*/
659 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
660 		vm_map_unlock_read(map);
661 		return (KERN_SUCCESS);
662 	}
663 
664 	if ((stack_entry = prev_entry->next) == &map->header) {
665 		vm_map_unlock_read(map);
666 		return (KERN_SUCCESS);
667 	}
668 	if (prev_entry == &map->header)
669 		end = stack_entry->start - stack_entry->avail_ssize;
670 	else
671 		end = prev_entry->end;
672 
673 	/* This next test mimics the old grow function in vm_machdep.c.
674 	 * It really doesn't quite make sense, but we do it anyway
675 	 * for compatibility.
676 	 *
677 	 * If the stack is not growable, return success.  This signals the
678 	 * caller to proceed as it normally would with normal VM handling.
679 	 */
680 	if (stack_entry->avail_ssize < 1 ||
681 	    addr >= stack_entry->start ||
682 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
683 		vm_map_unlock_read(map);
684 		return (KERN_SUCCESS);
685 	}
686 
687 	/* Find the minimum grow amount */
688 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
689 	if (grow_amount > stack_entry->avail_ssize) {
690 		vm_map_unlock_read(map);
691 		return (KERN_NO_SPACE);
692 	}
693 
694 	/* If there is no longer enough space between the entries,
695 	 * fail, but first adjust the available space.  Note: this
696 	 * should only happen if the user has mapped into the
697 	 * stack area after the stack was created, and is
698 	 * probably an error.
699 	 *
700 	 * This also effectively destroys any guard page the user
701 	 * might have intended by limiting the stack size.
702 	 */
703 	if (grow_amount > stack_entry->start - end) {
704 		if (vm_map_lock_upgrade(map))
705 			goto Retry;
706 
707 		stack_entry->avail_ssize = stack_entry->start - end;
708 
709 		vm_map_unlock(map);
710 		return (KERN_NO_SPACE);
711 	}
712 
713 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
714 
715 	/* If this is the main process stack, see if we're over the
716 	 * stack limit.
717 	 */
718 	if (is_procstack && (vm->vm_ssize + grow_amount >
719 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
720 		vm_map_unlock_read(map);
721 		return (KERN_NO_SPACE);
722 	}
723 
724 	/* Round up the grow amount modulo SGROWSIZ */
725 	grow_amount = roundup (grow_amount, SGROWSIZ);
726 	if (grow_amount > stack_entry->avail_ssize) {
727 		grow_amount = stack_entry->avail_ssize;
728 	}
729 	if (is_procstack && (vm->vm_ssize + grow_amount >
730 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
731 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
732 		              vm->vm_ssize;
733 	}
734 
735 	if (vm_map_lock_upgrade(map))
736 		goto Retry;
737 
738 	/* Get the preliminary new entry start value */
739 	addr = stack_entry->start - grow_amount;
740 
741 	/* If this puts us into the previous entry, cut back our growth
742 	 * to the available space.  Also, see the note above.
743 	 */
744 	if (addr < end) {
745 		stack_entry->avail_ssize = stack_entry->start - end;
746 		addr = end;
747 	}
748 
749 	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
750 			   stack_entry->protection,
751 			   stack_entry->max_protection,
752 			   0);
753 
754 	/* Adjust the available stack space by the amount we grew. */
755 	if (rv == KERN_SUCCESS) {
756 		new_stack_entry = prev_entry->next;
757 		if (new_stack_entry->end   != stack_entry->start  ||
758 		    new_stack_entry->start != addr)
759 			panic ("Bad stack grow start/end in new stack entry");
760 		else {
761 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
762 							(new_stack_entry->end -
763 							 new_stack_entry->start);
764 			if (is_procstack)
765 				vm->vm_ssize += new_stack_entry->end -
766 						new_stack_entry->start;
767 		}
768 	}
769 
770 	vm_map_unlock(map);
771 	return (rv);
772 
773 }
774 
775 /*
776  * Find sufficient space for `length' bytes in the given map, starting at
777  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
778  */
779 int
780 vm_map_findspace(map, start, length, addr)
781 	vm_map_t map;
782 	vm_offset_t start;
783 	vm_size_t length;
784 	vm_offset_t *addr;
785 {
786 	vm_map_entry_t entry, next;
787 	vm_offset_t end;
788 
789 	if (start < map->min_offset)
790 		start = map->min_offset;
791 	if (start > map->max_offset)
792 		return (1);
793 
794 	/*
795 	 * Look for the first possible address; if there's already something
796 	 * at this address, we have to start after it.
797 	 */
798 	if (start == map->min_offset) {
799 		if ((entry = map->first_free) != &map->header)
800 			start = entry->end;
801 	} else {
802 		vm_map_entry_t tmp;
803 
804 		if (vm_map_lookup_entry(map, start, &tmp))
805 			start = tmp->end;
806 		entry = tmp;
807 	}
808 
809 	/*
810 	 * Look through the rest of the map, trying to fit a new region in the
811 	 * gap between existing regions, or after the very last region.
812 	 */
813 	for (;; start = (entry = next)->end) {
814 		/*
815 		 * Find the end of the proposed new region.  Be sure we didn't
816 		 * go beyond the end of the map, or wrap around the address;
817 		 * if so, we lose.  Otherwise, if this is the last entry, or
818 		 * if the proposed new region fits before the next entry, we
819 		 * win.
820 		 */
821 		end = start + length;
822 		if (end > map->max_offset || end < start)
823 			return (1);
824 		next = entry->next;
825 		if (next == &map->header || next->start >= end)
826 			break;
827 	}
828 	SAVE_HINT(map, entry);
829 	*addr = start;
830 	if (map == kernel_map) {
831 		vm_offset_t ksize;
832 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
833 			pmap_growkernel(ksize);
834 		}
835 	}
836 	return (0);
837 }
838 
839 /*
840  *	vm_map_find finds an unallocated region in the target address
841  *	map with the given length.  The search is defined to be
842  *	first-fit from the specified address; the region found is
843  *	returned in the same parameter.
844  *
845  *	If object is non-NULL, ref count must be bumped by caller
846  *	prior to making call to account for the new entry.
847  */
848 int
849 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
850 	    vm_offset_t *addr,	/* IN/OUT */
851 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
852 	    vm_prot_t max, int cow)
853 {
854 	vm_offset_t start;
855 	int result, s = 0;
856 
857 	start = *addr;
858 
859 	if (map == kmem_map || map == mb_map)
860 		s = splvm();
861 
862 	vm_map_lock(map);
863 	if (find_space) {
864 		if (vm_map_findspace(map, start, length, addr)) {
865 			vm_map_unlock(map);
866 			if (map == kmem_map || map == mb_map)
867 				splx(s);
868 			return (KERN_NO_SPACE);
869 		}
870 		start = *addr;
871 	}
872 	result = vm_map_insert(map, object, offset,
873 		start, start + length, prot, max, cow);
874 	vm_map_unlock(map);
875 
876 	if (map == kmem_map || map == mb_map)
877 		splx(s);
878 
879 	return (result);
880 }
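
/*
 *	Illustrative sketch of a first-fit allocation through this
 *	interface (the variables are hypothetical; real callers such as
 *	the kmem code typically layer more policy on top of this):
 *
 *		vm_offset_t addr;
 *		int rv;
 *
 *		addr = vm_map_min(map);
 *		rv = vm_map_find(map, NULL, 0, &addr, size, TRUE,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0);
 *		if (rv == KERN_SUCCESS)
 *			... [addr, addr + size) now belongs to map ...
 */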
881 
882 /*
883  *	vm_map_simplify_entry:
884  *
885  *	Simplify the given map entry by merging with either neighbor.
886  */
887 void
888 vm_map_simplify_entry(map, entry)
889 	vm_map_t map;
890 	vm_map_entry_t entry;
891 {
892 	vm_map_entry_t next, prev;
893 	vm_size_t prevsize, esize;
894 
895 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
896 		return;
897 
898 	prev = entry->prev;
899 	if (prev != &map->header) {
900 		prevsize = prev->end - prev->start;
901 		if ( (prev->end == entry->start) &&
902 		     (prev->object.vm_object == entry->object.vm_object) &&
903 		     (!prev->object.vm_object ||
904 			(prev->offset + prevsize == entry->offset)) &&
905 		     (prev->eflags == entry->eflags) &&
906 		     (prev->protection == entry->protection) &&
907 		     (prev->max_protection == entry->max_protection) &&
908 		     (prev->inheritance == entry->inheritance) &&
909 		     (prev->wired_count == entry->wired_count)) {
910 			if (map->first_free == prev)
911 				map->first_free = entry;
912 			if (map->hint == prev)
913 				map->hint = entry;
914 			vm_map_entry_unlink(map, prev);
915 			entry->start = prev->start;
916 			entry->offset = prev->offset;
917 			if (prev->object.vm_object)
918 				vm_object_deallocate(prev->object.vm_object);
919 			vm_map_entry_dispose(map, prev);
920 		}
921 	}
922 
923 	next = entry->next;
924 	if (next != &map->header) {
925 		esize = entry->end - entry->start;
926 		if ((entry->end == next->start) &&
927 		    (next->object.vm_object == entry->object.vm_object) &&
928 		     (!entry->object.vm_object ||
929 			(entry->offset + esize == next->offset)) &&
930 		    (next->eflags == entry->eflags) &&
931 		    (next->protection == entry->protection) &&
932 		    (next->max_protection == entry->max_protection) &&
933 		    (next->inheritance == entry->inheritance) &&
934 		    (next->wired_count == entry->wired_count)) {
935 			if (map->first_free == next)
936 				map->first_free = entry;
937 			if (map->hint == next)
938 				map->hint = entry;
939 			vm_map_entry_unlink(map, next);
940 			entry->end = next->end;
941 			if (next->object.vm_object)
942 				vm_object_deallocate(next->object.vm_object);
943 			vm_map_entry_dispose(map, next);
944 		}
945 	}
946 }
947 /*
948  *	vm_map_clip_start:	[ internal use only ]
949  *
950  *	Asserts that the given entry begins at or after
951  *	the specified address; if necessary,
952  *	it splits the entry into two.
953  */
954 #define vm_map_clip_start(map, entry, startaddr) \
955 { \
956 	if (startaddr > entry->start) \
957 		_vm_map_clip_start(map, entry, startaddr); \
958 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
959 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
960 }
961 
962 /*
963  *	This routine is called only when it is known that
964  *	the entry must be split.
965  */
966 static void
967 _vm_map_clip_start(map, entry, start)
968 	vm_map_t map;
969 	vm_map_entry_t entry;
970 	vm_offset_t start;
971 {
972 	vm_map_entry_t new_entry;
973 
974 	/*
975 	 * Split off the front portion -- note that we must insert the new
976 	 * entry BEFORE this one, so that this entry has the specified
977 	 * starting address.
978 	 */
979 
980 	vm_map_simplify_entry(map, entry);
981 
982 	/*
983 	 * If there is no object backing this entry, we might as well create
984 	 * one now.  If we defer it, an object can get created after the map
985 	 * is clipped, and individual objects will be created for the split-up
986 	 * map.  This is a bit of a hack, but is also about the best place to
987 	 * put this improvement.
988 	 */
989 
990 	if (entry->object.vm_object == NULL) {
991 		vm_object_t object;
992 		object = vm_object_allocate(OBJT_DEFAULT,
993 				atop(entry->end - entry->start));
994 		entry->object.vm_object = object;
995 		entry->offset = 0;
996 	}
997 
998 	new_entry = vm_map_entry_create(map);
999 	*new_entry = *entry;
1000 
1001 	new_entry->end = start;
1002 	entry->offset += (start - entry->start);
1003 	entry->start = start;
1004 
1005 	vm_map_entry_link(map, entry->prev, new_entry);
1006 
1007 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1008 		if (new_entry->object.vm_object->ref_count == 1)
1009 			vm_object_set_flag(new_entry->object.vm_object,
1010 					   OBJ_ONEMAPPING);
1011 		vm_object_reference(new_entry->object.vm_object);
1012 	}
1013 }
1014 
1015 /*
1016  *	vm_map_clip_end:	[ internal use only ]
1017  *
1018  *	Asserts that the given entry ends at or before
1019  *	the specified address; if necessary,
1020  *	it splits the entry into two.
1021  */
1022 
1023 #define vm_map_clip_end(map, entry, endaddr) \
1024 { \
1025 	if (endaddr < entry->end) \
1026 		_vm_map_clip_end(map, entry, endaddr); \
1027 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
1028 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
1029 }
1030 
1031 /*
1032  *	This routine is called only when it is known that
1033  *	the entry must be split.
1034  */
1035 static void
1036 _vm_map_clip_end(map, entry, end)
1037 	vm_map_t map;
1038 	vm_map_entry_t entry;
1039 	vm_offset_t end;
1040 {
1041 	vm_map_entry_t new_entry;
1042 
1043 	/*
1044 	 * If there is no object backing this entry, we might as well create
1045 	 * one now.  If we defer it, an object can get created after the map
1046 	 * is clipped, and individual objects will be created for the split-up
1047 	 * map.  This is a bit of a hack, but is also about the best place to
1048 	 * put this improvement.
1049 	 */
1050 
1051 	if (entry->object.vm_object == NULL) {
1052 		vm_object_t object;
1053 		object = vm_object_allocate(OBJT_DEFAULT,
1054 				atop(entry->end - entry->start));
1055 		entry->object.vm_object = object;
1056 		entry->offset = 0;
1057 	}
1058 
1059 	/*
1060 	 * Create a new entry and insert it AFTER the specified entry
1061 	 */
1062 
1063 	new_entry = vm_map_entry_create(map);
1064 	*new_entry = *entry;
1065 
1066 	new_entry->start = entry->end = end;
1067 	new_entry->offset += (end - entry->start);
1068 
1069 	vm_map_entry_link(map, entry, new_entry);
1070 
1071 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1072 		if (new_entry->object.vm_object->ref_count == 1)
1073 			vm_object_set_flag(new_entry->object.vm_object,
1074 					   OBJ_ONEMAPPING);
1075 		vm_object_reference(new_entry->object.vm_object);
1076 	}
1077 }
1078 
1079 /*
1080  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1081  *
1082  *	Asserts that the starting and ending region
1083  *	addresses fall within the valid range of the map.
1084  */
1085 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1086 		{					\
1087 		if (start < vm_map_min(map))		\
1088 			start = vm_map_min(map);	\
1089 		if (end > vm_map_max(map))		\
1090 			end = vm_map_max(map);		\
1091 		if (start > end)			\
1092 			start = end;			\
1093 		}
1094 
1095 /*
1096  *	vm_map_submap:		[ kernel use only ]
1097  *
1098  *	Mark the given range as handled by a subordinate map.
1099  *
1100  *	This range must have been created with vm_map_find,
1101  *	and no other operations may have been performed on this
1102  *	range prior to calling vm_map_submap.
1103  *
1104  *	Only a limited number of operations can be performed
1105  *	within this range after calling vm_map_submap:
1106  *		vm_fault
1107  *	[Don't try vm_map_copy!]
1108  *
1109  *	To remove a submapping, one must first remove the
1110  *	range from the superior map, and then destroy the
1111  *	submap (if desired).  [Better yet, don't try it.]
1112  */
1113 int
1114 vm_map_submap(map, start, end, submap)
1115 	vm_map_t map;
1116 	vm_offset_t start;
1117 	vm_offset_t end;
1118 	vm_map_t submap;
1119 {
1120 	vm_map_entry_t entry;
1121 	int result = KERN_INVALID_ARGUMENT;
1122 
1123 	vm_map_lock(map);
1124 
1125 	VM_MAP_RANGE_CHECK(map, start, end);
1126 
1127 	if (vm_map_lookup_entry(map, start, &entry)) {
1128 		vm_map_clip_start(map, entry, start);
1129 	} else
1130 		entry = entry->next;
1131 
1132 	vm_map_clip_end(map, entry, end);
1133 
1134 	if ((entry->start == start) && (entry->end == end) &&
1135 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1136 	    (entry->object.vm_object == NULL)) {
1137 		entry->object.sub_map = submap;
1138 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1139 		result = KERN_SUCCESS;
1140 	}
1141 	vm_map_unlock(map);
1142 
1143 	return (result);
1144 }
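
/*
 *	Sketch of the protocol described above (illustrative only; a real
 *	user would normally go through kmem_suballoc()):
 *
 *	reserve the range in the parent map:
 *		vm_map_find(map, NULL, 0, &start, size, TRUE,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	create the subordinate map:
 *		submap = vm_map_create(vm_map_pmap(map), start,
 *		    start + size);
 *	install it:
 *		vm_map_submap(map, start, start + size, submap);
 */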
1145 
1146 /*
1147  *	vm_map_protect:
1148  *
1149  *	Sets the protection of the specified address
1150  *	region in the target map.  If "set_max" is
1151  *	specified, the maximum protection is to be set;
1152  *	otherwise, only the current protection is affected.
1153  */
1154 int
1155 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1156 	       vm_prot_t new_prot, boolean_t set_max)
1157 {
1158 	vm_map_entry_t current;
1159 	vm_map_entry_t entry;
1160 
1161 	vm_map_lock(map);
1162 
1163 	VM_MAP_RANGE_CHECK(map, start, end);
1164 
1165 	if (vm_map_lookup_entry(map, start, &entry)) {
1166 		vm_map_clip_start(map, entry, start);
1167 	} else {
1168 		entry = entry->next;
1169 	}
1170 
1171 	/*
1172 	 * Make a first pass to check for protection violations.
1173 	 */
1174 
1175 	current = entry;
1176 	while ((current != &map->header) && (current->start < end)) {
1177 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1178 			vm_map_unlock(map);
1179 			return (KERN_INVALID_ARGUMENT);
1180 		}
1181 		if ((new_prot & current->max_protection) != new_prot) {
1182 			vm_map_unlock(map);
1183 			return (KERN_PROTECTION_FAILURE);
1184 		}
1185 		current = current->next;
1186 	}
1187 
1188 	/*
1189 	 * Go back and fix up protections. [Note that clipping is not
1190 	 * necessary the second time.]
1191 	 */
1192 
1193 	current = entry;
1194 
1195 	while ((current != &map->header) && (current->start < end)) {
1196 		vm_prot_t old_prot;
1197 
1198 		vm_map_clip_end(map, current, end);
1199 
1200 		old_prot = current->protection;
1201 		if (set_max)
1202 			current->protection =
1203 			    (current->max_protection = new_prot) &
1204 			    old_prot;
1205 		else
1206 			current->protection = new_prot;
1207 
1208 		/*
1209 		 * Update physical map if necessary. Worry about copy-on-write
1210 		 * here -- CHECK THIS XXX
1211 		 */
1212 
1213 		if (current->protection != old_prot) {
1214 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1215 							VM_PROT_ALL)
1216 
1217 			pmap_protect(map->pmap, current->start,
1218 			    current->end,
1219 			    current->protection & MASK(entry));
1220 #undef	MASK
1221 		}
1222 
1223 		vm_map_simplify_entry(map, current);
1224 
1225 		current = current->next;
1226 	}
1227 
1228 	vm_map_unlock(map);
1229 	return (KERN_SUCCESS);
1230 }
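
/*
 *	Example (illustrative): revoke write permission on a range
 *	without touching the maximum protection, mprotect(2)-style:
 *
 *		rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 *	With set_max == TRUE the new value becomes the maximum protection
 *	and the current protection is masked down to it, as the code
 *	above shows.
 */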
1231 
1232 /*
1233  *	vm_map_madvise:
1234  *
1235  * 	This routine traverses a process's map, handling the madvise
1236  *	system call.
1237  */
1238 void
1239 vm_map_madvise(map, start, end, advise)
1240 	vm_map_t map;
1241 	vm_offset_t start, end;
1242 	int advise;
1243 {
1244 	vm_map_entry_t current;
1245 	vm_map_entry_t entry;
1246 
1247 	vm_map_lock(map);
1248 
1249 	VM_MAP_RANGE_CHECK(map, start, end);
1250 
1251 	if (vm_map_lookup_entry(map, start, &entry)) {
1252 		vm_map_clip_start(map, entry, start);
1253 	} else
1254 		entry = entry->next;
1255 
1256 	for (current = entry;
1257 		(current != &map->header) && (current->start < end);
1258 		current = current->next) {
1259 		vm_size_t size;
1260 
1261 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1262 			continue;
1263 		}
1264 
1265 		vm_map_clip_end(map, current, end);
1266 		size = current->end - current->start;
1267 
1268 		/*
1269 		 * Create an object if needed
1270 		 */
1271 		if (current->object.vm_object == NULL) {
1272 			vm_object_t object;
1273 			if ((advise == MADV_FREE) || (advise == MADV_DONTNEED))
1274 				continue;
1275 			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
1276 			current->object.vm_object = object;
1277 			current->offset = 0;
1278 		}
1279 
1280 		switch (advise) {
1281 		case MADV_NORMAL:
1282 			current->object.vm_object->behavior = OBJ_NORMAL;
1283 			break;
1284 		case MADV_SEQUENTIAL:
1285 			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
1286 			break;
1287 		case MADV_RANDOM:
1288 			current->object.vm_object->behavior = OBJ_RANDOM;
1289 			break;
1290 		/*
1291 		 * Right now, we could handle DONTNEED and WILLNEED with common code.
1292 		 * They are mostly the same, except for the potential async reads (NYI).
1293 		 */
1294 		case MADV_FREE:
1295 		case MADV_DONTNEED:
1296 			{
1297 				vm_pindex_t pindex;
1298 				int count;
1299 				pindex = OFF_TO_IDX(current->offset);
1300 				count = OFF_TO_IDX(size);
1301 				/*
1302 				 * MADV_DONTNEED removes the page from all
1303 				 * pmaps, so pmap_remove is not necessary.
1304 				 */
1305 				vm_object_madvise(current->object.vm_object,
1306 					pindex, count, advise);
1307 			}
1308 			break;
1309 
1310 		case MADV_WILLNEED:
1311 			{
1312 				vm_pindex_t pindex;
1313 				int count;
1314 				pindex = OFF_TO_IDX(current->offset);
1315 				count = OFF_TO_IDX(size);
1316 				vm_object_madvise(current->object.vm_object,
1317 					pindex, count, advise);
1318 				pmap_object_init_pt(map->pmap, current->start,
1319 					current->object.vm_object, pindex,
1320 					(count << PAGE_SHIFT), 0);
1321 			}
1322 			break;
1323 
1324 		default:
1325 			break;
1326 		}
1327 	}
1328 
1329 	vm_map_simplify_entry(map, entry);
1330 	vm_map_unlock(map);
1331 	return;
1332 }
1333 
1334 
1335 /*
1336  *	vm_map_inherit:
1337  *
1338  *	Sets the inheritance of the specified address
1339  *	range in the target map.  Inheritance
1340  *	affects how the map will be shared with
1341  *	child maps at the time of vm_map_fork.
1342  */
1343 int
1344 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1345 	       vm_inherit_t new_inheritance)
1346 {
1347 	vm_map_entry_t entry;
1348 	vm_map_entry_t temp_entry;
1349 
1350 	switch (new_inheritance) {
1351 	case VM_INHERIT_NONE:
1352 	case VM_INHERIT_COPY:
1353 	case VM_INHERIT_SHARE:
1354 		break;
1355 	default:
1356 		return (KERN_INVALID_ARGUMENT);
1357 	}
1358 
1359 	vm_map_lock(map);
1360 
1361 	VM_MAP_RANGE_CHECK(map, start, end);
1362 
1363 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1364 		entry = temp_entry;
1365 		vm_map_clip_start(map, entry, start);
1366 	} else
1367 		entry = temp_entry->next;
1368 
1369 	while ((entry != &map->header) && (entry->start < end)) {
1370 		vm_map_clip_end(map, entry, end);
1371 
1372 		entry->inheritance = new_inheritance;
1373 
1374 		vm_map_simplify_entry(map, entry);
1375 
1376 		entry = entry->next;
1377 	}
1378 
1379 	vm_map_unlock(map);
1380 	return (KERN_SUCCESS);
1381 }
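
/*
 *	Example (illustrative): mark a range to be shared with children
 *	created at fork time, minherit(2)-style:
 *
 *		rv = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
 */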
1382 
1383 /*
1384  * Implement the semantics of mlock
1385  */
1386 int
1387 vm_map_user_pageable(map, start, end, new_pageable)
1388 	vm_map_t map;
1389 	vm_offset_t start;
1390 	vm_offset_t end;
1391 	boolean_t new_pageable;
1392 {
1393 	vm_map_entry_t entry;
1394 	vm_map_entry_t start_entry;
1395 	vm_offset_t estart;
1396 	int rv;
1397 
1398 	vm_map_lock(map);
1399 	VM_MAP_RANGE_CHECK(map, start, end);
1400 
1401 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1402 		vm_map_unlock(map);
1403 		return (KERN_INVALID_ADDRESS);
1404 	}
1405 
1406 	if (new_pageable) {
1407 
1408 		entry = start_entry;
1409 		vm_map_clip_start(map, entry, start);
1410 
1411 		/*
1412 		 * Now decrement the wiring count for each region. If a region
1413 		 * becomes completely unwired, unwire its physical pages and
1414 		 * mappings.
1415 		 */
1416 		while ((entry != &map->header) && (entry->start < end)) {
1417 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1418 				vm_map_clip_end(map, entry, end);
1419 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1420 				entry->wired_count--;
1421 				if (entry->wired_count == 0)
1422 					vm_fault_unwire(map, entry->start, entry->end);
1423 			}
1424 			vm_map_simplify_entry(map, entry);
1425 			entry = entry->next;
1426 		}
1427 	} else {
1428 
1429 		entry = start_entry;
1430 
1431 		while ((entry != &map->header) && (entry->start < end)) {
1432 
1433 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1434 				entry = entry->next;
1435 				continue;
1436 			}
1437 
1438 			if (entry->wired_count != 0) {
1439 				entry->wired_count++;
1440 				entry->eflags |= MAP_ENTRY_USER_WIRED;
1441 				entry = entry->next;
1442 				continue;
1443 			}
1444 
1445 			/* Here on entry being newly wired */
1446 
1447 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1448 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1449 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1450 
1451 					vm_object_shadow(&entry->object.vm_object,
1452 					    &entry->offset,
1453 					    atop(entry->end - entry->start));
1454 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1455 
1456 				} else if (entry->object.vm_object == NULL) {
1457 
1458 					entry->object.vm_object =
1459 					    vm_object_allocate(OBJT_DEFAULT,
1460 						atop(entry->end - entry->start));
1461 					entry->offset = (vm_offset_t) 0;
1462 
1463 				}
1464 			}
1465 
1466 			vm_map_clip_start(map, entry, start);
1467 			vm_map_clip_end(map, entry, end);
1468 
1469 			entry->wired_count++;
1470 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1471 			estart = entry->start;
1472 
1473 			/* First we need to allow map modifications */
1474 			vm_map_set_recursive(map);
1475 			vm_map_lock_downgrade(map);
1476 			map->timestamp++;
1477 
1478 			rv = vm_fault_user_wire(map, entry->start, entry->end);
1479 			if (rv) {
1480 
1481 				entry->wired_count--;
1482 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1483 
1484 				vm_map_clear_recursive(map);
1485 				vm_map_unlock(map);
1486 
1487 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
1488 				return rv;
1489 			}
1490 
1491 			vm_map_clear_recursive(map);
1492 			if (vm_map_lock_upgrade(map)) {
1493 				vm_map_lock(map);
1494 				if (vm_map_lookup_entry(map, estart, &entry)
1495 				    == FALSE) {
1496 					vm_map_unlock(map);
1497 					(void) vm_map_user_pageable(map,
1498 								    start,
1499 								    estart,
1500 								    TRUE);
1501 					return (KERN_INVALID_ADDRESS);
1502 				}
1503 			}
1504 			vm_map_simplify_entry(map, entry);
1505 		}
1506 	}
1507 	map->timestamp++;
1508 	vm_map_unlock(map);
1509 	return KERN_SUCCESS;
1510 }
1511 
1512 /*
1513  *	vm_map_pageable:
1514  *
1515  *	Sets the pageability of the specified address
1516  *	range in the target map.  Regions specified
1517  *	as not pageable require locked-down physical
1518  *	memory and physical page maps.
1519  *
1520  *	The map must not be locked, but a reference
1521  *	must remain to the map throughout the call.
1522  */
1523 int
1524 vm_map_pageable(map, start, end, new_pageable)
1525 	vm_map_t map;
1526 	vm_offset_t start;
1527 	vm_offset_t end;
1528 	boolean_t new_pageable;
1529 {
1530 	vm_map_entry_t entry;
1531 	vm_map_entry_t start_entry;
1532 	vm_offset_t failed = 0;
1533 	int rv;
1534 
1535 	vm_map_lock(map);
1536 
1537 	VM_MAP_RANGE_CHECK(map, start, end);
1538 
1539 	/*
1540 	 * Only one pageability change may take place at one time, since
1541 	 * vm_fault assumes it will be called only once for each
1542 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
1543 	 * changing the pageability for the entire region.  We do so before
1544 	 * making any changes.
1545 	 */
1546 
1547 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1548 		vm_map_unlock(map);
1549 		return (KERN_INVALID_ADDRESS);
1550 	}
1551 	entry = start_entry;
1552 
1553 	/*
1554 	 * Actions are rather different for wiring and unwiring, so we have
1555 	 * two separate cases.
1556 	 */
1557 
1558 	if (new_pageable) {
1559 
1560 		vm_map_clip_start(map, entry, start);
1561 
1562 		/*
1563 		 * Unwiring.  First ensure that the range to be unwired is
1564 		 * really wired down and that there are no holes.
1565 		 */
1566 		while ((entry != &map->header) && (entry->start < end)) {
1567 
1568 			if (entry->wired_count == 0 ||
1569 			    (entry->end < end &&
1570 				(entry->next == &map->header ||
1571 				    entry->next->start > entry->end))) {
1572 				vm_map_unlock(map);
1573 				return (KERN_INVALID_ARGUMENT);
1574 			}
1575 			entry = entry->next;
1576 		}
1577 
1578 		/*
1579 		 * Now decrement the wiring count for each region. If a region
1580 		 * becomes completely unwired, unwire its physical pages and
1581 		 * mappings.
1582 		 */
1583 		entry = start_entry;
1584 		while ((entry != &map->header) && (entry->start < end)) {
1585 			vm_map_clip_end(map, entry, end);
1586 
1587 			entry->wired_count--;
1588 			if (entry->wired_count == 0)
1589 				vm_fault_unwire(map, entry->start, entry->end);
1590 
1591 			vm_map_simplify_entry(map, entry);
1592 
1593 			entry = entry->next;
1594 		}
1595 	} else {
1596 		/*
1597 		 * Wiring.  We must do this in two passes:
1598 		 *
1599 		 * 1.  Holding the write lock, we create any shadow or zero-fill
1600 		 * objects that need to be created. Then we clip each map
1601 		 * entry to the region to be wired and increment its wiring
1602 		 * count.  We create objects before clipping the map entries
1603 		 * to avoid object proliferation.
1604 		 *
1605 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
1606 		 * fault in the pages for any newly wired area (wired_count is
1607 		 * 1).
1608 		 *
1609 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
1610 		 * deadlock with another process that may have faulted on one
1611 		 * of the pages to be wired (it would mark the page busy,
1612 		 * blocking us, then in turn block on the map lock that we
1613 		 * hold).  Because of problems in the recursive lock package,
1614 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1615 		 * any actions that require the write lock must be done
1616 		 * beforehand.  Because we keep the read lock on the map, the
1617 		 * copy-on-write status of the entries we modify here cannot
1618 		 * change.
1619 		 */
1620 
1621 		/*
1622 		 * Pass 1.
1623 		 */
1624 		while ((entry != &map->header) && (entry->start < end)) {
1625 			if (entry->wired_count == 0) {
1626 
1627 				/*
1628 				 * Perform actions of vm_map_lookup that need
1629 				 * the write lock on the map: create a shadow
1630 				 * object for a copy-on-write region, or an
1631 				 * object for a zero-fill region.
1632 				 *
1633 				 * We don't have to do this for entries that
1634 				 * point to sub maps, because we won't
1635 				 * hold the lock on the sub map.
1636 				 */
1637 				if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1638 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1639 					if (copyflag &&
1640 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1641 
1642 						vm_object_shadow(&entry->object.vm_object,
1643 						    &entry->offset,
1644 						    atop(entry->end - entry->start));
1645 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1646 					} else if (entry->object.vm_object == NULL) {
1647 						entry->object.vm_object =
1648 						    vm_object_allocate(OBJT_DEFAULT,
1649 							atop(entry->end - entry->start));
1650 						entry->offset = (vm_offset_t) 0;
1651 					}
1652 				}
1653 			}
1654 			vm_map_clip_start(map, entry, start);
1655 			vm_map_clip_end(map, entry, end);
1656 			entry->wired_count++;
1657 
1658 			/*
1659 			 * Check for holes
1660 			 */
1661 			if (entry->end < end &&
1662 			    (entry->next == &map->header ||
1663 				entry->next->start > entry->end)) {
1664 				/*
1665 				 * Found one.  Object creation actions do not
1666 				 * need to be undone, but the wired counts
1667 				 * need to be restored.
1668 				 */
1669 				while (entry != &map->header && entry->end > start) {
1670 					entry->wired_count--;
1671 					entry = entry->prev;
1672 				}
1673 				vm_map_unlock(map);
1674 				return (KERN_INVALID_ARGUMENT);
1675 			}
1676 			entry = entry->next;
1677 		}
1678 
1679 		/*
1680 		 * Pass 2.
1681 		 */
1682 
1683 		/*
1684 		 * HACK HACK HACK HACK
1685 		 *
1686 		 * If we are wiring in the kernel map or a submap of it,
1687 		 * unlock the map to avoid deadlocks.  We trust that the
1688 		 * kernel is well-behaved, and therefore will not do
1689 		 * anything destructive to this region of the map while
1690 		 * we have it unlocked.  We cannot trust user processes
1691 		 * to do the same.
1692 		 *
1693 		 * HACK HACK HACK HACK
1694 		 */
1695 		if (vm_map_pmap(map) == kernel_pmap) {
1696 			vm_map_unlock(map);	/* trust me ... */
1697 		} else {
1698 			vm_map_set_recursive(map);
1699 			vm_map_lock_downgrade(map);
1700 		}
1701 
1702 		rv = 0;
1703 		entry = start_entry;
1704 		while (entry != &map->header && entry->start < end) {
1705 			/*
1706 			 * If vm_fault_wire fails for any page we need to undo
1707 			 * what has been done.  We decrement the wiring count
1708 			 * for those pages which have not yet been wired (now)
1709 			 * and unwire those that have (later).
1710 			 *
1711 			 * XXX this violates the locking protocol on the map,
1712 			 * needs to be fixed.
1713 			 */
1714 			if (rv)
1715 				entry->wired_count--;
1716 			else if (entry->wired_count == 1) {
1717 				rv = vm_fault_wire(map, entry->start, entry->end);
1718 				if (rv) {
1719 					failed = entry->start;
1720 					entry->wired_count--;
1721 				}
1722 			}
1723 			entry = entry->next;
1724 		}
1725 
1726 		if (vm_map_pmap(map) == kernel_pmap) {
1727 			vm_map_lock(map);
1728 		} else {
1729 			vm_map_clear_recursive(map);
1730 		}
1731 		if (rv) {
1732 			vm_map_unlock(map);
1733 			(void) vm_map_pageable(map, start, failed, TRUE);
1734 			return (rv);
1735 		}
1736 		vm_map_simplify_entry(map, start_entry);
1737 	}
1738 
1739 	vm_map_unlock(map);
1740 
1741 	return (KERN_SUCCESS);
1742 }
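
/*
 *	Sketch of the two directions (illustrative; start and end are
 *	assumed to be page aligned and within the map):
 *
 *		rv = vm_map_pageable(map, start, end, FALSE);	wire down
 *		rv = vm_map_pageable(map, start, end, TRUE);	unwire
 */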
1743 
1744 /*
1745  * vm_map_clean
1746  *
1747  * Push any dirty cached pages in the address range to their pager.
1748  * If syncio is TRUE, dirty pages are written synchronously.
1749  * If invalidate is TRUE, any cached pages are freed as well.
1750  *
1751  * Returns an error if any part of the specified range is not mapped.
1752  */
1753 int
1754 vm_map_clean(map, start, end, syncio, invalidate)
1755 	vm_map_t map;
1756 	vm_offset_t start;
1757 	vm_offset_t end;
1758 	boolean_t syncio;
1759 	boolean_t invalidate;
1760 {
1761 	vm_map_entry_t current;
1762 	vm_map_entry_t entry;
1763 	vm_size_t size;
1764 	vm_object_t object;
1765 	vm_ooffset_t offset;
1766 
1767 	vm_map_lock_read(map);
1768 	VM_MAP_RANGE_CHECK(map, start, end);
1769 	if (!vm_map_lookup_entry(map, start, &entry)) {
1770 		vm_map_unlock_read(map);
1771 		return (KERN_INVALID_ADDRESS);
1772 	}
1773 	/*
1774 	 * Make a first pass to check for holes.
1775 	 */
1776 	for (current = entry; current->start < end; current = current->next) {
1777 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1778 			vm_map_unlock_read(map);
1779 			return (KERN_INVALID_ARGUMENT);
1780 		}
1781 		if (end > current->end &&
1782 		    (current->next == &map->header ||
1783 			current->end != current->next->start)) {
1784 			vm_map_unlock_read(map);
1785 			return (KERN_INVALID_ADDRESS);
1786 		}
1787 	}
1788 
1789 	if (invalidate)
1790 		pmap_remove(vm_map_pmap(map), start, end);
1791 	/*
1792 	 * Make a second pass, cleaning/uncaching pages from the indicated
1793 	 * objects as we go.
1794 	 */
1795 	for (current = entry; current->start < end; current = current->next) {
1796 		offset = current->offset + (start - current->start);
1797 		size = (end <= current->end ? end : current->end) - start;
1798 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1799 			vm_map_t smap;
1800 			vm_map_entry_t tentry;
1801 			vm_size_t tsize;
1802 
1803 			smap = current->object.sub_map;
1804 			vm_map_lock_read(smap);
1805 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1806 			tsize = tentry->end - offset;
1807 			if (tsize < size)
1808 				size = tsize;
1809 			object = tentry->object.vm_object;
1810 			offset = tentry->offset + (offset - tentry->start);
1811 			vm_map_unlock_read(smap);
1812 		} else {
1813 			object = current->object.vm_object;
1814 		}
1815 		/*
1816 		 * Note that there is absolutely no sense in writing out
1817 		 * anonymous objects, so we track down the vnode object
1818 		 * to write out.
1819 		 * We invalidate (remove) all pages from the address space
1820 		 * anyway, for semantic correctness.
1821 		 */
1822 		while (object->backing_object) {
1823 			object = object->backing_object;
1824 			offset += object->backing_object_offset;
1825 			if (object->size < OFF_TO_IDX( offset + size))
1826 				size = IDX_TO_OFF(object->size) - offset;
1827 		}
1828 		if (object && (object->type == OBJT_VNODE)) {
1829 			/*
1830 			 * Flush pages if writing is allowed. XXX should we continue
1831 			 * on an error?
1832 			 *
1833 			 * XXX Doing async I/O and then removing all the pages from
1834 			 *     the object before it completes is probably a very bad
1835 			 *     idea.
1836 			 */
1837 			if (current->protection & VM_PROT_WRITE) {
1838 				int flags;
1839 				if (object->type == OBJT_VNODE)
1840 					vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
1841 				flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1842 				flags |= invalidate ? OBJPC_INVAL : 0;
1843 				vm_object_page_clean(object,
1844 					OFF_TO_IDX(offset),
1845 					OFF_TO_IDX(offset + size + PAGE_MASK),
1846 					flags);
1847 				if (invalidate) {
1848 					vm_object_pip_wait(object, "objmcl");
1849 					vm_object_page_remove(object,
1850 						OFF_TO_IDX(offset),
1851 						OFF_TO_IDX(offset + size + PAGE_MASK),
1852 						FALSE);
1853 				}
1854 				if (object->type == OBJT_VNODE)
1855 					VOP_UNLOCK(object->handle, 0, curproc);
1856 			}
1857 		}
1858 		start += size;
1859 	}
1860 
1861 	vm_map_unlock_read(map);
1862 	return (KERN_SUCCESS);
1863 }
1864 
1865 /*
1866  *	vm_map_entry_unwire:	[ internal use only ]
1867  *
1868  *	Make the region specified by this entry pageable.
1869  *
1870  *	The map in question should be locked.
1871  *	[This is the reason for this routine's existence.]
1872  */
1873 static void
1874 vm_map_entry_unwire(map, entry)
1875 	vm_map_t map;
1876 	vm_map_entry_t entry;
1877 {
1878 	vm_fault_unwire(map, entry->start, entry->end);
1879 	entry->wired_count = 0;
1880 }
1881 
1882 /*
1883  *	vm_map_entry_delete:	[ internal use only ]
1884  *
1885  *	Deallocate the given entry from the target map.
1886  */
1887 static void
1888 vm_map_entry_delete(map, entry)
1889 	vm_map_t map;
1890 	vm_map_entry_t entry;
1891 {
1892 	vm_map_entry_unlink(map, entry);
1893 	map->size -= entry->end - entry->start;
1894 
1895 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1896 		vm_object_deallocate(entry->object.vm_object);
1897 	}
1898 
1899 	vm_map_entry_dispose(map, entry);
1900 }
1901 
1902 /*
1903  *	vm_map_delete:	[ internal use only ]
1904  *
1905  *	Deallocates the given address range from the target
1906  *	map.
1907  */
1908 int
1909 vm_map_delete(map, start, end)
1910 	vm_map_t map;
1911 	vm_offset_t start;
1912 	vm_offset_t end;
1913 {
1914 	vm_object_t object;
1915 	vm_map_entry_t entry;
1916 	vm_map_entry_t first_entry;
1917 
1918 	/*
1919 	 * Find the start of the region, and clip it
1920 	 */
1921 
1922 	if (!vm_map_lookup_entry(map, start, &first_entry))
1923 		entry = first_entry->next;
1924 	else {
1925 		entry = first_entry;
1926 		vm_map_clip_start(map, entry, start);
1927 		/*
1928 		 * Fix the lookup hint now, rather than each time though the
1929 		 * Fix the lookup hint now, rather than each time through the
1930 		 */
1931 		SAVE_HINT(map, entry->prev);
1932 	}
1933 
1934 	/*
1935 	 * Save the free space hint
1936 	 */
1937 
1938 	if (entry == &map->header) {
1939 		map->first_free = &map->header;
1940 	} else if (map->first_free->start >= start) {
1941 		map->first_free = entry->prev;
1942 	}
1943 
1944 	/*
1945 	 * Step through all entries in this region
1946 	 */
1947 
1948 	while ((entry != &map->header) && (entry->start < end)) {
1949 		vm_map_entry_t next;
1950 		vm_offset_t s, e;
1951 		vm_pindex_t offidxstart, offidxend, count;
1952 
1953 		vm_map_clip_end(map, entry, end);
1954 
1955 		s = entry->start;
1956 		e = entry->end;
1957 		next = entry->next;
1958 
1959 		offidxstart = OFF_TO_IDX(entry->offset);
1960 		count = OFF_TO_IDX(e - s);
1961 		object = entry->object.vm_object;
1962 
1963 		/*
1964 		 * Unwire before removing addresses from the pmap; otherwise,
1965 		 * unwiring will put the entries back in the pmap.
1966 		 */
1967 		if (entry->wired_count != 0) {
1968 			vm_map_entry_unwire(map, entry);
1969 		}
1970 
1971 		offidxend = offidxstart + count;
1972 
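     		/*
     		 * Pages backing the kernel's own objects are removed from
     		 * the object directly.  For everything else the range is
     		 * first torn out of the pmap; default and swap objects that
     		 * are still flagged OBJ_ONEMAPPING additionally have their
     		 * pages and swap space in the range released and may be
     		 * trimmed.
     		 */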
1973 		if ((object == kernel_object) || (object == kmem_object)) {
1974 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
1975 		} else {
1976 			pmap_remove(map->pmap, s, e);
1977 			if (object != NULL &&
1978 			    object->ref_count != 1 &&
1979 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
1980 			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
1981 				vm_object_collapse(object);
1982 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
1983 				if (object->type == OBJT_SWAP) {
1984 					swap_pager_freespace(object, offidxstart, count);
1985 				}
1986 				if (offidxend >= object->size &&
1987 				    offidxstart < object->size) {
1988 					object->size = offidxstart;
1989 				}
1990 			}
1991 		}
1992 
1993 		/*
1994 		 * Delete the entry (which may delete the object) only after
1995 		 * removing all pmap entries pointing to its pages.
1996 		 * (Otherwise, its page frames may be reallocated, and any
1997 		 * modify bits will be set in the wrong object!)
1998 		 */
1999 		vm_map_entry_delete(map, entry);
2000 		entry = next;
2001 	}
2002 	return (KERN_SUCCESS);
2003 }
2004 
2005 /*
2006  *	vm_map_remove:
2007  *
2008  *	Remove the given address range from the target map.
2009  *	This is the exported form of vm_map_delete.
2010  */
2011 int
2012 vm_map_remove(map, start, end)
2013 	vm_map_t map;
2014 	vm_offset_t start;
2015 	vm_offset_t end;
2016 {
2017 	int result, s = 0;
2018 
2019 	if (map == kmem_map || map == mb_map)
2020 		s = splvm();
2021 
2022 	vm_map_lock(map);
2023 	VM_MAP_RANGE_CHECK(map, start, end);
2024 	result = vm_map_delete(map, start, end);
2025 	vm_map_unlock(map);
2026 
2027 	if (map == kmem_map || map == mb_map)
2028 		splx(s);
2029 
2030 	return (result);
2031 }
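
     /*
      * Example (sketch): releasing a kernel virtual range, as kmem_free()
      * does, reduces to something like
      *
      *	(void) vm_map_remove(map, trunc_page(addr),
      *	    round_page(addr + size));
      */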
2032 
2033 /*
2034  *	vm_map_check_protection:
2035  *
2036  *	Assert that the target map allows the specified
2037  *	privilege on the entire address region given.
2038  *	The entire region must be allocated.
2039  */
2040 boolean_t
2041 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2042 			vm_prot_t protection)
2043 {
2044 	vm_map_entry_t entry;
2045 	vm_map_entry_t tmp_entry;
2046 
2047 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2048 		return (FALSE);
2049 	}
2050 	entry = tmp_entry;
2051 
2052 	while (start < end) {
2053 		if (entry == &map->header) {
2054 			return (FALSE);
2055 		}
2056 		/*
2057 		 * No holes allowed!
2058 		 */
2059 
2060 		if (start < entry->start) {
2061 			return (FALSE);
2062 		}
2063 		/*
2064 		 * Check protection associated with entry.
2065 		 */
2066 
2067 		if ((entry->protection & protection) != protection) {
2068 			return (FALSE);
2069 		}
2070 		/* go to next entry */
2071 
2072 		start = entry->end;
2073 		entry = entry->next;
2074 	}
2075 	return (TRUE);
2076 }
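
     /*
      * Example (sketch): verifying that a user range is readable before
      * operating on it directly might look like
      *
      *	if (!vm_map_check_protection(&p->p_vmspace->vm_map,
      *	    trunc_page(addr), round_page(addr + len), VM_PROT_READ))
      *		return (EFAULT);
      */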
2077 
2078 /*
2079  * Split the pages in a map entry into a new object.  This affords
2080  * easier removal of unused pages, and keeps object inheritance from
2081  * having a negative impact on memory usage.
2082  */
2083 static void
2084 vm_map_split(entry)
2085 	vm_map_entry_t entry;
2086 {
2087 	vm_page_t m;
2088 	vm_object_t orig_object, new_object, source;
2089 	vm_offset_t s, e;
2090 	vm_pindex_t offidxstart, offidxend, idx;
2091 	vm_size_t size;
2092 	vm_ooffset_t offset;
2093 
2094 	orig_object = entry->object.vm_object;
2095 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2096 		return;
2097 	if (orig_object->ref_count <= 1)
2098 		return;
2099 
2100 	offset = entry->offset;
2101 	s = entry->start;
2102 	e = entry->end;
2103 
2104 	offidxstart = OFF_TO_IDX(offset);
2105 	offidxend = offidxstart + OFF_TO_IDX(e - s);
2106 	size = offidxend - offidxstart;
2107 
2108 	new_object = vm_pager_allocate(orig_object->type,
2109 		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2110 	if (new_object == NULL)
2111 		return;
2112 
2113 	source = orig_object->backing_object;
2114 	if (source != NULL) {
2115 		vm_object_reference(source);	/* Referenced by new_object */
2116 		TAILQ_INSERT_TAIL(&source->shadow_head,
2117 				  new_object, shadow_list);
2118 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
2119 		new_object->backing_object_offset =
2120 			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2121 		new_object->backing_object = source;
2122 		source->shadow_count++;
2123 		source->generation++;
2124 	}
2125 
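     	/*
     	 * Move every resident page in the split range from orig_object
     	 * into new_object, leaving each page busy until the backing
     	 * store has been transferred as well.
     	 */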
2126 	for (idx = 0; idx < size; idx++) {
2127 		vm_page_t m;
2128 
2129 	retry:
2130 		m = vm_page_lookup(orig_object, offidxstart + idx);
2131 		if (m == NULL)
2132 			continue;
2133 
2134 		/*
2135 		 * We must wait for pending I/O to complete before we can
2136 		 * rename the page.
2137 		 *
2138 		 * We do not have to VM_PROT_NONE the page as mappings should
2139 		 * not be changed by this operation.
2140 		 */
2141 		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2142 			goto retry;
2143 
2144 		vm_page_busy(m);
2145 		vm_page_rename(m, new_object, idx);
2146 		/* page automatically made dirty by rename and cache handled */
2147 		vm_page_busy(m);
2148 	}
2149 
2150 	if (orig_object->type == OBJT_SWAP) {
2151 		vm_object_pip_add(orig_object, 1);
2152 		/*
2153 		 * copy orig_object pages into new_object
2154 		 * and destroy unneeded pages in
2155 		 * shadow object.
2156 		 */
2157 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
2158 		vm_object_pip_wakeup(orig_object);
2159 	}
2160 
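     	/*
     	 * The pages (and any swap metadata) are now in new_object, so the
     	 * renamed pages can be unbusied.
     	 */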
2161 	for (idx = 0; idx < size; idx++) {
2162 		m = vm_page_lookup(new_object, idx);
2163 		if (m) {
2164 			vm_page_wakeup(m);
2165 		}
2166 	}
2167 
2168 	entry->object.vm_object = new_object;
2169 	entry->offset = 0LL;
2170 	vm_object_deallocate(orig_object);
2171 }
2172 
2173 /*
2174  *	vm_map_copy_entry:
2175  *
2176  *	Copies the contents of the source entry to the destination
2177  *	entry.  The entries *must* be aligned properly.
2178  */
2179 static void
2180 vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2181 	vm_map_t src_map, dst_map;
2182 	vm_map_entry_t src_entry, dst_entry;
2183 {
2184 	vm_object_t src_object;
2185 
2186 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2187 		return;
2188 
2189 	if (src_entry->wired_count == 0) {
2190 
2191 		/*
2192 		 * If the source entry is marked needs_copy, it is already
2193 		 * write-protected.
2194 		 */
2195 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2196 			pmap_protect(src_map->pmap,
2197 			    src_entry->start,
2198 			    src_entry->end,
2199 			    src_entry->protection & ~VM_PROT_WRITE);
2200 		}
2201 
2202 		/*
2203 		 * Make a copy of the object.
2204 		 */
2205 		if ((src_object = src_entry->object.vm_object) != NULL) {
2206 
2207 			if ((src_object->handle == NULL) &&
2208 				(src_object->type == OBJT_DEFAULT ||
2209 				 src_object->type == OBJT_SWAP)) {
2210 				vm_object_collapse(src_object);
2211 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2212 					vm_map_split(src_entry);
2213 					src_object = src_entry->object.vm_object;
2214 				}
2215 			}
2216 
2217 			vm_object_reference(src_object);
2218 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2219 			dst_entry->object.vm_object = src_object;
2220 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2221 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2222 			dst_entry->offset = src_entry->offset;
2223 		} else {
2224 			dst_entry->object.vm_object = NULL;
2225 			dst_entry->offset = 0;
2226 		}
2227 
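     		/*
     		 * pmap_copy() is only an optimization: it may prime the
     		 * destination pmap with the now read-only mappings so that
     		 * the first access does not fault, and it is free to do
     		 * nothing at all.
     		 */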
2228 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2229 		    dst_entry->end - dst_entry->start, src_entry->start);
2230 	} else {
2231 		/*
2232 		 * Of course, wired down pages can't be set copy-on-write.
2233 		 * Cause wired pages to be copied into the new map by
2234 		 * simulating faults (the new pages are pageable).
2235 		 */
2236 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2237 	}
2238 }
2239 
2240 /*
2241  * vmspace_fork:
2242  * Create a new process vmspace structure and vm_map
2243  * based on those of an existing process.  The new map
2244  * is based on the old map, according to the inheritance
2245  * values on the regions in that map.
2246  *
2247  * The source map must not be locked.
2248  */
2249 struct vmspace *
2250 vmspace_fork(vm1)
2251 	struct vmspace *vm1;
2252 {
2253 	struct vmspace *vm2;
2254 	vm_map_t old_map = &vm1->vm_map;
2255 	vm_map_t new_map;
2256 	vm_map_entry_t old_entry;
2257 	vm_map_entry_t new_entry;
2258 	vm_object_t object;
2259 
2260 	vm_map_lock(old_map);
2261 
2262 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2263 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2264 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2265 	new_map = &vm2->vm_map;	/* XXX */
2266 	new_map->timestamp = 1;
2267 
2268 	old_entry = old_map->header.next;
2269 
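     	/*
     	 * Walk every entry in the parent's map and clone it into the
     	 * child according to its inheritance attribute.
     	 */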
2270 	while (old_entry != &old_map->header) {
2271 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2272 			panic("vmspace_fork: encountered a submap");
2273 
2274 		switch (old_entry->inheritance) {
2275 		case VM_INHERIT_NONE:
2276 			break;
2277 
2278 		case VM_INHERIT_SHARE:
2279 			/*
2280 			 * Clone the entry, creating the shared object if necessary.
2281 			 */
2282 			object = old_entry->object.vm_object;
2283 			if (object == NULL) {
2284 				object = vm_object_allocate(OBJT_DEFAULT,
2285 					atop(old_entry->end - old_entry->start));
2286 				old_entry->object.vm_object = object;
2287 				old_entry->offset = (vm_offset_t) 0;
2288 			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2289 				vm_object_shadow(&old_entry->object.vm_object,
2290 					&old_entry->offset,
2291 					atop(old_entry->end - old_entry->start));
2292 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2293 				object = old_entry->object.vm_object;
2294 			}
2295 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2296 
2297 			/*
2298 			 * Clone the entry, referencing the shared object.
2299 			 */
2300 			new_entry = vm_map_entry_create(new_map);
2301 			*new_entry = *old_entry;
2302 			new_entry->wired_count = 0;
2303 			vm_object_reference(object);
2304 
2305 			/*
2306 			 * Insert the entry into the new map -- we know we're
2307 			 * inserting at the end of the new map.
2308 			 */
2309 
2310 			vm_map_entry_link(new_map, new_map->header.prev,
2311 			    new_entry);
2312 
2313 			/*
2314 			 * Update the physical map
2315 			 */
2316 
2317 			pmap_copy(new_map->pmap, old_map->pmap,
2318 			    new_entry->start,
2319 			    (old_entry->end - old_entry->start),
2320 			    old_entry->start);
2321 			break;
2322 
2323 		case VM_INHERIT_COPY:
2324 			/*
2325 			 * Clone the entry and link into the map.
2326 			 */
2327 			new_entry = vm_map_entry_create(new_map);
2328 			*new_entry = *old_entry;
2329 			new_entry->wired_count = 0;
2330 			new_entry->object.vm_object = NULL;
2331 			vm_map_entry_link(new_map, new_map->header.prev,
2332 			    new_entry);
2333 			vm_map_copy_entry(old_map, new_map, old_entry,
2334 			    new_entry);
2335 			break;
2336 		}
2337 		old_entry = old_entry->next;
2338 	}
2339 
2340 	new_map->size = old_map->size;
2341 	vm_map_unlock(old_map);
2342 
2343 	return (vm2);
2344 }
2345 
2346 /*
2347  * Unshare the specified VM space for exec.  If other processes are
2348  * using it, then create a new one.  The new vmspace has no mappings.
2349  */
2350 
2351 void
2352 vmspace_exec(struct proc *p) {
2353 	struct vmspace *oldvmspace = p->p_vmspace;
2354 	struct vmspace *newvmspace;
2355 	vm_map_t map = &p->p_vmspace->vm_map;
2356 
2357 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2358 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2359 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2360 	/*
2361 	 * This code is written like this for prototype purposes.  The
2362 	 * goal is to avoid running down the vmspace here, but to let the
2363 	 * other processes that are still using the vmspace finally run it
2364 	 * down.  Even though there is little or no chance of blocking
2365 	 * here, it is a good idea to keep this form for future mods.
2366 	 */
2367 	vmspace_free(oldvmspace);
2368 	p->p_vmspace = newvmspace;
2369 	if (p == curproc)
2370 		pmap_activate(p);
2371 }
2372 
2373 /*
2374  * Unshare the specified VM space for forcing COW.  This
2375  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2376  */
2377 
2378 void
2379 vmspace_unshare(struct proc *p) {
2380 	struct vmspace *oldvmspace = p->p_vmspace;
2381 	struct vmspace *newvmspace;
2382 
2383 	if (oldvmspace->vm_refcnt == 1)
2384 		return;
2385 	newvmspace = vmspace_fork(oldvmspace);
2386 	vmspace_free(oldvmspace);
2387 	p->p_vmspace = newvmspace;
2388 	if (p == curproc)
2389 		pmap_activate(p);
2390 }
2391 
2392 
2393 /*
2394  *	vm_map_lookup:
2395  *
2396  *	Finds the VM object, offset, and
2397  *	protection for a given virtual address in the
2398  *	specified map, assuming a page fault of the
2399  *	type specified.
2400  *
2401  *	Leaves the map in question locked for read; return
2402  *	values are guaranteed until a vm_map_lookup_done
2403  *	call is performed.  Note that the map argument
2404  *	is in/out; the returned map must be used in
2405  *	the call to vm_map_lookup_done.
2406  *
2407  *	A handle (out_entry) is returned for use in
2408  *	vm_map_lookup_done, to make that fast.
2409  *
2410  *	If a lookup is requested with "write protection"
2411  *	specified, the map may be changed to perform virtual
2412  *	copying operations, although the data referenced will
2413  *	remain the same.
2414  */
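     /*
      * Typical use (sketch, in the spirit of the fault handler):
      *
      *	result = vm_map_lookup(&map, vaddr, fault_type, &entry,
      *	    &object, &pindex, &prot, &wired);
      *	if (result != KERN_SUCCESS)
      *		return (result);
      *	... find or page in (object, pindex) ...
      *	vm_map_lookup_done(map, entry);
      */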
2415 int
2416 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2417 	      vm_offset_t vaddr,
2418 	      vm_prot_t fault_typea,
2419 	      vm_map_entry_t *out_entry,	/* OUT */
2420 	      vm_object_t *object,		/* OUT */
2421 	      vm_pindex_t *pindex,		/* OUT */
2422 	      vm_prot_t *out_prot,		/* OUT */
2423 	      boolean_t *wired)			/* OUT */
2424 {
2425 	vm_map_entry_t entry;
2426 	vm_map_t map = *var_map;
2427 	vm_prot_t prot;
2428 	vm_prot_t fault_type = fault_typea;
2429 
2430 RetryLookup:;
2431 
2432 	/*
2433 	 * Lookup the faulting address.
2434 	 */
2435 
2436 	vm_map_lock_read(map);
2437 
2438 #define	RETURN(why) \
2439 		{ \
2440 		vm_map_unlock_read(map); \
2441 		return(why); \
2442 		}
2443 
2444 	/*
2445 	 * If the map has an interesting hint, try it before calling the
2446 	 * full-blown lookup routine.
2447 	 */
2448 
2449 	entry = map->hint;
2450 
2451 	*out_entry = entry;
2452 
2453 	if ((entry == &map->header) ||
2454 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2455 		vm_map_entry_t tmp_entry;
2456 
2457 		/*
2458 		 * Entry was either not a valid hint, or the vaddr was not
2459 		 * contained in the entry, so do a full lookup.
2460 		 */
2461 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2462 			RETURN(KERN_INVALID_ADDRESS);
2463 
2464 		entry = tmp_entry;
2465 		*out_entry = entry;
2466 	}
2467 
2468 	/*
2469 	 * Handle submaps.
2470 	 */
2471 
2472 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2473 		vm_map_t old_map = map;
2474 
2475 		*var_map = map = entry->object.sub_map;
2476 		vm_map_unlock_read(old_map);
2477 		goto RetryLookup;
2478 	}
2479 
2480 	/*
2481 	 * Check whether this task is allowed to have this page.
2482 	 * Note the special case for MAP_ENTRY_COW
2483 	 * pages with an override.  This is to implement a forced
2484 	 * COW for debuggers.
2485 	 */
2486 
2487 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2488 		prot = entry->max_protection;
2489 	else
2490 		prot = entry->protection;
2491 
2492 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2493 	if ((fault_type & prot) != fault_type) {
2494 		RETURN(KERN_PROTECTION_FAILURE);
2495 	}
2496 
2497 	if (entry->wired_count && (fault_type & VM_PROT_WRITE) &&
2498 	    (entry->eflags & MAP_ENTRY_COW) &&
2499 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2500 		RETURN(KERN_PROTECTION_FAILURE);
2501 	}
2502 
2503 	/*
2504 	 * If this page is not pageable, we have to get it for all possible
2505 	 * accesses.
2506 	 */
2507 
2508 	*wired = (entry->wired_count != 0);
2509 	if (*wired)
2510 		prot = fault_type = entry->protection;
2511 
2512 	/*
2513 	 * If the entry was copy-on-write, we shadow it or demote the access.
2514 	 */
2515 
2516 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2517 		/*
2518 		 * If we want to write the page, we may as well handle that
2519 		 * now since we've got the map locked.
2520 		 *
2521 		 * If we don't need to write the page, we just demote the
2522 		 * permissions allowed.
2523 		 */
2524 
2525 		if (fault_type & VM_PROT_WRITE) {
2526 			/*
2527 			 * Make a new object, and place it in the object
2528 			 * chain.  Note that no new references have appeared
2529 			 * -- one just moved from the map to the new
2530 			 * object.
2531 			 */
2532 
2533 			if (vm_map_lock_upgrade(map))
2534 				goto RetryLookup;
2535 
2536 			vm_object_shadow(
2537 			    &entry->object.vm_object,
2538 			    &entry->offset,
2539 			    atop(entry->end - entry->start));
2540 
2541 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2542 			vm_map_lock_downgrade(map);
2543 		} else {
2544 			/*
2545 			 * We're attempting to read a copy-on-write page --
2546 			 * don't allow writes.
2547 			 */
2548 
2549 			prot &= ~VM_PROT_WRITE;
2550 		}
2551 	}
2552 
2553 	/*
2554 	 * Create an object if necessary.
2555 	 */
2556 	if (entry->object.vm_object == NULL) {
2557 		if (vm_map_lock_upgrade(map))
2558 			goto RetryLookup;
2559 
2560 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2561 		    atop(entry->end - entry->start));
2562 		entry->offset = 0;
2563 		vm_map_lock_downgrade(map);
2564 	}
2565 
2566 	/*
2567 	 * Return the object/offset from this entry.  If the entry was
2568 	 * copy-on-write or empty, it has been fixed up.
2569 	 */
2570 
2571 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
2572 	*object = entry->object.vm_object;
2573 
2574 	/*
2575 	 * Return the protection that the caller may use for the access.
2576 	 */
2577 
2578 	*out_prot = prot;
2579 	return (KERN_SUCCESS);
2580 
2581 #undef	RETURN
2582 }
2583 
2584 /*
2585  *	vm_map_lookup_done:
2586  *
2587  *	Releases locks acquired by a vm_map_lookup
2588  *	(according to the handle returned by that lookup).
2589  */
2590 
2591 void
2592 vm_map_lookup_done(map, entry)
2593 	vm_map_t map;
2594 	vm_map_entry_t entry;
2595 {
2596 	/*
2597 	 * Unlock the main-level map
2598 	 */
2599 
2600 	vm_map_unlock_read(map);
2601 }
2602 
2603 /*
2604  * Implement uiomove with VM operations.  This code (and its collateral
2605  * changes) supports every combination of source object modification and
2606  * COW type operation.
2607  */
2608 int
2609 vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
2610 	vm_map_t mapa;
2611 	vm_object_t srcobject;
2612 	off_t cp;
2613 	int cnta;
2614 	vm_offset_t uaddra;
2615 	int *npages;
2616 {
2617 	vm_map_t map;
2618 	vm_object_t first_object, oldobject, object;
2619 	vm_map_entry_t entry;
2620 	vm_prot_t prot;
2621 	boolean_t wired;
2622 	int tcnt, rv;
2623 	vm_offset_t uaddr, start, end, tend;
2624 	vm_pindex_t first_pindex, osize, oindex;
2625 	off_t ooffset;
2626 	int cnt;
2627 
2628 	if (npages)
2629 		*npages = 0;
2630 
2631 	cnt = cnta;
2632 	uaddr = uaddra;
2633 
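     	/*
     	 * Handle the transfer one map entry at a time.  Depending on what
     	 * currently backs the user address, the code below either
     	 * redirects an existing vnode-backed entry at srcobject, re-points
     	 * the backing object of a suitably sized anonymous object, or
     	 * deletes the entry and re-inserts it as a copy-on-write mapping
     	 * of srcobject.
     	 */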
2634 	while (cnt > 0) {
2635 		map = mapa;
2636 
2637 		if ((vm_map_lookup(&map, uaddr,
2638 			VM_PROT_READ, &entry, &first_object,
2639 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
2640 			return EFAULT;
2641 		}
2642 
2643 		vm_map_clip_start(map, entry, uaddr);
2644 
2645 		tcnt = cnt;
2646 		tend = uaddr + tcnt;
2647 		if (tend > entry->end) {
2648 			tcnt = entry->end - uaddr;
2649 			tend = entry->end;
2650 		}
2651 
2652 		vm_map_clip_end(map, entry, tend);
2653 
2654 		start = entry->start;
2655 		end = entry->end;
2656 
2657 		osize = atop(tcnt);
2658 
2659 		oindex = OFF_TO_IDX(cp);
2660 		if (npages) {
2661 			vm_pindex_t idx;
2662 			for (idx = 0; idx < osize; idx++) {
2663 				vm_page_t m;
2664 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
2665 					vm_map_lookup_done(map, entry);
2666 					return 0;
2667 				}
2668 				/*
2669 				 * disallow busy or invalid pages, but allow
2670 				 * m->busy pages if they are entirely valid.
2671 				 */
2672 				if ((m->flags & PG_BUSY) ||
2673 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
2674 					vm_map_lookup_done(map, entry);
2675 					return 0;
2676 				}
2677 			}
2678 		}
2679 
2680 /*
2681  * If we are changing an existing map entry, just redirect
2682  * the object, and change mappings.
2683  */
2684 		if ((first_object->type == OBJT_VNODE) &&
2685 			((oldobject = entry->object.vm_object) == first_object)) {
2686 
2687 			if ((entry->offset != cp) || (oldobject != srcobject)) {
2688 				/*
2689 				 * Remove old window into the file
2690 				 */
2691 				pmap_remove (map->pmap, uaddr, tend);
2692 
2693 				/*
2694 				 * Force copy on write for mmaped regions
2695 				 */
2696 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2697 
2698 				/*
2699 				 * Point the object appropriately
2700 				 */
2701 				if (oldobject != srcobject) {
2702 
2703 					/*
2704 					 * Set the object optimization hint flag
2705 					 */
2706 					vm_object_set_flag(srcobject, OBJ_OPT);
2707 					vm_object_reference(srcobject);
2708 					entry->object.vm_object = srcobject;
2709 
2710 					if (oldobject) {
2711 						vm_object_deallocate(oldobject);
2712 					}
2713 				}
2714 
2715 				entry->offset = cp;
2716 				map->timestamp++;
2717 			} else {
2718 				pmap_remove (map->pmap, uaddr, tend);
2719 			}
2720 
2721 		} else if ((first_object->ref_count == 1) &&
2722 			(first_object->size == osize) &&
2723 			((first_object->type == OBJT_DEFAULT) ||
2724 				(first_object->type == OBJT_SWAP)) ) {
2725 
2726 			oldobject = first_object->backing_object;
2727 
2728 			if ((first_object->backing_object_offset != cp) ||
2729 				(oldobject != srcobject)) {
2730 				/*
2731 				 * Remove old window into the file
2732 				 */
2733 				pmap_remove (map->pmap, uaddr, tend);
2734 
2735 				/*
2736 				 * Remove unneeded old pages
2737 				 */
2738 				vm_object_page_remove(first_object, 0, 0, 0);
2739 
2740 				/*
2741 				 * Invalidate swap space
2742 				 */
2743 				if (first_object->type == OBJT_SWAP) {
2744 					swap_pager_freespace(first_object,
2745 						0,
2746 						first_object->size);
2747 				}
2748 
2749 				/*
2750 				 * Force copy on write for mmaped regions
2751 				 */
2752 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2753 
2754 				/*
2755 				 * Point the object appropriately
2756 				 */
2757 				if (oldobject != srcobject) {
2758 
2759 					/*
2760 					 * Set the object optimization hint flag
2761 					 */
2762 					vm_object_set_flag(srcobject, OBJ_OPT);
2763 					vm_object_reference(srcobject);
2764 
2765 					if (oldobject) {
2766 						TAILQ_REMOVE(&oldobject->shadow_head,
2767 							first_object, shadow_list);
2768 						oldobject->shadow_count--;
2769 						vm_object_deallocate(oldobject);
2770 					}
2771 
2772 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
2773 						first_object, shadow_list);
2774 					srcobject->shadow_count++;
2775 
2776 					first_object->backing_object = srcobject;
2777 				}
2778 				first_object->backing_object_offset = cp;
2779 				map->timestamp++;
2780 			} else {
2781 				pmap_remove (map->pmap, uaddr, tend);
2782 			}
2783 /*
2784  * Otherwise, we have to do a logical mmap.
2785  */
2786 		} else {
2787 
2788 			vm_object_set_flag(srcobject, OBJ_OPT);
2789 			vm_object_reference(srcobject);
2790 
2791 			pmap_remove (map->pmap, uaddr, tend);
2792 
2793 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2794 			vm_map_lock_upgrade(map);
2795 
2796 			if (entry == &map->header) {
2797 				map->first_free = &map->header;
2798 			} else if (map->first_free->start >= start) {
2799 				map->first_free = entry->prev;
2800 			}
2801 
2802 			SAVE_HINT(map, entry->prev);
2803 			vm_map_entry_delete(map, entry);
2804 
2805 			object = srcobject;
2806 			ooffset = cp;
2807 
2808 			rv = vm_map_insert(map, object, ooffset, start, tend,
2809 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
2810 
2811 			if (rv != KERN_SUCCESS)
2812 				panic("vm_uiomove: could not insert new entry: %d", rv);
2813 		}
2814 
2815 /*
2816  * Map the window directly, if it is already in memory
2817  */
2818 		pmap_object_init_pt(map->pmap, uaddr,
2819 			srcobject, oindex, tcnt, 0);
2820 
2821 		map->timestamp++;
2822 		vm_map_unlock(map);
2823 
2824 		cnt -= tcnt;
2825 		uaddr += tcnt;
2826 		cp += tcnt;
2827 		if (npages)
2828 			*npages += osize;
2829 	}
2830 	return 0;
2831 }
2832 
2833 /*
2834  * Performs the copy-on-write operations necessary to allow the virtual copies
2835  * into user space to work.  This has to be called for write(2) system calls
2836  * from other processes, file unlinking, and file size shrinkage.
2837  */
2838 void
2839 vm_freeze_copyopts(object, froma, toa)
2840 	vm_object_t object;
2841 	vm_pindex_t froma, toa;
2842 {
2843 	int rv;
2844 	vm_object_t robject;
2845 	vm_pindex_t idx;
2846 
2847 	if ((object == NULL) ||
2848 		((object->flags & OBJ_OPT) == 0))
2849 		return;
2850 
2851 	if (object->shadow_count > object->ref_count)
2852 		panic("vm_freeze_copyopts: sc > rc");
2853 
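     	/*
     	 * Give every object shadowing this one its own copies of the
     	 * affected pages and then detach it, so that later changes to the
     	 * underlying file are no longer visible through the old virtual
     	 * copies.
     	 */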
2854 	while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
2855 		vm_pindex_t bo_pindex;
2856 		vm_page_t m_in, m_out;
2857 
2858 		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
2859 
2860 		vm_object_reference(robject);
2861 
2862 		vm_object_pip_wait(robject, "objfrz");
2863 
2864 		if (robject->ref_count == 1) {
2865 			vm_object_deallocate(robject);
2866 			continue;
2867 		}
2868 
2869 		vm_object_pip_add(robject, 1);
2870 
2871 		for (idx = 0; idx < robject->size; idx++) {
2872 
2873 			m_out = vm_page_grab(robject, idx,
2874 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2875 
2876 			if (m_out->valid == 0) {
2877 				m_in = vm_page_grab(object, bo_pindex + idx,
2878 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2879 				if (m_in->valid == 0) {
2880 					rv = vm_pager_get_pages(object, &m_in, 1, 0);
2881 					if (rv != VM_PAGER_OK) {
2882 						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (u_long)m_in->pindex);
2883 						continue;
2884 					}
2885 					vm_page_deactivate(m_in);
2886 				}
2887 
2888 				vm_page_protect(m_in, VM_PROT_NONE);
2889 				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
2890 				m_out->valid = m_in->valid;
2891 				vm_page_dirty(m_out);
2892 				vm_page_activate(m_out);
2893 				vm_page_wakeup(m_in);
2894 			}
2895 			vm_page_wakeup(m_out);
2896 		}
2897 
2898 		object->shadow_count--;
2899 		object->ref_count--;
2900 		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
2901 		robject->backing_object = NULL;
2902 		robject->backing_object_offset = 0;
2903 
2904 		vm_object_pip_wakeup(robject);
2905 		vm_object_deallocate(robject);
2906 	}
2907 
2908 	vm_object_clear_flag(object, OBJ_OPT);
2909 }
2910 
2911 #include "opt_ddb.h"
2912 #ifdef DDB
2913 #include <sys/kernel.h>
2914 
2915 #include <ddb/ddb.h>
2916 
2917 /*
2918  *	vm_map_print:	[ debug ]
2919  */
2920 DB_SHOW_COMMAND(map, vm_map_print)
2921 {
2922 	static int nlines;
2923 	/* XXX convert args. */
2924 	vm_map_t map = (vm_map_t)addr;
2925 	boolean_t full = have_addr;
2926 
2927 	vm_map_entry_t entry;
2928 
2929 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
2930 	    (void *)map,
2931 	    (void *)map->pmap, map->nentries, map->timestamp);
2932 	nlines++;
2933 
2934 	if (!full && db_indent)
2935 		return;
2936 
2937 	db_indent += 2;
2938 	for (entry = map->header.next; entry != &map->header;
2939 	    entry = entry->next) {
2940 		db_iprintf("map entry %p: start=%p, end=%p\n",
2941 		    (void *)entry, (void *)entry->start, (void *)entry->end);
2942 		nlines++;
2943 		{
2944 			static char *inheritance_name[4] =
2945 			{"share", "copy", "none", "donate_copy"};
2946 
2947 			db_iprintf(" prot=%x/%x/%s",
2948 			    entry->protection,
2949 			    entry->max_protection,
2950 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
2951 			if (entry->wired_count != 0)
2952 				db_printf(", wired");
2953 		}
2954 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2955 			/* XXX no %qd in kernel.  Truncate entry->offset. */
2956 			db_printf(", share=%p, offset=0x%lx\n",
2957 			    (void *)entry->object.sub_map,
2958 			    (long)entry->offset);
2959 			nlines++;
2960 			if ((entry->prev == &map->header) ||
2961 			    (entry->prev->object.sub_map !=
2962 				entry->object.sub_map)) {
2963 				db_indent += 2;
2964 				vm_map_print((db_expr_t)(intptr_t)
2965 					     entry->object.sub_map,
2966 					     full, 0, (char *)0);
2967 				db_indent -= 2;
2968 			}
2969 		} else {
2970 			/* XXX no %qd in kernel.  Truncate entry->offset. */
2971 			db_printf(", object=%p, offset=0x%lx",
2972 			    (void *)entry->object.vm_object,
2973 			    (long)entry->offset);
2974 			if (entry->eflags & MAP_ENTRY_COW)
2975 				db_printf(", copy (%s)",
2976 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
2977 			db_printf("\n");
2978 			nlines++;
2979 
2980 			if ((entry->prev == &map->header) ||
2981 			    (entry->prev->object.vm_object !=
2982 				entry->object.vm_object)) {
2983 				db_indent += 2;
2984 				vm_object_print((db_expr_t)(intptr_t)
2985 						entry->object.vm_object,
2986 						full, 0, (char *)0);
2987 				nlines += 4;
2988 				db_indent -= 2;
2989 			}
2990 		}
2991 	}
2992 	db_indent -= 2;
2993 	if (db_indent == 0)
2994 		nlines = 0;
2995 }
2996 
2997 
2998 DB_SHOW_COMMAND(procvm, procvm)
2999 {
3000 	struct proc *p;
3001 
3002 	if (have_addr) {
3003 		p = (struct proc *) addr;
3004 	} else {
3005 		p = curproc;
3006 	}
3007 
3008 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3009 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3010 	    (void *)vmspace_pmap(p->p_vmspace));
3011 
3012 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3013 }
3014 
3015 #endif /* DDB */
3016