xref: /freebsd/sys/vm/vm_map.c (revision 2ad872c5794e4c26fdf6ed219ad3f09ca0d5304a)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_map.c,v 1.138 1998/10/25 17:44:58 phk Exp $
65  */
66 
67 /*
68  *	Virtual memory mapping module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/proc.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #ifdef VM_STACK
79 #include <sys/resourcevar.h>
80 #endif
81 
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/vm_prot.h>
85 #include <vm/vm_inherit.h>
86 #include <sys/lock.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_pager.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 #include <vm/default_pager.h>
95 #include <vm/swap_pager.h>
96 #include <vm/vm_zone.h>
97 
98 /*
99  *	Virtual memory maps provide for the mapping, protection,
100  *	and sharing of virtual memory objects.  In addition,
101  *	this module provides for an efficient virtual copy of
102  *	memory from one map to another.
103  *
104  *	Synchronization is required prior to most operations.
105  *
106  *	Maps consist of an ordered doubly-linked list of simple
107  *	entries; a single hint is used to speed up lookups.
108  *
109  *	In order to properly represent the sharing of virtual
110  *	memory regions among maps, the map structure is bi-level.
111  *	Top-level ("address") maps refer to regions of sharable
112  *	virtual memory.  These regions are implemented as
113  *	("sharing") maps, which then refer to the actual virtual
114  *	memory objects.  When two address maps "share" memory,
115  *	their top-level maps both have references to the same
116  *	sharing map.  When memory is virtual-copied from one
117  *	address map to another, the references in the sharing
118  *	maps are actually copied -- no copying occurs at the
119  *	virtual memory object level.
120  *
121  *	Since portions of maps are specified by start/end addresses,
122  *	which may not align with existing map entries, all
123  *	routines merely "clip" entries to these start/end values.
124  *	[That is, an entry is split into two, bordering at a
125  *	start or end value.]  Note that these clippings may not
126  *	always be necessary (as the two resulting entries are then
127  *	not changed); however, the clipping is done for convenience.
128  *	No attempt is currently made to "glue back together" two
129  *	abutting entries.
130  *
131  *	As mentioned above, virtual copy operations are performed
132  *	by copying VM object references from one sharing map to
133  *	another, and then marking both regions as copy-on-write.
134  *	It is important to note that only one writeable reference
135  *	to a VM object region exists in any map -- this means that
136  *	shadow object creation can be delayed until a write operation
137  *	occurs.
138  */
139 
140 /*
141  *	vm_map_startup:
142  *
143  *	Initialize the vm_map module.  Must be called before
144  *	any other vm_map routines.
145  *
146  *	Map and entry structures are allocated from the general
147  *	purpose memory pool with some exceptions:
148  *
149  *	- The kernel map and kmem submap are allocated statically.
150  *	- Kernel map entries are allocated out of a static pool.
151  *
152  *	These restrictions are necessary since malloc() uses the
153  *	maps and requires map entries.
154  */
155 
156 extern char kstack[];
157 extern int inmprotect;
158 
159 static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
160 static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
161 static struct vm_object kmapentobj, mapentobj, mapobj;
162 #define MAP_ENTRY_INIT	128
163 static struct vm_map_entry map_entry_init[MAX_MAPENT];
164 static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
165 static struct vm_map map_init[MAX_KMAP];
166 
167 static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
168 static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
169 static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
170 static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
171 static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
172 static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
173 static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
174 		vm_map_entry_t));
175 static void vm_map_split __P((vm_map_entry_t));
176 
177 void
178 vm_map_startup()
179 {
180 	mapzone = &mapzone_store;
181 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
182 		map_init, MAX_KMAP);
183 	kmapentzone = &kmapentzone_store;
184 	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
185 		kmap_entry_init, MAX_KMAPENT);
186 	mapentzone = &mapentzone_store;
187 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
188 		map_entry_init, MAX_MAPENT);
189 }
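
/*
 * Illustrative sketch only: the bootstrap ordering the comments above
 * assume.  The intermediate steps shown are approximate and live outside
 * this file.
 *
 *	vm_map_startup();	static zones only; malloc() is not yet usable
 *	... kernel_map and the kmem submaps are created ...
 *	vm_init2();		re-point the zones at their backing objects
 */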
190 
191 /*
192  * Allocate a vmspace structure, including a vm_map and pmap,
193  * and initialize those structures.  The refcnt is set to 1.
194  * The remaining fields must be initialized by the caller.
195  */
196 struct vmspace *
197 vmspace_alloc(min, max)
198 	vm_offset_t min, max;
199 {
200 	struct vmspace *vm;
201 
202 	vm = zalloc(vmspace_zone);
203 	bzero(&vm->vm_map, sizeof vm->vm_map);
204 	vm_map_init(&vm->vm_map, min, max);
205 	pmap_pinit(&vm->vm_pmap);
206 	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
207 	vm->vm_refcnt = 1;
208 	vm->vm_shm = NULL;
209 	return (vm);
210 }
211 
212 void
213 vm_init2(void) {
214 	zinitna(kmapentzone, &kmapentobj,
215 		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
216 	zinitna(mapentzone, &mapentobj,
217 		NULL, 0, 0, 0, 1);
218 	zinitna(mapzone, &mapobj,
219 		NULL, 0, 0, 0, 1);
220 	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
221 	pmap_init2();
222 	vm_object_init2();
223 }
224 
225 void
226 vmspace_free(vm)
227 	struct vmspace *vm;
228 {
229 
230 	if (vm->vm_refcnt == 0)
231 		panic("vmspace_free: attempt to free already freed vmspace");
232 
233 	if (--vm->vm_refcnt == 0) {
234 
235 		/*
236 		 * Lock the map, to wait out all other references to it.
237 		 * Delete all of the mappings and pages they hold, then call
238 		 * the pmap module to reclaim anything left.
239 		 */
240 		vm_map_lock(&vm->vm_map);
241 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
242 		    vm->vm_map.max_offset);
243 		vm_map_unlock(&vm->vm_map);
244 
245 		pmap_release(&vm->vm_pmap);
246 		zfree(vmspace_zone, vm);
247 	}
248 }
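
/*
 * Illustrative only: the vmspace lifecycle as seen by fork/exit style
 * callers.  The bounds shown are hypothetical.
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *	...			each additional sharer bumps vm->vm_refcnt
 *	vmspace_free(vm);	drops a reference; the final call deletes
 *				the mappings and releases the pmap
 */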
249 
250 /*
251  *	vm_map_create:
252  *
253  *	Creates and returns a new empty VM map with
254  *	the given physical map structure, and having
255  *	the given lower and upper address bounds.
256  */
257 vm_map_t
258 vm_map_create(pmap, min, max)
259 	pmap_t pmap;
260 	vm_offset_t min, max;
261 {
262 	vm_map_t result;
263 
264 	result = zalloc(mapzone);
265 	vm_map_init(result, min, max);
266 	result->pmap = pmap;
267 	return (result);
268 }
269 
270 /*
271  * Initialize an existing vm_map structure
272  * such as that in the vmspace structure.
273  * The pmap is set elsewhere.
274  */
275 void
276 vm_map_init(map, min, max)
277 	struct vm_map *map;
278 	vm_offset_t min, max;
279 {
280 	map->header.next = map->header.prev = &map->header;
281 	map->nentries = 0;
282 	map->size = 0;
283 	map->is_main_map = TRUE;
284 	map->system_map = 0;
285 	map->min_offset = min;
286 	map->max_offset = max;
287 	map->first_free = &map->header;
288 	map->hint = &map->header;
289 	map->timestamp = 0;
290 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
291 }
292 
293 /*
294  *	vm_map_entry_dispose:	[ internal use only ]
295  *
296  *	Inverse of vm_map_entry_create.
297  */
298 static void
299 vm_map_entry_dispose(map, entry)
300 	vm_map_t map;
301 	vm_map_entry_t entry;
302 {
303 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
304 }
305 
306 /*
307  *	vm_map_entry_create:	[ internal use only ]
308  *
309  *	Allocates a VM map entry for insertion.
310  *	No entry fields are filled in.  This routine is for internal use only.
311  */
312 static vm_map_entry_t
313 vm_map_entry_create(map)
314 	vm_map_t map;
315 {
316 	return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
317 }
318 
319 /*
320  *	vm_map_entry_{un,}link:
321  *
322  *	Insert/remove entries from maps.
323  */
324 #define	vm_map_entry_link(map, after_where, entry) \
325 		{ \
326 		(map)->nentries++; \
327 		(map)->timestamp++; \
328 		(entry)->prev = (after_where); \
329 		(entry)->next = (after_where)->next; \
330 		(entry)->prev->next = (entry); \
331 		(entry)->next->prev = (entry); \
332 		}
333 #define	vm_map_entry_unlink(map, entry) \
334 		{ \
335 		(map)->nentries--; \
336 		(map)->timestamp++; \
337 		(entry)->next->prev = (entry)->prev; \
338 		(entry)->prev->next = (entry)->next; \
339 		}
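
/*
 * Illustrative only: vm_map_entry_link() splices "entry" into the
 * circular, header-terminated list immediately after "after_where", so
 * inserting at the front of the map is written as
 *
 *	vm_map_entry_link(map, &map->header, new_entry);
 *
 * after which the list reads header <-> new_entry <-> old first entry.
 */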
340 
341 /*
342  *	SAVE_HINT:
343  *
344  *	Saves the specified entry as the hint for
345  *	future lookups.
346  */
347 #define	SAVE_HINT(map,value) \
348 		(map)->hint = (value);
349 
350 /*
351  *	vm_map_lookup_entry:	[ internal use only ]
352  *
353  *	Finds the map entry containing (or
354  *	immediately preceding) the specified address
355  *	in the given map; the entry is returned
356  *	in the "entry" parameter.  The boolean
357  *	result indicates whether the address is
358  *	actually contained in the map.
359  */
360 boolean_t
361 vm_map_lookup_entry(map, address, entry)
362 	vm_map_t map;
363 	vm_offset_t address;
364 	vm_map_entry_t *entry;	/* OUT */
365 {
366 	vm_map_entry_t cur;
367 	vm_map_entry_t last;
368 
369 	/*
370 	 * Start looking either from the head of the list, or from the hint.
371 	 */
372 
373 	cur = map->hint;
374 
375 	if (cur == &map->header)
376 		cur = cur->next;
377 
378 	if (address >= cur->start) {
379 		/*
380 		 * Go from hint to end of list.
381 		 *
382 		 * But first, make a quick check to see if we are already looking
383 		 * at the entry we want (which is usually the case). Note also
384 		 * that we don't need to save the hint here... it is the same
385 		 * hint (unless we are at the header, in which case the hint
386 		 * didn't buy us anything anyway).
387 		 */
388 		last = &map->header;
389 		if ((cur != last) && (cur->end > address)) {
390 			*entry = cur;
391 			return (TRUE);
392 		}
393 	} else {
394 		/*
395 		 * Go from start to hint, *inclusively*
396 		 */
397 		last = cur->next;
398 		cur = map->header.next;
399 	}
400 
401 	/*
402 	 * Search linearly
403 	 */
404 
405 	while (cur != last) {
406 		if (cur->end > address) {
407 			if (address >= cur->start) {
408 				/*
409 				 * Save this lookup for future hints, and
410 				 * return
411 				 */
412 
413 				*entry = cur;
414 				SAVE_HINT(map, cur);
415 				return (TRUE);
416 			}
417 			break;
418 		}
419 		cur = cur->next;
420 	}
421 	*entry = cur->prev;
422 	SAVE_HINT(map, *entry);
423 	return (FALSE);
424 }
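
/*
 * Illustrative only: the caller pattern used throughout this file.  On a
 * hit the returned entry contains "start" and the caller clips it there;
 * on a miss the entry preceding the address is returned, so its successor
 * is the first entry in the range.
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 */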
425 
426 /*
427  *	vm_map_insert:
428  *
429  *	Inserts the given whole VM object into the target
430  *	map at the specified address range.  The object's
431  *	size should match that of the address range.
432  *
433  *	Requires that the map be locked, and leaves it so.
434  */
435 int
436 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
437 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
438 	      int cow)
439 {
440 	vm_map_entry_t new_entry;
441 	vm_map_entry_t prev_entry;
442 	vm_map_entry_t temp_entry;
443 	vm_object_t prev_object;
444 	u_char protoeflags;
445 
446 	if ((object != NULL) && (cow & MAP_NOFAULT)) {
447 		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
448 	}
449 
450 	/*
451 	 * Check that the start and end points are not bogus.
452 	 */
453 
454 	if ((start < map->min_offset) || (end > map->max_offset) ||
455 	    (start >= end))
456 		return (KERN_INVALID_ADDRESS);
457 
458 	/*
459 	 * Find the entry prior to the proposed starting address; if it's part
460 	 * of an existing entry, this range is bogus.
461 	 */
462 
463 	if (vm_map_lookup_entry(map, start, &temp_entry))
464 		return (KERN_NO_SPACE);
465 
466 	prev_entry = temp_entry;
467 
468 	/*
469 	 * Assert that the next entry doesn't overlap the end point.
470 	 */
471 
472 	if ((prev_entry->next != &map->header) &&
473 	    (prev_entry->next->start < end))
474 		return (KERN_NO_SPACE);
475 
476 	protoeflags = 0;
477 	if (cow & MAP_COPY_NEEDED)
478 		protoeflags |= MAP_ENTRY_NEEDS_COPY;
479 
480 	if (cow & MAP_COPY_ON_WRITE)
481 		protoeflags |= MAP_ENTRY_COW;
482 
483 	if (cow & MAP_NOFAULT)
484 		protoeflags |= MAP_ENTRY_NOFAULT;
485 
486 	/*
487 	 * See if we can avoid creating a new entry by extending one of our
488 	 * neighbors.  Or at least extend the object.
489 	 */
490 
491 	if ((object == NULL) &&
492 	    (prev_entry != &map->header) &&
493 	    (( prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
494 		((prev_entry->object.vm_object == NULL) ||
495 			(prev_entry->object.vm_object->type == OBJT_DEFAULT)) &&
496 	    (prev_entry->end == start) &&
497 	    (prev_entry->wired_count == 0)) {
498 
499 
500 		if ((protoeflags == prev_entry->eflags) &&
501 		    ((cow & MAP_NOFAULT) ||
502 		     vm_object_coalesce(prev_entry->object.vm_object,
503 					OFF_TO_IDX(prev_entry->offset),
504 					(vm_size_t) (prev_entry->end - prev_entry->start),
505 					(vm_size_t) (end - prev_entry->end)))) {
506 
507 			/*
508 			 * Coalesced the two objects.  Can we extend the
509 			 * previous map entry to include the new range?
510 			 */
511 			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
512 			    (prev_entry->protection == prot) &&
513 			    (prev_entry->max_protection == max)) {
514 
515 				map->size += (end - prev_entry->end);
516 				prev_entry->end = end;
517 				if ((cow & MAP_NOFAULT) == 0) {
518 					prev_object = prev_entry->object.vm_object;
519 					default_pager_convert_to_swapq(prev_object);
520 				}
521 				return (KERN_SUCCESS);
522 			}
523 			else {
524 				object = prev_entry->object.vm_object;
525 				offset = prev_entry->offset + (prev_entry->end -
526 							       prev_entry->start);
527 
528 				vm_object_reference(object);
529 			}
530 		}
531 	}
532 
533 	/*
534 	 * Create a new entry
535 	 */
536 
537 	new_entry = vm_map_entry_create(map);
538 	new_entry->start = start;
539 	new_entry->end = end;
540 
541 	new_entry->eflags = protoeflags;
542 	new_entry->object.vm_object = object;
543 	new_entry->offset = offset;
544 #ifdef VM_STACK
545 	new_entry->avail_ssize = 0;
546 #endif
547 
548 	if (object) {
549 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
550 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
551 		} else {
552 			vm_object_set_flag(object, OBJ_ONEMAPPING);
553 		}
554 	}
555 
556 	if (map->is_main_map) {
557 		new_entry->inheritance = VM_INHERIT_DEFAULT;
558 		new_entry->protection = prot;
559 		new_entry->max_protection = max;
560 		new_entry->wired_count = 0;
561 	}
562 	/*
563 	 * Insert the new entry into the list
564 	 */
565 
566 	vm_map_entry_link(map, prev_entry, new_entry);
567 	map->size += new_entry->end - new_entry->start;
568 
569 	/*
570 	 * Update the free space hint
571 	 */
572 	if ((map->first_free == prev_entry) &&
573 		(prev_entry->end >= new_entry->start))
574 		map->first_free = new_entry;
575 
576 	default_pager_convert_to_swapq(object);
577 	return (KERN_SUCCESS);
578 }
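
/*
 * Illustrative only: a minimal anonymous insertion.  The caller must
 * already hold the map lock; "start" and "size" are hypothetical.
 *
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, NULL, (vm_ooffset_t) 0, start, start + size,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	vm_map_unlock(map);
 */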
579 
580 #ifdef VM_STACK
581 int
582 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
583 	      vm_prot_t prot, vm_prot_t max, int cow)
584 {
585 	vm_map_entry_t prev_entry;
586 	vm_map_entry_t new_stack_entry;
587 	vm_size_t      init_ssize;
588 	int            rv;
589 
590 	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
591 		return (KERN_NO_SPACE);
592 
593 	if (max_ssize < SGROWSIZ)
594 		init_ssize = max_ssize;
595 	else
596 		init_ssize = SGROWSIZ;
597 
598 	vm_map_lock(map);
599 
600 	/* If addr is already mapped, no go */
601 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
602 		vm_map_unlock(map);
603 		return (KERN_NO_SPACE);
604 	}
605 
606 	/* If we can't accommodate max_ssize in the current mapping,
607 	 * no go.  However, we need to be aware that subsequent user
608 	 * mappings might map into the space we have reserved for
609 	 * stack, and currently this space is not protected.
610 	 *
611 	 * Hopefully we will at least detect this condition
612 	 * when we try to grow the stack.
613 	 */
614 	if ((prev_entry->next != &map->header) &&
615 	    (prev_entry->next->start < addrbos + max_ssize)) {
616 		vm_map_unlock(map);
617 		return (KERN_NO_SPACE);
618 	}
619 
620 	/* We initially map a stack of only init_ssize.  We will
621 	 * grow as needed later.  Since this is to be a grow
622 	 * down stack, we map at the top of the range.
623 	 *
624 	 * Note: we would normally expect prot and max to be
625 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
626 	 * eliminate these as input parameters, and just
627 	 * pass these values here in the insert call.
628 	 */
629 	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
630 	                   addrbos + max_ssize, prot, max, cow);
631 
632 	/* Now set the avail_ssize amount */
633 	if (rv == KERN_SUCCESS){
634 		new_stack_entry = prev_entry->next;
635 		if (new_stack_entry->end   != addrbos + max_ssize ||
636 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
637 			panic ("Bad entry start/end for new stack entry");
638 		else
639 			new_stack_entry->avail_ssize = max_ssize - init_ssize;
640 	}
641 
642 	vm_map_unlock(map);
643 	return (rv);
644 }
645 
646 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
647  * desired address is already mapped, or if we successfully grow
648  * the stack.  Also returns KERN_SUCCESS if addr is outside the
649  * stack range (this is strange, but preserves compatibility with
650  * the grow function in vm_machdep.c).
651  */
652 int
653 vm_map_growstack (struct proc *p, vm_offset_t addr)
654 {
655 	vm_map_entry_t prev_entry;
656 	vm_map_entry_t stack_entry;
657 	vm_map_entry_t new_stack_entry;
658 	struct vmspace *vm = p->p_vmspace;
659 	vm_map_t map = &vm->vm_map;
660 	vm_offset_t    end;
661 	int      grow_amount;
662 	int      rv;
663 	int      is_procstack = 0;
664 
665 	vm_map_lock(map);
666 
667 	/* If addr is already in the entry range, no need to grow.*/
668 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
669 		vm_map_unlock(map);
670 		return (KERN_SUCCESS);
671 	}
672 
673 	if ((stack_entry = prev_entry->next) == &map->header) {
674 		vm_map_unlock(map);
675 		return (KERN_SUCCESS);
676 	}
677 	if (prev_entry == &map->header)
678 		end = stack_entry->start - stack_entry->avail_ssize;
679 	else
680 		end = prev_entry->end;
681 
682 	/* This next test mimics the old grow function in vm_machdep.c.
683 	 * It really doesn't quite make sense, but we do it anyway
684 	 * for compatibility.
685 	 *
686 	 * If not growable stack, return success.  This signals the
687 	 * caller to proceed as it normally would with normal vm.
688 	 */
689 	if (stack_entry->avail_ssize < 1 ||
690 	    addr >= stack_entry->start ||
691 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
692 		vm_map_unlock(map);
693 		return (KERN_SUCCESS);
694 	}
695 
696 	/* Find the minimum grow amount */
697 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
698 	if (grow_amount > stack_entry->avail_ssize) {
699 		vm_map_unlock(map);
700 		return (KERN_NO_SPACE);
701 	}
702 
703 	/* If there is no longer enough space between the entries,
704 	 * fail and adjust the available space.  Note: this
705 	 * should only happen if the user has mapped into the
706 	 * stack area after the stack was created, and is
707 	 * probably an error.
708 	 *
709 	 * This also effectively destroys any guard page the user
710 	 * might have intended by limiting the stack size.
711 	 */
712 	if (grow_amount > stack_entry->start - end) {
713 		stack_entry->avail_ssize = stack_entry->start - end;
714 		vm_map_unlock(map);
715 		return (KERN_NO_SPACE);
716 	}
717 
718 	if (addr >= (vm_offset_t)vm->vm_maxsaddr)
719 		is_procstack = 1;
720 
721 	/* If this is the main process stack, see if we're over the
722 	 * stack limit.
723 	 */
724 	if (is_procstack && (vm->vm_ssize + grow_amount >
725 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
726 		vm_map_unlock(map);
727 		return (KERN_NO_SPACE);
728 	}
729 
730 	/* Round the grow amount up to a multiple of SGROWSIZ */
731 	grow_amount = roundup (grow_amount, SGROWSIZ);
732 	if (grow_amount > stack_entry->avail_ssize) {
733 		grow_amount = stack_entry->avail_ssize;
734 	}
735 	if (is_procstack && (vm->vm_ssize + grow_amount >
736 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
737 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
738 		              vm->vm_ssize;
739 	}
740 
741 	/* Get the preliminary new entry start value */
742 	addr = stack_entry->start - grow_amount;
743 
744 	/* If this puts us into the previous entry, cut back our growth
745 	 * to the available space.  Also, see the note above.
746 	 */
747 	if (addr < end) {
748 		stack_entry->avail_ssize = stack_entry->start - end;
749 		addr = end;
750 	}
751 
752 	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
753 			   stack_entry->protection,
754 			   stack_entry->max_protection,
755 			   0);
756 
757 	/* Adjust the available stack space by the amount we grew. */
758 	if (rv == KERN_SUCCESS) {
759 		new_stack_entry = prev_entry->next;
760 		if (new_stack_entry->end   != stack_entry->start  ||
761 		    new_stack_entry->start != addr)
762 			panic ("Bad stack grow start/end in new stack entry");
763 		else {
764 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
765 							(new_stack_entry->end -
766 							 new_stack_entry->start);
767 			vm->vm_ssize += new_stack_entry->end -
768 					new_stack_entry->start;
769 		}
770 	}
771 
772 	vm_map_unlock(map);
773 	return (rv);
774 
775 }
776 #endif
777 
778 /*
779  * Find sufficient space for `length' bytes in the given map, starting at
780  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
781  */
782 int
783 vm_map_findspace(map, start, length, addr)
784 	vm_map_t map;
785 	vm_offset_t start;
786 	vm_size_t length;
787 	vm_offset_t *addr;
788 {
789 	vm_map_entry_t entry, next;
790 	vm_offset_t end;
791 
792 	if (start < map->min_offset)
793 		start = map->min_offset;
794 	if (start > map->max_offset)
795 		return (1);
796 
797 	/*
798 	 * Look for the first possible address; if there's already something
799 	 * at this address, we have to start after it.
800 	 */
801 	if (start == map->min_offset) {
802 		if ((entry = map->first_free) != &map->header)
803 			start = entry->end;
804 	} else {
805 		vm_map_entry_t tmp;
806 
807 		if (vm_map_lookup_entry(map, start, &tmp))
808 			start = tmp->end;
809 		entry = tmp;
810 	}
811 
812 	/*
813 	 * Look through the rest of the map, trying to fit a new region in the
814 	 * gap between existing regions, or after the very last region.
815 	 */
816 	for (;; start = (entry = next)->end) {
817 		/*
818 		 * Find the end of the proposed new region.  Be sure we didn't
819 		 * go beyond the end of the map, or wrap around the address;
820 		 * if so, we lose.  Otherwise, if this is the last entry, or
821 		 * if the proposed new region fits before the next entry, we
822 		 * win.
823 		 */
824 		end = start + length;
825 		if (end > map->max_offset || end < start)
826 			return (1);
827 		next = entry->next;
828 		if (next == &map->header || next->start >= end)
829 			break;
830 	}
831 	SAVE_HINT(map, entry);
832 	*addr = start;
833 	if (map == kernel_map) {
834 		vm_offset_t ksize;
835 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
836 			pmap_growkernel(ksize);
837 		}
838 	}
839 	return (0);
840 }
841 
842 /*
843  *	vm_map_find finds an unallocated region in the target address
844  *	map with the given length.  The search is defined to be
845  *	first-fit from the specified address; the region found is
846  *	returned in the same parameter.
847  *
848  */
849 int
850 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
851 	    vm_offset_t *addr,	/* IN/OUT */
852 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
853 	    vm_prot_t max, int cow)
854 {
855 	vm_offset_t start;
856 	int result, s = 0;
857 
858 	start = *addr;
859 
860 	if (map == kmem_map || map == mb_map)
861 		s = splvm();
862 
863 	vm_map_lock(map);
864 	if (find_space) {
865 		if (vm_map_findspace(map, start, length, addr)) {
866 			vm_map_unlock(map);
867 			if (map == kmem_map || map == mb_map)
868 				splx(s);
869 			return (KERN_NO_SPACE);
870 		}
871 		start = *addr;
872 	}
873 	result = vm_map_insert(map, object, offset,
874 		start, start + length, prot, max, cow);
875 	vm_map_unlock(map);
876 
877 	if (map == kmem_map || map == mb_map)
878 		splx(s);
879 
880 	return (result);
881 }
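
/*
 * Illustrative only: letting vm_map_find() pick the address.  With
 * find_space TRUE, *addr on entry is only a lower bound for the search;
 * "size" is hypothetical.
 *
 *	vm_offset_t addr;
 *
 *	addr = vm_map_min(map);
 *	if (vm_map_find(map, NULL, 0, &addr, size, TRUE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)
 *		return (KERN_NO_SPACE);
 *	... addr now names the start of the new range ...
 */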
882 
883 /*
884  *	vm_map_simplify_entry:
885  *
886  *	Simplify the given map entry by merging with either neighbor.
887  */
888 void
889 vm_map_simplify_entry(map, entry)
890 	vm_map_t map;
891 	vm_map_entry_t entry;
892 {
893 	vm_map_entry_t next, prev;
894 	vm_size_t prevsize, esize;
895 
896 	if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
897 		return;
898 
899 	prev = entry->prev;
900 	if (prev != &map->header) {
901 		prevsize = prev->end - prev->start;
902 		if ( (prev->end == entry->start) &&
903 		     (prev->object.vm_object == entry->object.vm_object) &&
904 		     (!prev->object.vm_object ||
905 				(prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
906 		     (!prev->object.vm_object ||
907 			(prev->offset + prevsize == entry->offset)) &&
908 		     (prev->eflags == entry->eflags) &&
909 		     (prev->protection == entry->protection) &&
910 		     (prev->max_protection == entry->max_protection) &&
911 		     (prev->inheritance == entry->inheritance) &&
912 		     (prev->wired_count == entry->wired_count)) {
913 			if (map->first_free == prev)
914 				map->first_free = entry;
915 			if (map->hint == prev)
916 				map->hint = entry;
917 			vm_map_entry_unlink(map, prev);
918 			entry->start = prev->start;
919 			entry->offset = prev->offset;
920 			if (prev->object.vm_object)
921 				vm_object_deallocate(prev->object.vm_object);
922 			vm_map_entry_dispose(map, prev);
923 		}
924 	}
925 
926 	next = entry->next;
927 	if (next != &map->header) {
928 		esize = entry->end - entry->start;
929 		if ((entry->end == next->start) &&
930 		    (next->object.vm_object == entry->object.vm_object) &&
931 		    (!next->object.vm_object ||
932 				(next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
933 		     (!entry->object.vm_object ||
934 			(entry->offset + esize == next->offset)) &&
935 		    (next->eflags == entry->eflags) &&
936 		    (next->protection == entry->protection) &&
937 		    (next->max_protection == entry->max_protection) &&
938 		    (next->inheritance == entry->inheritance) &&
939 		    (next->wired_count == entry->wired_count)) {
940 			if (map->first_free == next)
941 				map->first_free = entry;
942 			if (map->hint == next)
943 				map->hint = entry;
944 			vm_map_entry_unlink(map, next);
945 			entry->end = next->end;
946 			if (next->object.vm_object)
947 				vm_object_deallocate(next->object.vm_object);
948 			vm_map_entry_dispose(map, next);
949 	        }
950 	}
951 }
952 /*
953  *	vm_map_clip_start:	[ internal use only ]
954  *
955  *	Asserts that the given entry begins at or after
956  *	the specified address; if necessary,
957  *	it splits the entry into two.
958  */
959 #define vm_map_clip_start(map, entry, startaddr) \
960 { \
961 	if (startaddr > entry->start) \
962 		_vm_map_clip_start(map, entry, startaddr); \
963 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
964 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
965 }
966 
967 /*
968  *	This routine is called only when it is known that
969  *	the entry must be split.
970  */
971 static void
972 _vm_map_clip_start(map, entry, start)
973 	vm_map_t map;
974 	vm_map_entry_t entry;
975 	vm_offset_t start;
976 {
977 	vm_map_entry_t new_entry;
978 
979 	/*
980 	 * Split off the front portion -- note that we must insert the new
981 	 * entry BEFORE this one, so that this entry has the specified
982 	 * starting address.
983 	 */
984 
985 	vm_map_simplify_entry(map, entry);
986 
987 	/*
988 	 * If there is no object backing this entry, we might as well create
989 	 * one now.  If we defer it, an object can get created after the map
990 	 * is clipped, and individual objects will be created for the split-up
991 	 * map.  This is a bit of a hack, but is also about the best place to
992 	 * put this improvement.
993 	 */
994 
995 	if (entry->object.vm_object == NULL) {
996 		vm_object_t object;
997 		object = vm_object_allocate(OBJT_DEFAULT,
998 				atop(entry->end - entry->start));
999 		entry->object.vm_object = object;
1000 		entry->offset = 0;
1001 	}
1002 
1003 	new_entry = vm_map_entry_create(map);
1004 	*new_entry = *entry;
1005 
1006 	new_entry->end = start;
1007 	entry->offset += (start - entry->start);
1008 	entry->start = start;
1009 
1010 	vm_map_entry_link(map, entry->prev, new_entry);
1011 
1012 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1013 		if (new_entry->object.vm_object->ref_count == 1)
1014 			vm_object_set_flag(new_entry->object.vm_object,
1015 					   OBJ_ONEMAPPING);
1016 		vm_object_reference(new_entry->object.vm_object);
1017 	}
1018 }
1019 
1020 /*
1021  *	vm_map_clip_end:	[ internal use only ]
1022  *
1023  *	Asserts that the given entry ends at or before
1024  *	the specified address; if necessary,
1025  *	it splits the entry into two.
1026  */
1027 
1028 #define vm_map_clip_end(map, entry, endaddr) \
1029 { \
1030 	if (endaddr < entry->end) \
1031 		_vm_map_clip_end(map, entry, endaddr); \
1032 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
1033 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
1034 }
1035 
1036 /*
1037  *	This routine is called only when it is known that
1038  *	the entry must be split.
1039  */
1040 static void
1041 _vm_map_clip_end(map, entry, end)
1042 	vm_map_t map;
1043 	vm_map_entry_t entry;
1044 	vm_offset_t end;
1045 {
1046 	vm_map_entry_t new_entry;
1047 
1048 	/*
1049 	 * If there is no object backing this entry, we might as well create
1050 	 * one now.  If we defer it, an object can get created after the map
1051 	 * is clipped, and individual objects will be created for the split-up
1052 	 * map.  This is a bit of a hack, but is also about the best place to
1053 	 * put this improvement.
1054 	 */
1055 
1056 	if (entry->object.vm_object == NULL) {
1057 		vm_object_t object;
1058 		object = vm_object_allocate(OBJT_DEFAULT,
1059 				atop(entry->end - entry->start));
1060 		entry->object.vm_object = object;
1061 		entry->offset = 0;
1062 	}
1063 
1064 	/*
1065 	 * Create a new entry and insert it AFTER the specified entry
1066 	 */
1067 
1068 	new_entry = vm_map_entry_create(map);
1069 	*new_entry = *entry;
1070 
1071 	new_entry->start = entry->end = end;
1072 	new_entry->offset += (end - entry->start);
1073 
1074 	vm_map_entry_link(map, entry, new_entry);
1075 
1076 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1077 		if (new_entry->object.vm_object->ref_count == 1)
1078 			vm_object_set_flag(new_entry->object.vm_object,
1079 					   OBJ_ONEMAPPING);
1080 		vm_object_reference(new_entry->object.vm_object);
1081 	}
1082 }
1083 
1084 /*
1085  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1086  *
1087  *	Asserts that the starting and ending region
1088  *	addresses fall within the valid range of the map.
1089  */
1090 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1091 		{					\
1092 		if (start < vm_map_min(map))		\
1093 			start = vm_map_min(map);	\
1094 		if (end > vm_map_max(map))		\
1095 			end = vm_map_max(map);		\
1096 		if (start > end)			\
1097 			start = end;			\
1098 		}
1099 
1100 /*
1101  *	vm_map_submap:		[ kernel use only ]
1102  *
1103  *	Mark the given range as handled by a subordinate map.
1104  *
1105  *	This range must have been created with vm_map_find,
1106  *	and no other operations may have been performed on this
1107  *	range prior to calling vm_map_submap.
1108  *
1109  *	Only a limited number of operations can be performed
1110  *	within this range after calling vm_map_submap:
1111  *		vm_fault
1112  *	[Don't try vm_map_copy!]
1113  *
1114  *	To remove a submapping, one must first remove the
1115  *	range from the superior map, and then destroy the
1116  *	submap (if desired).  [Better yet, don't try it.]
1117  */
1118 int
1119 vm_map_submap(map, start, end, submap)
1120 	vm_map_t map;
1121 	vm_offset_t start;
1122 	vm_offset_t end;
1123 	vm_map_t submap;
1124 {
1125 	vm_map_entry_t entry;
1126 	int result = KERN_INVALID_ARGUMENT;
1127 
1128 	vm_map_lock(map);
1129 
1130 	VM_MAP_RANGE_CHECK(map, start, end);
1131 
1132 	if (vm_map_lookup_entry(map, start, &entry)) {
1133 		vm_map_clip_start(map, entry, start);
1134 	} else
1135 		entry = entry->next;
1136 
1137 	vm_map_clip_end(map, entry, end);
1138 
1139 	if ((entry->start == start) && (entry->end == end) &&
1140 	    ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
1141 	    (entry->object.vm_object == NULL)) {
1142 		entry->object.sub_map = submap;
1143 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1144 		result = KERN_SUCCESS;
1145 	}
1146 	vm_map_unlock(map);
1147 
1148 	return (result);
1149 }
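
/*
 * Illustrative only: the kmem_suballoc()-style sequence that installs a
 * submap over a freshly reserved, otherwise untouched range.
 *
 *	(void) vm_map_find(parent, NULL, 0, &start, size, TRUE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	submap = vm_map_create(vm_map_pmap(parent), start, start + size);
 *	(void) vm_map_submap(parent, start, start + size, submap);
 */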
1150 
1151 /*
1152  *	vm_map_protect:
1153  *
1154  *	Sets the protection of the specified address
1155  *	region in the target map.  If "set_max" is
1156  *	specified, the maximum protection is to be set;
1157  *	otherwise, only the current protection is affected.
1158  */
1159 int
1160 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1161 	       vm_prot_t new_prot, boolean_t set_max)
1162 {
1163 	vm_map_entry_t current;
1164 	vm_map_entry_t entry;
1165 
1166 	vm_map_lock(map);
1167 
1168 	VM_MAP_RANGE_CHECK(map, start, end);
1169 
1170 	if (vm_map_lookup_entry(map, start, &entry)) {
1171 		vm_map_clip_start(map, entry, start);
1172 	} else {
1173 		entry = entry->next;
1174 	}
1175 
1176 	/*
1177 	 * Make a first pass to check for protection violations.
1178 	 */
1179 
1180 	current = entry;
1181 	while ((current != &map->header) && (current->start < end)) {
1182 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1183 			vm_map_unlock(map);
1184 			return (KERN_INVALID_ARGUMENT);
1185 		}
1186 		if ((new_prot & current->max_protection) != new_prot) {
1187 			vm_map_unlock(map);
1188 			return (KERN_PROTECTION_FAILURE);
1189 		}
1190 		current = current->next;
1191 	}
1192 
1193 	/*
1194 	 * Go back and fix up protections. [Note that clipping is not
1195 	 * necessary the second time.]
1196 	 */
1197 
1198 	current = entry;
1199 
1200 	while ((current != &map->header) && (current->start < end)) {
1201 		vm_prot_t old_prot;
1202 
1203 		vm_map_clip_end(map, current, end);
1204 
1205 		old_prot = current->protection;
1206 		if (set_max)
1207 			current->protection =
1208 			    (current->max_protection = new_prot) &
1209 			    old_prot;
1210 		else
1211 			current->protection = new_prot;
1212 
1213 		/*
1214 		 * Update physical map if necessary. Worry about copy-on-write
1215 		 * here -- CHECK THIS XXX
1216 		 */
1217 
1218 		if (current->protection != old_prot) {
1219 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1220 							VM_PROT_ALL)
1221 
1222 			if (current->eflags & MAP_ENTRY_IS_A_MAP) {
1223 				vm_map_entry_t share_entry;
1224 				vm_offset_t share_end;
1225 
1226 				vm_map_lock(current->object.share_map);
1227 				(void) vm_map_lookup_entry(
1228 				    current->object.share_map,
1229 				    current->offset,
1230 				    &share_entry);
1231 				share_end = current->offset +
1232 				    (current->end - current->start);
1233 				while ((share_entry !=
1234 					&current->object.share_map->header) &&
1235 				    (share_entry->start < share_end)) {
1236 
1237 					pmap_protect(map->pmap,
1238 					    (qmax(share_entry->start,
1239 						    current->offset) -
1240 						current->offset +
1241 						current->start),
1242 					    min(share_entry->end,
1243 						share_end) -
1244 					    current->offset +
1245 					    current->start,
1246 					    current->protection &
1247 					    MASK(share_entry));
1248 
1249 					share_entry = share_entry->next;
1250 				}
1251 				vm_map_unlock(current->object.share_map);
1252 			} else
1253 				pmap_protect(map->pmap, current->start,
1254 				    current->end,
1255 				    current->protection & MASK(current));
1256 #undef	MASK
1257 		}
1258 
1259 		vm_map_simplify_entry(map, current);
1260 
1261 		current = current->next;
1262 	}
1263 
1264 	map->timestamp++;
1265 	vm_map_unlock(map);
1266 	return (KERN_SUCCESS);
1267 }
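
/*
 * Illustrative only: an mprotect(2)-style call that lowers the current
 * protection of a range without touching the maximum protection.
 *
 *	rv = vm_map_protect(&p->p_vmspace->vm_map, addr, addr + len,
 *	    VM_PROT_READ, FALSE);
 *
 * Passing set_max TRUE instead replaces the maximum protection and clips
 * the current protection to it.
 */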
1268 
1269 /*
1270  *	vm_map_madvise:
1271  *
1272  * 	This routine traverses a process's map handling the madvise
1273  *	system call.
1274  */
1275 void
1276 vm_map_madvise(map, pmap, start, end, advise)
1277 	vm_map_t map;
1278 	pmap_t pmap;
1279 	vm_offset_t start, end;
1280 	int advise;
1281 {
1282 	vm_map_entry_t current;
1283 	vm_map_entry_t entry;
1284 
1285 	vm_map_lock(map);
1286 
1287 	VM_MAP_RANGE_CHECK(map, start, end);
1288 
1289 	if (vm_map_lookup_entry(map, start, &entry)) {
1290 		vm_map_clip_start(map, entry, start);
1291 	} else
1292 		entry = entry->next;
1293 
1294 	for(current = entry;
1295 		(current != &map->header) && (current->start < end);
1296 		current = current->next) {
1297 		vm_size_t size;
1298 
1299 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1300 			continue;
1301 		}
1302 
1303 		vm_map_clip_end(map, current, end);
1304 		size = current->end - current->start;
1305 
1306 		/*
1307 		 * Create an object if needed
1308 		 */
1309 		if (current->object.vm_object == NULL) {
1310 			vm_object_t object;
1311 			if ((advise == MADV_FREE) || (advise == MADV_DONTNEED))
1312 				continue;
1313 			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
1314 			current->object.vm_object = object;
1315 			current->offset = 0;
1316 		}
1317 
1318 		switch (advise) {
1319 	case MADV_NORMAL:
1320 			current->object.vm_object->behavior = OBJ_NORMAL;
1321 			break;
1322 	case MADV_SEQUENTIAL:
1323 			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
1324 			break;
1325 	case MADV_RANDOM:
1326 			current->object.vm_object->behavior = OBJ_RANDOM;
1327 			break;
1328 	/*
1329 	 * Right now, we could handle DONTNEED and WILLNEED with common code.
1330 	 * They are mostly the same, except for the potential async reads (NYI).
1331 	 */
1332 	case MADV_FREE:
1333 	case MADV_DONTNEED:
1334 			{
1335 				vm_pindex_t pindex;
1336 				int count;
1337 				pindex = OFF_TO_IDX(current->offset);
1338 				count = OFF_TO_IDX(size);
1339 				/*
1340 				 * MADV_DONTNEED removes the page from all
1341 				 * pmaps, so pmap_remove is not necessary.
1342 				 */
1343 				vm_object_madvise(current->object.vm_object,
1344 					pindex, count, advise);
1345 			}
1346 			break;
1347 
1348 	case MADV_WILLNEED:
1349 			{
1350 				vm_pindex_t pindex;
1351 				int count;
1352 				pindex = OFF_TO_IDX(current->offset);
1353 				count = OFF_TO_IDX(size);
1354 				vm_object_madvise(current->object.vm_object,
1355 					pindex, count, advise);
1356 				pmap_object_init_pt(pmap, current->start,
1357 					current->object.vm_object, pindex,
1358 					(count << PAGE_SHIFT), 0);
1359 			}
1360 			break;
1361 
1362 	default:
1363 			break;
1364 		}
1365 	}
1366 
1367 	map->timestamp++;
1368 	vm_map_simplify_entry(map, entry);
1369 	vm_map_unlock(map);
1370 	return;
1371 }
1372 
1373 
1374 /*
1375  *	vm_map_inherit:
1376  *
1377  *	Sets the inheritance of the specified address
1378  *	range in the target map.  Inheritance
1379  *	affects how the map will be shared with
1380  *	child maps at the time of vm_map_fork.
1381  */
1382 int
1383 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1384 	       vm_inherit_t new_inheritance)
1385 {
1386 	vm_map_entry_t entry;
1387 	vm_map_entry_t temp_entry;
1388 
1389 	switch (new_inheritance) {
1390 	case VM_INHERIT_NONE:
1391 	case VM_INHERIT_COPY:
1392 	case VM_INHERIT_SHARE:
1393 		break;
1394 	default:
1395 		return (KERN_INVALID_ARGUMENT);
1396 	}
1397 
1398 	vm_map_lock(map);
1399 
1400 	VM_MAP_RANGE_CHECK(map, start, end);
1401 
1402 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1403 		entry = temp_entry;
1404 		vm_map_clip_start(map, entry, start);
1405 	} else
1406 		entry = temp_entry->next;
1407 
1408 	while ((entry != &map->header) && (entry->start < end)) {
1409 		vm_map_clip_end(map, entry, end);
1410 
1411 		entry->inheritance = new_inheritance;
1412 
1413 		entry = entry->next;
1414 	}
1415 
1416 	vm_map_simplify_entry(map, temp_entry);
1417 	map->timestamp++;
1418 	vm_map_unlock(map);
1419 	return (KERN_SUCCESS);
1420 }
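
/*
 * Illustrative only: a minherit(2)-style call marking a range to be
 * shared with children created by vmspace_fork().
 *
 *	rv = vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + len,
 *	    VM_INHERIT_SHARE);
 */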
1421 
1422 /*
1423  * Implement the semantics of mlock
1424  */
1425 int
1426 vm_map_user_pageable(map, start, end, new_pageable)
1427 	vm_map_t map;
1428 	vm_offset_t start;
1429 	vm_offset_t end;
1430 	boolean_t new_pageable;
1431 {
1432 	vm_map_entry_t entry;
1433 	vm_map_entry_t start_entry;
1434 	vm_offset_t estart;
1435 	int rv;
1436 
1437 	vm_map_lock(map);
1438 	VM_MAP_RANGE_CHECK(map, start, end);
1439 
1440 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1441 		vm_map_unlock(map);
1442 		return (KERN_INVALID_ADDRESS);
1443 	}
1444 
1445 	if (new_pageable) {
1446 
1447 		entry = start_entry;
1448 		vm_map_clip_start(map, entry, start);
1449 
1450 		/*
1451 		 * Now decrement the wiring count for each region. If a region
1452 		 * becomes completely unwired, unwire its physical pages and
1453 		 * mappings.
1454 		 */
1455 		vm_map_set_recursive(map);
1456 
1457 		entry = start_entry;
1458 		while ((entry != &map->header) && (entry->start < end)) {
1459 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1460 				vm_map_clip_end(map, entry, end);
1461 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1462 				entry->wired_count--;
1463 				if (entry->wired_count == 0)
1464 					vm_fault_unwire(map, entry->start, entry->end);
1465 			}
1466 			vm_map_simplify_entry(map,entry);
1467 			entry = entry->next;
1468 		}
1469 		vm_map_clear_recursive(map);
1470 	} else {
1471 
1472 		entry = start_entry;
1473 
1474 		while ((entry != &map->header) && (entry->start < end)) {
1475 
1476 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1477 				entry = entry->next;
1478 				continue;
1479 			}
1480 
1481 			if (entry->wired_count != 0) {
1482 				entry->wired_count++;
1483 				entry->eflags |= MAP_ENTRY_USER_WIRED;
1484 				entry = entry->next;
1485 				continue;
1486 			}
1487 
1488 			/* Here on entry being newly wired */
1489 
1490 			if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1491 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1492 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1493 
1494 					vm_object_shadow(&entry->object.vm_object,
1495 					    &entry->offset,
1496 					    atop(entry->end - entry->start));
1497 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1498 
1499 				} else if (entry->object.vm_object == NULL) {
1500 
1501 					entry->object.vm_object =
1502 					    vm_object_allocate(OBJT_DEFAULT,
1503 						atop(entry->end - entry->start));
1504 					entry->offset = (vm_offset_t) 0;
1505 
1506 				}
1507 				default_pager_convert_to_swapq(entry->object.vm_object);
1508 			}
1509 
1510 			vm_map_clip_start(map, entry, start);
1511 			vm_map_clip_end(map, entry, end);
1512 
1513 			entry->wired_count++;
1514 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1515 			estart = entry->start;
1516 
1517 			/* First we need to allow map modifications */
1518 			vm_map_set_recursive(map);
1519 			vm_map_lock_downgrade(map);
1520 			map->timestamp++;
1521 
1522 			rv = vm_fault_user_wire(map, entry->start, entry->end);
1523 			if (rv) {
1524 
1525 				entry->wired_count--;
1526 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1527 
1528 				vm_map_clear_recursive(map);
1529 				vm_map_unlock(map);
1530 
1531 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
1532 				return rv;
1533 			}
1534 
1535 			vm_map_clear_recursive(map);
1536 			if (vm_map_lock_upgrade(map)) {
1537 				vm_map_lock(map);
1538 				if (vm_map_lookup_entry(map, estart, &entry)
1539 				    == FALSE) {
1540 					vm_map_unlock(map);
1541 					(void) vm_map_user_pageable(map,
1542 								    start,
1543 								    estart,
1544 								    TRUE);
1545 					return (KERN_INVALID_ADDRESS);
1546 				}
1547 			}
1548 			vm_map_simplify_entry(map,entry);
1549 		}
1550 	}
1551 	map->timestamp++;
1552 	vm_map_unlock(map);
1553 	return KERN_SUCCESS;
1554 }
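
/*
 * Illustrative only: mlock(2) and munlock(2) reduce to this routine.
 * new_pageable FALSE wires the pages, TRUE unwires them.
 *
 *	error = vm_map_user_pageable(&p->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len), FALSE);
 */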
1555 
1556 /*
1557  *	vm_map_pageable:
1558  *
1559  *	Sets the pageability of the specified address
1560  *	range in the target map.  Regions specified
1561  *	as not pageable require locked-down physical
1562  *	memory and physical page maps.
1563  *
1564  *	The map must not be locked, but a reference
1565  *	must remain to the map throughout the call.
1566  */
1567 int
1568 vm_map_pageable(map, start, end, new_pageable)
1569 	vm_map_t map;
1570 	vm_offset_t start;
1571 	vm_offset_t end;
1572 	boolean_t new_pageable;
1573 {
1574 	vm_map_entry_t entry;
1575 	vm_map_entry_t start_entry;
1576 	vm_offset_t failed = 0;
1577 	int rv;
1578 
1579 	vm_map_lock(map);
1580 
1581 	VM_MAP_RANGE_CHECK(map, start, end);
1582 
1583 	/*
1584 	 * Only one pageability change may take place at one time, since
1585 	 * vm_fault assumes it will be called only once for each
1586 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
1587 	 * changing the pageability for the entire region.  We do so before
1588 	 * making any changes.
1589 	 */
1590 
1591 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1592 		vm_map_unlock(map);
1593 		return (KERN_INVALID_ADDRESS);
1594 	}
1595 	entry = start_entry;
1596 
1597 	/*
1598 	 * Actions are rather different for wiring and unwiring, so we have
1599 	 * two separate cases.
1600 	 */
1601 
1602 	if (new_pageable) {
1603 
1604 		vm_map_clip_start(map, entry, start);
1605 
1606 		/*
1607 		 * Unwiring.  First ensure that the range to be unwired is
1608 		 * really wired down and that there are no holes.
1609 		 */
1610 		while ((entry != &map->header) && (entry->start < end)) {
1611 
1612 			if (entry->wired_count == 0 ||
1613 			    (entry->end < end &&
1614 				(entry->next == &map->header ||
1615 				    entry->next->start > entry->end))) {
1616 				vm_map_unlock(map);
1617 				return (KERN_INVALID_ARGUMENT);
1618 			}
1619 			entry = entry->next;
1620 		}
1621 
1622 		/*
1623 		 * Now decrement the wiring count for each region. If a region
1624 		 * becomes completely unwired, unwire its physical pages and
1625 		 * mappings.
1626 		 */
1627 		vm_map_set_recursive(map);
1628 
1629 		entry = start_entry;
1630 		while ((entry != &map->header) && (entry->start < end)) {
1631 			vm_map_clip_end(map, entry, end);
1632 
1633 			entry->wired_count--;
1634 			if (entry->wired_count == 0)
1635 				vm_fault_unwire(map, entry->start, entry->end);
1636 
1637 			entry = entry->next;
1638 		}
1639 		vm_map_simplify_entry(map, start_entry);
1640 		vm_map_clear_recursive(map);
1641 	} else {
1642 		/*
1643 		 * Wiring.  We must do this in two passes:
1644 		 *
1645 		 * 1.  Holding the write lock, we create any shadow or zero-fill
1646 		 * objects that need to be created. Then we clip each map
1647 		 * entry to the region to be wired and increment its wiring
1648 		 * count.  We create objects before clipping the map entries
1649 		 * to avoid object proliferation.
1650 		 *
1651 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
1652 		 * fault in the pages for any newly wired area (wired_count is
1653 		 * 1).
1654 		 *
1655 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
1656 		 * deadlock with another process that may have faulted on one
1657 		 * of the pages to be wired (it would mark the page busy,
1658 		 * blocking us, then in turn block on the map lock that we
1659 		 * hold).  Because of problems in the recursive lock package,
1660 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1661 		 * any actions that require the write lock must be done
1662 		 * beforehand.  Because we keep the read lock on the map, the
1663 		 * copy-on-write status of the entries we modify here cannot
1664 		 * change.
1665 		 */
1666 
1667 		/*
1668 		 * Pass 1.
1669 		 */
1670 		while ((entry != &map->header) && (entry->start < end)) {
1671 			if (entry->wired_count == 0) {
1672 
1673 				/*
1674 				 * Perform actions of vm_map_lookup that need
1675 				 * the write lock on the map: create a shadow
1676 				 * object for a copy-on-write region, or an
1677 				 * object for a zero-fill region.
1678 				 *
1679 				 * We don't have to do this for entries that
1680 				 * point to sharing maps, because we won't
1681 				 * hold the lock on the sharing map.
1682 				 */
1683 				if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1684 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1685 					if (copyflag &&
1686 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1687 
1688 						vm_object_shadow(&entry->object.vm_object,
1689 						    &entry->offset,
1690 						    atop(entry->end - entry->start));
1691 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1692 					} else if (entry->object.vm_object == NULL) {
1693 						entry->object.vm_object =
1694 						    vm_object_allocate(OBJT_DEFAULT,
1695 							atop(entry->end - entry->start));
1696 						entry->offset = (vm_offset_t) 0;
1697 					}
1698 					default_pager_convert_to_swapq(entry->object.vm_object);
1699 				}
1700 			}
1701 			vm_map_clip_start(map, entry, start);
1702 			vm_map_clip_end(map, entry, end);
1703 			entry->wired_count++;
1704 
1705 			/*
1706 			 * Check for holes
1707 			 */
1708 			if (entry->end < end &&
1709 			    (entry->next == &map->header ||
1710 				entry->next->start > entry->end)) {
1711 				/*
1712 				 * Found one.  Object creation actions do not
1713 				 * need to be undone, but the wired counts
1714 				 * need to be restored.
1715 				 */
1716 				while (entry != &map->header && entry->end > start) {
1717 					entry->wired_count--;
1718 					entry = entry->prev;
1719 				}
1720 				map->timestamp++;
1721 				vm_map_unlock(map);
1722 				return (KERN_INVALID_ARGUMENT);
1723 			}
1724 			entry = entry->next;
1725 		}
1726 
1727 		/*
1728 		 * Pass 2.
1729 		 */
1730 
1731 		/*
1732 		 * HACK HACK HACK HACK
1733 		 *
1734 		 * If we are wiring in the kernel map or a submap of it,
1735 		 * unlock the map to avoid deadlocks.  We trust that the
1736 		 * kernel is well-behaved, and therefore will not do
1737 		 * anything destructive to this region of the map while
1738 		 * we have it unlocked.  We cannot trust user processes
1739 		 * to do the same.
1740 		 *
1741 		 * HACK HACK HACK HACK
1742 		 */
1743 		if (vm_map_pmap(map) == kernel_pmap) {
1744 			vm_map_unlock(map);	/* trust me ... */
1745 		} else {
1746 			vm_map_set_recursive(map);
1747 			vm_map_lock_downgrade(map);
1748 		}
1749 
1750 		rv = 0;
1751 		entry = start_entry;
1752 		while (entry != &map->header && entry->start < end) {
1753 			/*
1754 			 * If vm_fault_wire fails for any page we need to undo
1755 			 * what has been done.  We decrement the wiring count
1756 			 * for those pages which have not yet been wired (now)
1757 			 * and unwire those that have (later).
1758 			 *
1759 			 * XXX this violates the locking protocol on the map,
1760 			 * needs to be fixed.
1761 			 */
1762 			if (rv)
1763 				entry->wired_count--;
1764 			else if (entry->wired_count == 1) {
1765 				rv = vm_fault_wire(map, entry->start, entry->end);
1766 				if (rv) {
1767 					failed = entry->start;
1768 					entry->wired_count--;
1769 				}
1770 			}
1771 			entry = entry->next;
1772 		}
1773 
1774 		if (vm_map_pmap(map) == kernel_pmap) {
1775 			vm_map_lock(map);
1776 		} else {
1777 			vm_map_clear_recursive(map);
1778 		}
1779 		if (rv) {
1780 			vm_map_unlock(map);
1781 			(void) vm_map_pageable(map, start, failed, TRUE);
1782 			return (rv);
1783 		}
1784 		vm_map_simplify_entry(map, start_entry);
1785 	}
1786 
1787 	vm_map_unlock(map);
1788 
1789 	map->timestamp++;
1790 	return (KERN_SUCCESS);
1791 }
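
/*
 * Illustrative only: the kernel-internal counterpart of the routine
 * above.  Wiring a kernel range looks like
 *
 *	rv = vm_map_pageable(kernel_map, trunc_page(addr),
 *	    round_page(addr + size), FALSE);
 *
 * and the map must not already be locked, since this routine takes the
 * lock itself.
 */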
1792 
1793 /*
1794  * vm_map_clean
1795  *
1796  * Push any dirty cached pages in the address range to their pager.
1797  * If syncio is TRUE, dirty pages are written synchronously.
1798  * If invalidate is TRUE, any cached pages are freed as well.
1799  *
1800  * Returns an error if any part of the specified range is not mapped.
1801  */
1802 int
1803 vm_map_clean(map, start, end, syncio, invalidate)
1804 	vm_map_t map;
1805 	vm_offset_t start;
1806 	vm_offset_t end;
1807 	boolean_t syncio;
1808 	boolean_t invalidate;
1809 {
1810 	vm_map_entry_t current;
1811 	vm_map_entry_t entry;
1812 	vm_size_t size;
1813 	vm_object_t object;
1814 	vm_ooffset_t offset;
1815 
1816 	vm_map_lock_read(map);
1817 	VM_MAP_RANGE_CHECK(map, start, end);
1818 	if (!vm_map_lookup_entry(map, start, &entry)) {
1819 		vm_map_unlock_read(map);
1820 		return (KERN_INVALID_ADDRESS);
1821 	}
1822 	/*
1823 	 * Make a first pass to check for holes.
1824 	 */
1825 	for (current = entry; current->start < end; current = current->next) {
1826 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1827 			vm_map_unlock_read(map);
1828 			return (KERN_INVALID_ARGUMENT);
1829 		}
1830 		if (end > current->end &&
1831 		    (current->next == &map->header ||
1832 			current->end != current->next->start)) {
1833 			vm_map_unlock_read(map);
1834 			return (KERN_INVALID_ADDRESS);
1835 		}
1836 	}
1837 
1838 	if (invalidate)
1839 		pmap_remove(vm_map_pmap(map), start, end);
1840 	/*
1841 	 * Make a second pass, cleaning/uncaching pages from the indicated
1842 	 * objects as we go.
1843 	 */
1844 	for (current = entry; current->start < end; current = current->next) {
1845 		offset = current->offset + (start - current->start);
1846 		size = (end <= current->end ? end : current->end) - start;
1847 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1848 			vm_map_t smap;
1849 			vm_map_entry_t tentry;
1850 			vm_size_t tsize;
1851 
1852 			smap = current->object.share_map;
1853 			vm_map_lock_read(smap);
1854 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1855 			tsize = tentry->end - offset;
1856 			if (tsize < size)
1857 				size = tsize;
1858 			object = tentry->object.vm_object;
1859 			offset = tentry->offset + (offset - tentry->start);
1860 			vm_map_unlock_read(smap);
1861 		} else {
1862 			object = current->object.vm_object;
1863 		}
1864 		/*
1865 		 * Note that there is absolutely no sense in writing out
1866 		 * anonymous objects, so we track down the vnode object
1867 		 * to write out.
1868 		 * We invalidate (remove) all pages from the address space
1869 		 * anyway, for semantic correctness.
1870 		 */
1871 		while (object->backing_object) {
1872 			object = object->backing_object;
1873 			offset += object->backing_object_offset;
1874 			if (object->size < OFF_TO_IDX( offset + size))
1875 				size = IDX_TO_OFF(object->size) - offset;
1876 		}
1877 		if (object && (object->type == OBJT_VNODE)) {
1878 			/*
1879 			 * Flush pages if writing is allowed. XXX should we continue
1880 			 * on an error?
1881 			 *
1882 			 * XXX Doing async I/O and then removing all the pages from
1883 			 *     the object before it completes is probably a very bad
1884 			 *     idea.
1885 			 */
1886 			if (current->protection & VM_PROT_WRITE) {
1887 				int flags;
1888 				if (object->type == OBJT_VNODE)
1889 					vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
1890 				flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1891 				flags |= invalidate ? OBJPC_INVAL : 0;
1892 				vm_object_page_clean(object,
1893 					OFF_TO_IDX(offset),
1894 					OFF_TO_IDX(offset + size + PAGE_MASK),
1895 					flags);
1896 				if (invalidate) {
1897 					vm_object_pip_wait(object, "objmcl");
1898 					vm_object_page_remove(object,
1899 						OFF_TO_IDX(offset),
1900 						OFF_TO_IDX(offset + size + PAGE_MASK),
1901 						FALSE);
1902 				}
1903 				if (object->type == OBJT_VNODE)
1904 					VOP_UNLOCK(object->handle, 0, curproc);
1905 			}
1906 		}
1907 		start += size;
1908 	}
1909 
1910 	vm_map_unlock_read(map);
1911 	return (KERN_SUCCESS);
1912 }
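
/*
 * Illustrative sketch (hypothetical msync(2)-style caller): flush a
 * page-aligned range synchronously and also invalidate the cached pages.
 * Everything except the vm_map_clean() call itself is an assumption.
 *
 *	rv = vm_map_clean(map, trunc_page(addr), round_page(addr + size),
 *	    TRUE, TRUE);
 */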
1913 
1914 /*
1915  *	vm_map_entry_unwire:	[ internal use only ]
1916  *
1917  *	Make the region specified by this entry pageable.
1918  *
1919  *	The map in question should be locked.
1920  *	[This is the reason for this routine's existence.]
1921  */
1922 static void
1923 vm_map_entry_unwire(map, entry)
1924 	vm_map_t map;
1925 	vm_map_entry_t entry;
1926 {
1927 	vm_fault_unwire(map, entry->start, entry->end);
1928 	entry->wired_count = 0;
1929 }
1930 
1931 /*
1932  *	vm_map_entry_delete:	[ internal use only ]
1933  *
1934  *	Deallocate the given entry from the target map.
1935  */
1936 static void
1937 vm_map_entry_delete(map, entry)
1938 	vm_map_t map;
1939 	vm_map_entry_t entry;
1940 {
1941 	vm_map_entry_unlink(map, entry);
1942 	map->size -= entry->end - entry->start;
1943 
1944 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1945 		vm_object_deallocate(entry->object.vm_object);
1946 	}
1947 
1948 	vm_map_entry_dispose(map, entry);
1949 }
1950 
1951 /*
1952  *	vm_map_delete:	[ internal use only ]
1953  *
1954  *	Deallocates the given address range from the target
1955  *	map.
1956  *
1957  *	When called with a sharing map, removes pages from
1958  *	that region from all physical maps.
1959  */
1960 int
1961 vm_map_delete(map, start, end)
1962 	vm_map_t map;
1963 	vm_offset_t start;
1964 	vm_offset_t end;
1965 {
1966 	vm_object_t object;
1967 	vm_map_entry_t entry;
1968 	vm_map_entry_t first_entry;
1969 
1970 	/*
1971 	 * Find the start of the region, and clip it
1972 	 */
1973 
1974 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
1975 		entry = first_entry->next;
1976 		object = entry->object.vm_object;
1977 		if (object && (object->ref_count == 1) && (object->shadow_count == 0))
1978 			vm_object_set_flag(object, OBJ_ONEMAPPING);
1979 	} else {
1980 		entry = first_entry;
1981 		vm_map_clip_start(map, entry, start);
1982 		/*
1983 		 * Fix the lookup hint now, rather than each time through the
1984 		 * loop.
1985 		 */
1986 		SAVE_HINT(map, entry->prev);
1987 	}
1988 
1989 	/*
1990 	 * Save the free space hint
1991 	 */
1992 
1993 	if (entry == &map->header) {
1994 		map->first_free = &map->header;
1995 	} else if (map->first_free->start >= start) {
1996 		map->first_free = entry->prev;
1997 	}
1998 
1999 	/*
2000 	 * Step through all entries in this region
2001 	 */
2002 
2003 	while ((entry != &map->header) && (entry->start < end)) {
2004 		vm_map_entry_t next;
2005 		vm_offset_t s, e;
2006 		vm_pindex_t offidxstart, offidxend, count;
2007 
2008 		vm_map_clip_end(map, entry, end);
2009 
2010 		s = entry->start;
2011 		e = entry->end;
2012 		next = entry->next;
2013 
2014 		offidxstart = OFF_TO_IDX(entry->offset);
2015 		count = OFF_TO_IDX(e - s);
2016 		object = entry->object.vm_object;
2017 
2018 		/*
2019 		 * Unwire before removing addresses from the pmap; otherwise,
2020 		 * unwiring will put the entries back in the pmap.
2021 		 */
2022 		if (entry->wired_count != 0) {
2023 			vm_map_entry_unwire(map, entry);
2024 		}
2025 
2026 		offidxend = offidxstart + count;
2027 		/*
2028 		 * If this is a sharing map, we must remove *all* references
2029 		 * to this data, since we can't find all of the physical maps
2030 		 * which are sharing it.
2031 		 */
2032 
2033 		if ((object == kernel_object) || (object == kmem_object)) {
2034 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2035 		} else if (!map->is_main_map) {
2036 			vm_object_pmap_remove(object, offidxstart, offidxend);
2037 		} else {
2038 			pmap_remove(map->pmap, s, e);
2039 			if (object &&
2040 				((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) &&
2041 				((object->type == OBJT_SWAP) || (object->type == OBJT_DEFAULT))) {
2042 				vm_object_collapse(object);
2043 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2044 				if (object->type == OBJT_SWAP) {
2045 					swap_pager_freespace(object, offidxstart, count);
2046 				}
2047 
2048 				if ((offidxend >= object->size) &&
2049 					(offidxstart < object->size)) {
2050 					object->size = offidxstart;
2051 				}
2052 			}
2053 		}
2054 
2055 		/*
2056 		 * Delete the entry (which may delete the object) only after
2057 		 * removing all pmap entries pointing to its pages.
2058 		 * (Otherwise, its page frames may be reallocated, and any
2059 		 * modify bits will be set in the wrong object!)
2060 		 */
2061 		vm_map_entry_delete(map, entry);
2062 		entry = next;
2063 	}
2064 	return (KERN_SUCCESS);
2065 }
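
/*
 * Note (editorial): vm_map_delete() assumes the caller already holds the
 * map lock; vm_map_remove() below is the exported wrapper that takes the
 * lock (and splvm for kmem_map/mb_map) around this call.
 */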
2066 
2067 /*
2068  *	vm_map_remove:
2069  *
2070  *	Remove the given address range from the target map.
2071  *	This is the exported form of vm_map_delete.
2072  */
2073 int
2074 vm_map_remove(map, start, end)
2075 	vm_map_t map;
2076 	vm_offset_t start;
2077 	vm_offset_t end;
2078 {
2079 	int result, s = 0;
2080 
2081 	if (map == kmem_map || map == mb_map)
2082 		s = splvm();
2083 
2084 	vm_map_lock(map);
2085 	VM_MAP_RANGE_CHECK(map, start, end);
2086 	result = vm_map_delete(map, start, end);
2087 	vm_map_unlock(map);
2088 
2089 	if (map == kmem_map || map == mb_map)
2090 		splx(s);
2091 
2092 	return (result);
2093 }
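
/*
 * Illustrative sketch (assumed kmem_free()-style caller): releasing a
 * page-aligned kernel allocation reduces to a range removal.  The names
 * other than vm_map_remove() itself are assumptions.
 *
 *	(void) vm_map_remove(map, trunc_page(addr),
 *	    round_page(addr + size));
 */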
2094 
2095 /*
2096  *	vm_map_check_protection:
2097  *
2098  *	Assert that the target map allows the specified
2099  *	privilege on the entire address region given.
2100  *	The entire region must be allocated.
2101  */
2102 boolean_t
2103 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2104 			vm_prot_t protection)
2105 {
2106 	vm_map_entry_t entry;
2107 	vm_map_entry_t tmp_entry;
2108 
2109 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2110 		return (FALSE);
2111 	}
2112 	entry = tmp_entry;
2113 
2114 	while (start < end) {
2115 		if (entry == &map->header) {
2116 			return (FALSE);
2117 		}
2118 		/*
2119 		 * No holes allowed!
2120 		 */
2121 
2122 		if (start < entry->start) {
2123 			return (FALSE);
2124 		}
2125 		/*
2126 		 * Check protection associated with entry.
2127 		 */
2128 
2129 		if ((entry->protection & protection) != protection) {
2130 			return (FALSE);
2131 		}
2132 		/* go to next entry */
2133 
2134 		start = entry->end;
2135 		entry = entry->next;
2136 	}
2137 	return (TRUE);
2138 }
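
/*
 * Illustrative sketch (hypothetical caller): verify that an entire user
 * range is readable before operating on it; the error mapping shown is an
 * assumption.
 *
 *	if (!vm_map_check_protection(map, trunc_page(addr),
 *	    round_page(addr + len), VM_PROT_READ))
 *		return (EFAULT);
 */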
2139 
2140 /*
2141  * Split the pages in a map entry into a new object.  This affords
2142  * easier removal of unused pages, and keeps object inheritance from
2143  * having a negative impact on memory usage.
2144  */
2145 static void
2146 vm_map_split(entry)
2147 	vm_map_entry_t entry;
2148 {
2149 	vm_page_t m;
2150 	vm_object_t orig_object, new_object, source;
2151 	vm_offset_t s, e;
2152 	vm_pindex_t offidxstart, offidxend, idx;
2153 	vm_size_t size;
2154 	vm_ooffset_t offset;
2155 
2156 	orig_object = entry->object.vm_object;
2157 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2158 		return;
2159 	if (orig_object->ref_count <= 1)
2160 		return;
2161 
2162 	offset = entry->offset;
2163 	s = entry->start;
2164 	e = entry->end;
2165 
2166 	offidxstart = OFF_TO_IDX(offset);
2167 	offidxend = offidxstart + OFF_TO_IDX(e - s);
2168 	size = offidxend - offidxstart;
2169 
2170 	new_object = vm_pager_allocate(orig_object->type,
2171 		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2172 	if (new_object == NULL)
2173 		return;
2174 
2175 	source = orig_object->backing_object;
2176 	if (source != NULL) {
2177 		vm_object_reference(source);	/* Referenced by new_object */
2178 		TAILQ_INSERT_TAIL(&source->shadow_head,
2179 				  new_object, shadow_list);
2180 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
2181 		new_object->backing_object_offset =
2182 			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2183 		new_object->backing_object = source;
2184 		source->shadow_count++;
2185 		source->generation++;
2186 	}
2187 
2188 	for (idx = 0; idx < size; idx++) {
2189 		vm_page_t m;
2190 
2191 	retry:
2192 		m = vm_page_lookup(orig_object, offidxstart + idx);
2193 		if (m == NULL)
2194 			continue;
2195 		if (m->flags & PG_BUSY) {
2196 			vm_page_flag_set(m, PG_WANTED);
2197 			tsleep(m, PVM, "spltwt", 0);
2198 			goto retry;
2199 		}
2200 
2201 		vm_page_busy(m);
2202 		vm_page_protect(m, VM_PROT_NONE);
2203 		vm_page_rename(m, new_object, idx);
2204 		m->dirty = VM_PAGE_BITS_ALL;
2205 		vm_page_busy(m);
2206 	}
2207 
2208 	if (orig_object->type == OBJT_SWAP) {
2209 		vm_object_pip_add(orig_object, 1);
2210 		/*
2211 		 * copy orig_object pages into new_object
2212 		 * and destroy unneeded pages in
2213 		 * shadow object.
2214 		 */
2215 		swap_pager_copy(orig_object, OFF_TO_IDX(orig_object->paging_offset),
2216 		    new_object, OFF_TO_IDX(new_object->paging_offset),
2217 			offidxstart, 0);
2218 		vm_object_pip_wakeup(orig_object);
2219 	}
2220 
2221 	for (idx = 0; idx < size; idx++) {
2222 		m = vm_page_lookup(new_object, idx);
2223 		if (m) {
2224 			vm_page_wakeup(m);
2225 		}
2226 	}
2227 
2228 	entry->object.vm_object = new_object;
2229 	entry->offset = 0LL;
2230 	vm_object_deallocate(orig_object);
2231 }
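
/*
 * Note (editorial): vm_map_split() is invoked from vm_map_copy_entry()
 * below when an OBJ_ONEMAPPING anonymous object is about to become shared,
 * so that the entry ends up with its own object covering just its range.
 */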
2232 
2233 /*
2234  *	vm_map_copy_entry:
2235  *
2236  *	Copies the contents of the source entry to the destination
2237  *	entry.  The entries *must* be aligned properly.
2238  */
2239 static void
2240 vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2241 	vm_map_t src_map, dst_map;
2242 	vm_map_entry_t src_entry, dst_entry;
2243 {
2244 	vm_object_t src_object;
2245 
2246 	if ((dst_entry->eflags|src_entry->eflags) &
2247 		(MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
2248 		return;
2249 
2250 	if (src_entry->wired_count == 0) {
2251 
2252 		/*
2253 		 * If the source entry is marked needs_copy, it is already
2254 		 * write-protected.
2255 		 */
2256 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2257 			pmap_protect(src_map->pmap,
2258 			    src_entry->start,
2259 			    src_entry->end,
2260 			    src_entry->protection & ~VM_PROT_WRITE);
2261 		}
2262 
2263 		/*
2264 		 * Make a copy of the object.
2265 		 */
2266 		if ((src_object = src_entry->object.vm_object) != NULL) {
2267 
2268 			if ((src_object->handle == NULL) &&
2269 				(src_object->type == OBJT_DEFAULT ||
2270 				 src_object->type == OBJT_SWAP)) {
2271 				vm_object_collapse(src_object);
2272 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2273 					vm_map_split(src_entry);
2274 					src_map->timestamp++;
2275 					src_object = src_entry->object.vm_object;
2276 				}
2277 			}
2278 
2279 			vm_object_reference(src_object);
2280 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2281 			dst_entry->object.vm_object = src_object;
2282 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2283 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2284 			dst_entry->offset = src_entry->offset;
2285 		} else {
2286 			dst_entry->object.vm_object = NULL;
2287 			dst_entry->offset = 0;
2288 		}
2289 
2290 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2291 		    dst_entry->end - dst_entry->start, src_entry->start);
2292 	} else {
2293 		/*
2294 		 * Of course, wired down pages can't be set copy-on-write.
2295 		 * Cause wired pages to be copied into the new map by
2296 		 * simulating faults (the new pages are pageable)
2297 		 */
2298 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2299 	}
2300 }
2301 
2302 /*
2303  * vmspace_fork:
2304  * Create a new process vmspace structure and vm_map
2305  * based on those of an existing process.  The new map
2306  * is based on the old map, according to the inheritance
2307  * values on the regions in that map.
2308  *
2309  * The source map must not be locked.
2310  */
2311 struct vmspace *
2312 vmspace_fork(vm1)
2313 	struct vmspace *vm1;
2314 {
2315 	struct vmspace *vm2;
2316 	vm_map_t old_map = &vm1->vm_map;
2317 	vm_map_t new_map;
2318 	vm_map_entry_t old_entry;
2319 	vm_map_entry_t new_entry;
2320 	pmap_t new_pmap;
2321 	vm_object_t object;
2322 
2323 	vm_map_lock(old_map);
2324 
2325 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2326 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2327 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2328 	new_pmap = &vm2->vm_pmap;	/* XXX */
2329 	new_map = &vm2->vm_map;	/* XXX */
2330 	new_map->timestamp = 1;
2331 
2332 	old_entry = old_map->header.next;
2333 
2334 	while (old_entry != &old_map->header) {
2335 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2336 			panic("vm_map_fork: encountered a submap");
2337 
2338 		switch (old_entry->inheritance) {
2339 		case VM_INHERIT_NONE:
2340 			break;
2341 
2342 		case VM_INHERIT_SHARE:
2343 			/*
2344 			 * Clone the entry, creating the shared object if necessary.
2345 			 */
2346 			object = old_entry->object.vm_object;
2347 			if (object == NULL) {
2348 				object = vm_object_allocate(OBJT_DEFAULT,
2349 					atop(old_entry->end - old_entry->start));
2350 				old_entry->object.vm_object = object;
2351 				old_entry->offset = (vm_offset_t) 0;
2352 			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2353 				vm_object_shadow(&old_entry->object.vm_object,
2354 					&old_entry->offset,
2355 					atop(old_entry->end - old_entry->start));
2356 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2357 				object = old_entry->object.vm_object;
2358 			}
2359 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2360 
2361 			/*
2362 			 * Clone the entry, referencing the sharing map.
2363 			 */
2364 			new_entry = vm_map_entry_create(new_map);
2365 			*new_entry = *old_entry;
2366 			new_entry->wired_count = 0;
2367 			vm_object_reference(object);
2368 
2369 			/*
2370 			 * Insert the entry into the new map -- we know we're
2371 			 * inserting at the end of the new map.
2372 			 */
2373 
2374 			vm_map_entry_link(new_map, new_map->header.prev,
2375 			    new_entry);
2376 
2377 			/*
2378 			 * Update the physical map
2379 			 */
2380 
2381 			pmap_copy(new_map->pmap, old_map->pmap,
2382 			    new_entry->start,
2383 			    (old_entry->end - old_entry->start),
2384 			    old_entry->start);
2385 			break;
2386 
2387 		case VM_INHERIT_COPY:
2388 			/*
2389 			 * Clone the entry and link into the map.
2390 			 */
2391 			new_entry = vm_map_entry_create(new_map);
2392 			*new_entry = *old_entry;
2393 			new_entry->wired_count = 0;
2394 			new_entry->object.vm_object = NULL;
2395 			new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
2396 			vm_map_entry_link(new_map, new_map->header.prev,
2397 			    new_entry);
2398 			vm_map_copy_entry(old_map, new_map, old_entry,
2399 			    new_entry);
2400 			break;
2401 		}
2402 		old_entry = old_entry->next;
2403 	}
2404 
2405 	new_map->size = old_map->size;
2406 	vm_map_unlock(old_map);
2407 	old_map->timestamp++;
2408 
2409 	return (vm2);
2410 }
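
/*
 * Illustrative sketch (assumed fork1()-style caller): the child simply
 * inherits a copy-on-write image of the parent's vmspace.  The variable
 * names are assumptions.
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 */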
2411 
2412 /*
2413  * Unshare the specified VM space for exec.  If other processes share
2414  * it, create a new one.  The new vmspace contains no mappings ("null").
2415  */
2416 
2417 void
2418 vmspace_exec(struct proc *p) {
2419 	struct vmspace *oldvmspace = p->p_vmspace;
2420 	struct vmspace *newvmspace;
2421 	vm_map_t map = &p->p_vmspace->vm_map;
2422 
2423 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2424 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2425 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2426 	/*
2427 	 * This code is written this way for prototype purposes.  The
2428 	 * goal is to avoid running the vmspace down here, and instead let
2429 	 * the other processes that are still using it run it down when
2430 	 * they are done.  Even though there is little or no chance of
2431 	 * blocking here, keeping this form is a good idea for future mods.
2432 	 */
2433 	vmspace_free(oldvmspace);
2434 	p->p_vmspace = newvmspace;
2435 	if (p == curproc)
2436 		pmap_activate(p);
2437 }
2438 
2439 /*
2440  * Unshare the specified VM space for forcing COW.  This
2441  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2442  */
2443 
2444 void
2445 vmspace_unshare(struct proc *p) {
2446 	struct vmspace *oldvmspace = p->p_vmspace;
2447 	struct vmspace *newvmspace;
2448 
2449 	if (oldvmspace->vm_refcnt == 1)
2450 		return;
2451 	newvmspace = vmspace_fork(oldvmspace);
2452 	vmspace_free(oldvmspace);
2453 	p->p_vmspace = newvmspace;
2454 	if (p == curproc)
2455 		pmap_activate(p);
2456 }
2457 
2458 
2459 /*
2460  *	vm_map_lookup:
2461  *
2462  *	Finds the VM object, offset, and
2463  *	protection for a given virtual address in the
2464  *	specified map, assuming a page fault of the
2465  *	type specified.
2466  *
2467  *	Leaves the map in question locked for read; return
2468  *	values are guaranteed until a vm_map_lookup_done
2469  *	call is performed.  Note that the map argument
2470  *	is in/out; the returned map must be used in
2471  *	the call to vm_map_lookup_done.
2472  *
2473  *	A handle (out_entry) is returned for use in
2474  *	vm_map_lookup_done, to make that fast.
2475  *
2476  *	If a lookup is requested with "write protection"
2477  *	specified, the map may be changed to perform virtual
2478  *	copying operations, although the data referenced will
2479  *	remain the same.
2480  */
2481 int
2482 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2483 	      vm_offset_t vaddr,
2484 	      vm_prot_t fault_typea,
2485 	      vm_map_entry_t *out_entry,	/* OUT */
2486 	      vm_object_t *object,		/* OUT */
2487 	      vm_pindex_t *pindex,		/* OUT */
2488 	      vm_prot_t *out_prot,		/* OUT */
2489 	      boolean_t *wired)			/* OUT */
2490 {
2491 	vm_map_t share_map;
2492 	vm_offset_t share_offset;
2493 	vm_map_entry_t entry;
2494 	vm_map_t map = *var_map;
2495 	vm_prot_t prot;
2496 	boolean_t su;
2497 	vm_prot_t fault_type = fault_typea;
2498 
2499 RetryLookup:;
2500 
2501 	/*
2502 	 * Lookup the faulting address.
2503 	 */
2504 
2505 	vm_map_lock_read(map);
2506 
2507 #define	RETURN(why) \
2508 		{ \
2509 		vm_map_unlock_read(map); \
2510 		return(why); \
2511 		}
2512 
2513 	/*
2514 	 * If the map has an interesting hint, try it before calling the
2515 	 * full-blown lookup routine.
2516 	 */
2517 
2518 	entry = map->hint;
2519 
2520 	*out_entry = entry;
2521 
2522 	if ((entry == &map->header) ||
2523 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2524 		vm_map_entry_t tmp_entry;
2525 
2526 		/*
2527 		 * Entry was either not a valid hint, or the vaddr was not
2528 		 * contained in the entry, so do a full lookup.
2529 		 */
2530 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2531 			RETURN(KERN_INVALID_ADDRESS);
2532 
2533 		entry = tmp_entry;
2534 		*out_entry = entry;
2535 	}
2536 
2537 	/*
2538 	 * Handle submaps.
2539 	 */
2540 
2541 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2542 		vm_map_t old_map = map;
2543 
2544 		*var_map = map = entry->object.sub_map;
2545 		vm_map_unlock_read(old_map);
2546 		goto RetryLookup;
2547 	}
2548 
2549 	/*
2550 	 * Check whether this task is allowed to have this page.
2551 	 * Note the special case for MAP_ENTRY_COW
2552 	 * pages with an override.  This is to implement a forced
2553 	 * COW for debuggers.
2554 	 */
2555 
2556 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2557 		prot = entry->max_protection;
2558 	else
2559 		prot = entry->protection;
2560 
2561 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2562 	if ((fault_type & prot) != fault_type) {
2563 			RETURN(KERN_PROTECTION_FAILURE);
2564 	}
2565 
2566 	if (entry->wired_count && (fault_type & VM_PROT_WRITE) &&
2567 			(entry->eflags & MAP_ENTRY_COW) &&
2568 			(fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2569 			RETURN(KERN_PROTECTION_FAILURE);
2570 	}
2571 
2572 	/*
2573 	 * If this page is not pageable, we have to get it for all possible
2574 	 * accesses.
2575 	 */
2576 
2577 	*wired = (entry->wired_count != 0);
2578 	if (*wired)
2579 		prot = fault_type = entry->protection;
2580 
2581 	/*
2582 	 * If we don't already have a VM object, track it down.
2583 	 */
2584 
2585 	su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
2586 	if (su) {
2587 		share_map = map;
2588 		share_offset = vaddr;
2589 	} else {
2590 		vm_map_entry_t share_entry;
2591 
2592 		/*
2593 		 * Compute the sharing map, and offset into it.
2594 		 */
2595 
2596 		share_map = entry->object.share_map;
2597 		share_offset = (vaddr - entry->start) + entry->offset;
2598 
2599 		/*
2600 		 * Look for the backing store object and offset
2601 		 */
2602 
2603 		vm_map_lock_read(share_map);
2604 
2605 		if (!vm_map_lookup_entry(share_map, share_offset,
2606 			&share_entry)) {
2607 			vm_map_unlock_read(share_map);
2608 			RETURN(KERN_INVALID_ADDRESS);
2609 		}
2610 		entry = share_entry;
2611 	}
2612 
2613 	/*
2614 	 * If the entry was copy-on-write, we either copy it now or demote access:
2615 	 */
2616 
2617 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2618 		/*
2619 		 * If we want to write the page, we may as well handle that
2620 		 * now since we've got the sharing map locked.
2621 		 *
2622 		 * If we don't need to write the page, we just demote the
2623 		 * permissions allowed.
2624 		 */
2625 
2626 		if (fault_type & VM_PROT_WRITE) {
2627 			/*
2628 			 * Make a new object, and place it in the object
2629 			 * chain.  Note that no new references have appeared
2630 			 * -- one just moved from the share map to the new
2631 			 * object.
2632 			 */
2633 
2634 			if (vm_map_lock_upgrade(share_map)) {
2635 				if (share_map != map)
2636 					vm_map_unlock_read(map);
2637 
2638 				goto RetryLookup;
2639 			}
2640 			vm_object_shadow(
2641 			    &entry->object.vm_object,
2642 			    &entry->offset,
2643 			    atop(entry->end - entry->start));
2644 
2645 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2646 			vm_map_lock_downgrade(share_map);
2647 		} else {
2648 			/*
2649 			 * We're attempting to read a copy-on-write page --
2650 			 * don't allow writes.
2651 			 */
2652 
2653 			prot &= ~VM_PROT_WRITE;
2654 		}
2655 	}
2656 
2657 	/*
2658 	 * Create an object if necessary.
2659 	 */
2660 	if (entry->object.vm_object == NULL) {
2661 
2662 		if (vm_map_lock_upgrade(share_map)) {
2663 			if (share_map != map)
2664 				vm_map_unlock_read(map);
2665 			goto RetryLookup;
2666 		}
2667 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2668 		    atop(entry->end - entry->start));
2669 		entry->offset = 0;
2670 		vm_map_lock_downgrade(share_map);
2671 	}
2672 
2673 	if (entry->object.vm_object->type == OBJT_DEFAULT)
2674 		default_pager_convert_to_swapq(entry->object.vm_object);
2675 	/*
2676 	 * Return the object/offset from this entry.  If the entry was
2677 	 * copy-on-write or empty, it has been fixed up.
2678 	 */
2679 
2680 	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
2681 	*object = entry->object.vm_object;
2682 
2683 	/*
2684 	 * Return the protection actually allowed for this access.
2685 	 */
2686 
2687 	*out_prot = prot;
2688 	return (KERN_SUCCESS);
2689 
2690 #undef	RETURN
2691 }
2692 
2693 /*
2694  *	vm_map_lookup_done:
2695  *
2696  *	Releases locks acquired by a vm_map_lookup
2697  *	(according to the handle returned by that lookup).
2698  */
2699 
2700 void
2701 vm_map_lookup_done(map, entry)
2702 	vm_map_t map;
2703 	vm_map_entry_t entry;
2704 {
2705 	/*
2706 	 * If this entry references a map, unlock it first.
2707 	 */
2708 
2709 	if (entry->eflags & MAP_ENTRY_IS_A_MAP)
2710 		vm_map_unlock_read(entry->object.share_map);
2711 
2712 	/*
2713 	 * Unlock the main-level map
2714 	 */
2715 
2716 	vm_map_unlock_read(map);
2717 }
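
/*
 * Illustrative sketch (hypothetical fault-handling caller): every
 * successful vm_map_lookup() must be balanced by a vm_map_lookup_done()
 * on the map it returned, since the map argument is in/out.
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... use object and pindex while the map stays read-locked ...
 *	vm_map_lookup_done(map, entry);
 */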
2718 
2719 /*
2720  * Implement uiomove with VM operations.  This code (and collateral
2721  * changes) supports every combination of source object modification
2722  * and COW-type operations.
2723  */
2724 int
2725 vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
2726 	vm_map_t mapa;
2727 	vm_object_t srcobject;
2728 	off_t cp;
2729 	int cnta;
2730 	vm_offset_t uaddra;
2731 	int *npages;
2732 {
2733 	vm_map_t map;
2734 	vm_object_t first_object, oldobject, object;
2735 	vm_map_entry_t entry;
2736 	vm_prot_t prot;
2737 	boolean_t wired;
2738 	int tcnt, rv;
2739 	vm_offset_t uaddr, start, end, tend;
2740 	vm_pindex_t first_pindex, osize, oindex;
2741 	off_t ooffset;
2742 	int cnt;
2743 
2744 	if (npages)
2745 		*npages = 0;
2746 
2747 	cnt = cnta;
2748 	uaddr = uaddra;
2749 
2750 	while (cnt > 0) {
2751 		map = mapa;
2752 
2753 		if ((vm_map_lookup(&map, uaddr,
2754 			VM_PROT_READ, &entry, &first_object,
2755 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
2756 			return EFAULT;
2757 		}
2758 
2759 		vm_map_clip_start(map, entry, uaddr);
2760 
2761 		tcnt = cnt;
2762 		tend = uaddr + tcnt;
2763 		if (tend > entry->end) {
2764 			tcnt = entry->end - uaddr;
2765 			tend = entry->end;
2766 		}
2767 
2768 		vm_map_clip_end(map, entry, tend);
2769 
2770 		start = entry->start;
2771 		end = entry->end;
2772 
2773 		osize = atop(tcnt);
2774 
2775 		oindex = OFF_TO_IDX(cp);
2776 		if (npages) {
2777 			vm_pindex_t idx;
2778 			for (idx = 0; idx < osize; idx++) {
2779 				vm_page_t m;
2780 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
2781 					vm_map_lookup_done(map, entry);
2782 					return 0;
2783 				}
2784 				if ((m->flags & PG_BUSY) ||
2785 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
2786 					vm_map_lookup_done(map, entry);
2787 					return 0;
2788 				}
2789 			}
2790 		}
2791 
2792 /*
2793  * If we are changing an existing map entry, just redirect
2794  * the object, and change mappings.
2795  */
2796 		if ((first_object->type == OBJT_VNODE) &&
2797 			((oldobject = entry->object.vm_object) == first_object)) {
2798 
2799 			if ((entry->offset != cp) || (oldobject != srcobject)) {
2800 				/*
2801 				 * Remove old window into the file
2802 				 */
2803 				pmap_remove (map->pmap, uaddr, tend);
2804 
2805 				/*
2806 				 * Force copy-on-write for mmapped regions
2807 				 */
2808 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2809 
2810 				/*
2811 				 * Point the object appropriately
2812 				 */
2813 				if (oldobject != srcobject) {
2814 
2815 				/*
2816 				 * Set the object optimization hint flag
2817 				 */
2818 					vm_object_set_flag(srcobject, OBJ_OPT);
2819 					vm_object_reference(srcobject);
2820 					entry->object.vm_object = srcobject;
2821 
2822 					if (oldobject) {
2823 						vm_object_deallocate(oldobject);
2824 					}
2825 				}
2826 
2827 				entry->offset = cp;
2828 				map->timestamp++;
2829 			} else {
2830 				pmap_remove (map->pmap, uaddr, tend);
2831 			}
2832 
2833 		} else if ((first_object->ref_count == 1) &&
2834 			(first_object->size == osize) &&
2835 			((first_object->type == OBJT_DEFAULT) ||
2836 				(first_object->type == OBJT_SWAP)) ) {
2837 
2838 			oldobject = first_object->backing_object;
2839 
2840 			if ((first_object->backing_object_offset != cp) ||
2841 				(oldobject != srcobject)) {
2842 				/*
2843 				 * Remove old window into the file
2844 				 */
2845 				pmap_remove (map->pmap, uaddr, tend);
2846 
2847 				/*
2848 				 * Remove unneeded old pages
2849 				 */
2850 				if (first_object->resident_page_count) {
2851 					vm_object_page_remove (first_object, 0, 0, 0);
2852 				}
2853 
2854 				/*
2855 				 * Invalidate swap space
2856 				 */
2857 				if (first_object->type == OBJT_SWAP) {
2858 					swap_pager_freespace(first_object,
2859 						OFF_TO_IDX(first_object->paging_offset),
2860 						first_object->size);
2861 				}
2862 
2863 				/*
2864 				 * Force copy-on-write for mmapped regions
2865 				 */
2866 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2867 
2868 				/*
2869 				 * Point the object appropriately
2870 				 */
2871 				if (oldobject != srcobject) {
2872 
2873 				/*
2874 				 * Set the object optimization hint flag
2875 				 */
2876 					vm_object_set_flag(srcobject, OBJ_OPT);
2877 					vm_object_reference(srcobject);
2878 
2879 					if (oldobject) {
2880 						TAILQ_REMOVE(&oldobject->shadow_head,
2881 							first_object, shadow_list);
2882 						oldobject->shadow_count--;
2883 						vm_object_deallocate(oldobject);
2884 					}
2885 
2886 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
2887 						first_object, shadow_list);
2888 					srcobject->shadow_count++;
2889 
2890 					first_object->backing_object = srcobject;
2891 				}
2892 				first_object->backing_object_offset = cp;
2893 				map->timestamp++;
2894 			} else {
2895 				pmap_remove (map->pmap, uaddr, tend);
2896 			}
2897 /*
2898  * Otherwise, we have to do a logical mmap.
2899  */
2900 		} else {
2901 
2902 			vm_object_set_flag(srcobject, OBJ_OPT);
2903 			vm_object_reference(srcobject);
2904 
2905 			pmap_remove (map->pmap, uaddr, tend);
2906 
2907 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2908 			vm_map_lock_upgrade(map);
2909 
2910 			if (entry == &map->header) {
2911 				map->first_free = &map->header;
2912 			} else if (map->first_free->start >= start) {
2913 				map->first_free = entry->prev;
2914 			}
2915 
2916 			SAVE_HINT(map, entry->prev);
2917 			vm_map_entry_delete(map, entry);
2918 
2919 			object = srcobject;
2920 			ooffset = cp;
2921 #if 0
2922 			vm_object_shadow(&object, &ooffset, osize);
2923 #endif
2924 
2925 			rv = vm_map_insert(map, object, ooffset, start, tend,
2926 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
2927 
2928 			if (rv != KERN_SUCCESS)
2929 				panic("vm_uiomove: could not insert new entry: %d", rv);
2930 		}
2931 
2932 /*
2933  * Map the window directly, if it is already in memory
2934  */
2935 		pmap_object_init_pt(map->pmap, uaddr,
2936 			srcobject, oindex, tcnt, 0);
2937 
2938 		map->timestamp++;
2939 		vm_map_unlock(map);
2940 
2941 		cnt -= tcnt;
2942 		uaddr += tcnt;
2943 		cp += tcnt;
2944 		if (npages)
2945 			*npages += osize;
2946 	}
2947 	return 0;
2948 }
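
/*
 * Note (editorial): rather than copying bytes, vm_uiomove() above rewires
 * the destination mapping to reference srcobject (redirecting the entry,
 * shadowing it, or inserting a fresh entry), and then relies on
 * pmap_object_init_pt() to map any already-resident pages directly.
 */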
2949 
2950 /*
2951  * Performs the copy_on_write operations necessary to allow the virtual copies
2952  * into user space to work.  This has to be called for write(2) system calls
2953  * from other processes, file unlinking, and file size shrinkage.
2954  */
2955 void
2956 vm_freeze_copyopts(object, froma, toa)
2957 	vm_object_t object;
2958 	vm_pindex_t froma, toa;
2959 {
2960 	int rv;
2961 	vm_object_t robject;
2962 	vm_pindex_t idx;
2963 
2964 	if ((object == NULL) ||
2965 		((object->flags & OBJ_OPT) == 0))
2966 		return;
2967 
2968 	if (object->shadow_count > object->ref_count)
2969 		panic("vm_freeze_copyopts: sc > rc");
2970 
2971 	while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
2972 		vm_pindex_t bo_pindex;
2973 		vm_page_t m_in, m_out;
2974 
2975 		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
2976 
2977 		vm_object_reference(robject);
2978 
2979 		vm_object_pip_wait(robject, "objfrz");
2980 
2981 		if (robject->ref_count == 1) {
2982 			vm_object_deallocate(robject);
2983 			continue;
2984 		}
2985 
2986 		vm_object_pip_add(robject, 1);
2987 
2988 		for (idx = 0; idx < robject->size; idx++) {
2989 
2990 			m_out = vm_page_grab(robject, idx,
2991 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2992 
2993 			if (m_out->valid == 0) {
2994 				m_in = vm_page_grab(object, bo_pindex + idx,
2995 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
2996 				if (m_in->valid == 0) {
2997 					rv = vm_pager_get_pages(object, &m_in, 1, 0);
2998 					if (rv != VM_PAGER_OK) {
2999 						printf("vm_freeze_copyopts: cannot read page from file: %x\n", m_in->pindex);
3000 						continue;
3001 					}
3002 					vm_page_deactivate(m_in);
3003 				}
3004 
3005 				vm_page_protect(m_in, VM_PROT_NONE);
3006 				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
3007 				m_out->valid = m_in->valid;
3008 				m_out->dirty = VM_PAGE_BITS_ALL;
3009 
3010 				vm_page_activate(m_out);
3011 
3012 				vm_page_wakeup(m_in);
3013 			}
3014 			vm_page_wakeup(m_out);
3015 		}
3016 
3017 		object->shadow_count--;
3018 		object->ref_count--;
3019 		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
3020 		robject->backing_object = NULL;
3021 		robject->backing_object_offset = 0;
3022 
3023 		vm_object_pip_wakeup(robject);
3024 		vm_object_deallocate(robject);
3025 	}
3026 
3027 	vm_object_clear_flag(object, OBJ_OPT);
3028 }
3029 
3030 #include "opt_ddb.h"
3031 #ifdef DDB
3032 #include <sys/kernel.h>
3033 
3034 #include <ddb/ddb.h>
3035 
3036 /*
3037  *	vm_map_print:	[ debug ]
3038  */
3039 DB_SHOW_COMMAND(map, vm_map_print)
3040 {
3041 	static int nlines;
3042 	/* XXX convert args. */
3043 	vm_map_t map = (vm_map_t)addr;
3044 	boolean_t full = have_addr;
3045 
3046 	vm_map_entry_t entry;
3047 
3048 	db_iprintf("%s map %p: pmap=%p, nentries=%d, version=%u\n",
3049 	    (map->is_main_map ? "Task" : "Share"), (void *)map,
3050 	    (void *)map->pmap, map->nentries, map->timestamp);
3051 	nlines++;
3052 
3053 	if (!full && db_indent)
3054 		return;
3055 
3056 	db_indent += 2;
3057 	for (entry = map->header.next; entry != &map->header;
3058 	    entry = entry->next) {
3059 #if 0
3060 		if (nlines > 18) {
3061 			db_printf("--More--");
3062 			cngetc();
3063 			db_printf("\r");
3064 			nlines = 0;
3065 		}
3066 #endif
3067 
3068 		db_iprintf("map entry %p: start=%p, end=%p\n",
3069 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3070 		nlines++;
3071 		if (map->is_main_map) {
3072 			static char *inheritance_name[4] =
3073 			{"share", "copy", "none", "donate_copy"};
3074 
3075 			db_iprintf(" prot=%x/%x/%s",
3076 			    entry->protection,
3077 			    entry->max_protection,
3078 			    inheritance_name[entry->inheritance]);
3079 			if (entry->wired_count != 0)
3080 				db_printf(", wired");
3081 		}
3082 		if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
3083 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3084 			db_printf(", share=%p, offset=0x%lx\n",
3085 			    (void *)entry->object.share_map,
3086 			    (long)entry->offset);
3087 			nlines++;
3088 			if ((entry->prev == &map->header) ||
3089 			    ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
3090 			    (entry->prev->object.share_map !=
3091 				entry->object.share_map)) {
3092 				db_indent += 2;
3093 				vm_map_print((db_expr_t)(intptr_t)
3094 					     entry->object.share_map,
3095 					     full, 0, (char *)0);
3096 				db_indent -= 2;
3097 			}
3098 		} else {
3099 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3100 			db_printf(", object=%p, offset=0x%lx",
3101 			    (void *)entry->object.vm_object,
3102 			    (long)entry->offset);
3103 			if (entry->eflags & MAP_ENTRY_COW)
3104 				db_printf(", copy (%s)",
3105 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3106 			db_printf("\n");
3107 			nlines++;
3108 
3109 			if ((entry->prev == &map->header) ||
3110 			    (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
3111 			    (entry->prev->object.vm_object !=
3112 				entry->object.vm_object)) {
3113 				db_indent += 2;
3114 				vm_object_print((db_expr_t)(intptr_t)
3115 						entry->object.vm_object,
3116 						full, 0, (char *)0);
3117 				nlines += 4;
3118 				db_indent -= 2;
3119 			}
3120 		}
3121 	}
3122 	db_indent -= 2;
3123 	if (db_indent == 0)
3124 		nlines = 0;
3125 }
3126 
3127 
3128 DB_SHOW_COMMAND(procvm, procvm)
3129 {
3130 	struct proc *p;
3131 
3132 	if (have_addr) {
3133 		p = (struct proc *) addr;
3134 	} else {
3135 		p = curproc;
3136 	}
3137 
3138 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3139 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3140 	    (void *)&p->p_vmspace->vm_pmap);
3141 
3142 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3143 }
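
/*
 * Illustrative usage from the DDB prompt (the address is hypothetical):
 *
 *	db> show map 0xf0123456
 *	db> show procvm
 *
 * "show map" wants a vm_map address; "show procvm" defaults to curproc
 * when no address is given.
 */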
3144 
3145 #endif /* DDB */
3146