xref: /freebsd/sys/vm/vm_map.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static void vmspace_zinit(void *mem, int size);
static void vmspace_zfini(void *mem, int size);
static void vm_map_zinit(void *mem, int size);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);

#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	uma_prealloc(kmapentzone, MAX_KMAPENT);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(mapentzone, MAX_MAPENT);
}

static void
vmspace_zfini(void *mem, int size)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static void
vmspace_zinit(void *mem, int size)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
}

static void
vm_map_zfini(void *mem, int size)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	mtx_destroy(&map->system_mtx);
	lockdestroy(&map->lock);
}

static void
vm_map_zinit(void *mem, int size)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	map->nentries = 0;
	map->size = 0;
	map->infork = 0;
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}

static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
	KASSERT(map->infork == 0,
	    ("map %p infork == %d on free.",
	    map, map->infork));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, min, max);
	pmap_pinit(vmspace_pmap(vm));
	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_exitingcnt = 0;
	return (vm);
}

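/*
 *	vm_init2:
 *
 *	Second-stage initialization, run once the system page count is
 *	known: back the kernel map entry zone with kmapentobj and create
 *	the zone from which vmspace structures are allocated.
 */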
void
vm_init2(void)
{
	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
	    (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	pmap_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed; it might not have been freed
	 * in exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	vm_map_lock(&vm->vm_map);
	(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);
	vm_map_unlock(&vm->vm_map);

	pmap_release(vmspace_pmap(vm));
	uma_zfree(vmspace_zone, vm);
}

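/*
 *	vmspace_free:
 *
 *	Drop one reference to the given vmspace; the final reference
 *	(with no exiting processes outstanding) frees it.
 */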
void
vmspace_free(struct vmspace *vm)
{
	GIANT_REQUIRED;

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	GIANT_REQUIRED;
	vm = p->p_vmspace;
	p->p_vmspace = NULL;

	/*
	 * cleanup by parent process wait()ing on exiting child.  vm_refcnt
	 * may not be 0 (e.g. fork() and child exits without exec()ing).
	 * exitingcnt may increment above 0 and drop back down to zero
	 * several times while vm_refcnt is held non-zero.  vm_refcnt
	 * may also increment above 0 and drop back down to zero several
	 * times while vm_exitingcnt is held non-zero.
	 *
	 * The last wait on the exiting child's vmspace will clean up
	 * the remainder of the vmspace.
	 */
	if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
		vmspace_dofree(vm);
}

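/*
 * Map locking: system maps are locked with a mutex (map->system_mtx);
 * all other maps use an exclusive lockmgr lock (map->lock).  The
 * wrappers below dispatch on map->system_map, and each write
 * acquisition bumps map->timestamp so that sleepers can detect
 * intervening modifications.
 */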
void
_vm_map_lock(vm_map_t map, const char *file, int line)
{
	int error;

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else {
		error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
		KASSERT(error == 0, ("%s: failed to get lock", __func__));
	}
	map->timestamp++;
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
	else
		lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{
	int error;

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else {
		error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
		KASSERT(error == 0, ("%s: failed to get lock", __func__));
	}
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
	else
		lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
}

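/*
 * Try to acquire the map lock without sleeping; returns nonzero on
 * success.  The write variant bumps the map timestamp, the read
 * variant does not.
 */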
int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
	    lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
	    lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
	return (error == 0);
}

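/*
 * Because the map lock is held exclusively in both modes, upgrade and
 * downgrade merely assert that the lock is held; an upgrade
 * additionally bumps the map timestamp.  Both always succeed.
 */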
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
#ifdef INVARIANTS
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
	} else
		KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
		    ("%s: lock not held", __func__));
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
#ifdef INVARIANTS
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
	} else
		KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
		    ("%s: lock not held", __func__));
}

/*
 *	vm_map_unlock_and_wait:
 *
 *	Atomically unlock the map and sleep on its root entry pointer
 *	until a corresponding vm_map_wakeup() is performed.
 */
int
vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
{

	mtx_lock(&map_sleep_mtx);
	vm_map_unlock(map);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0));
}

/*
 *	vm_map_wakeup:
 *
 *	Wake up any threads sleeping in vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the vm_map_unlock()
	 * and the msleep() in vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

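/*
 *	vmspace_resident_count:
 *
 *	Return the number of pages currently resident in the
 *	vmspace's physical map.
 */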
long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, min, max);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
static void
_vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->first_free = &map->header;
	map->root = NULL;
	map->timestamp = 0;
}

void
vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{
	_vm_map_init(map, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static __inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_map_entry containing the given address.  If, however, that
 *	address is not found in the vm_map, returns a vm_map_entry that is
 *	adjacent to the address, coming before or after it.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
{
	struct vm_map_entry dummy;
	vm_map_entry_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (address < root->start) {
			if ((y = root->left) == NULL)
				break;
			if (address < y->start) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (address >= root->end) {
			if ((y = root->right) == NULL)
				break;
			if (address >= y->end) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/* Assemble the new root. */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

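	/*
	 * Splay the tree around after_where; splaying about an address
	 * contained in after_where brings that entry to the root, so
	 * the return value can be ignored.  The new entry then becomes
	 * the root, taking over after_where's right subtree.
	 */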
	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;

	cur = vm_map_entry_splay(address, map->root);
	if (cur == NULL)
		*entry = &map->header;
	else {
		map->root = cur;

		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	}
	return (FALSE);
}

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;

	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_LOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(object);
	} else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 ((prev_entry->object.vm_object == NULL) ||
		  vm_object_coalesce(prev_entry->object.vm_object,
				     OFF_TO_IDX(prev_entry->offset),
				     (vm_size_t)(prev_entry->end - prev_entry->start),
				     (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		vm_object_reference(object);
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start)) {
		map->first_free = new_entry;
	}

#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry);
#endif

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(
	vm_map_t map,
	vm_offset_t start,
	vm_size_t length,
	vm_offset_t *addr)
{
	vm_map_entry_t entry, next;
	vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	*addr = start;
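	/*
	 * When allocating from the kernel map, grow the kernel page
	 * tables as needed to cover the new region.
	 */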
	if (map == kernel_map) {
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			pmap_growkernel(ksize);
		}
	}
	return (0);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result, s = 0;

	start = *addr;

	if (map == kmem_map)
		s = splvm();

	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
		start, start + length, prot, max, cow);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ((prev->end == entry->start) &&
		    (prev->object.vm_object == entry->object.vm_object) &&
		    (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		    (prev->eflags == entry->eflags) &&
		    (prev->protection == entry->protection) &&
		    (prev->max_protection == entry->max_protection) &&
		    (prev->inheritance == entry->inheritance) &&
		    (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next);
		}
	}
}

/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 * The maximum number of pages to map
 */
#define	MAX_INIT_PT	96

/*
 *	vm_map_pmap_enter:
 *
 *	Preload the mappings for the given object into the specified
 *	map.  This eliminates the soft faults on process startup and
 *	immediately after an mmap(2).
 */
void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t tmpidx;
	int psize;
	vm_page_t p, mpte;

	if (object == NULL)
		return;
	mtx_lock(&Giant);
	VM_OBJECT_LOCK(object);
	if (object->type == OBJT_DEVICE) {
		pmap_object_init_pt(map->pmap, addr, object, pindex, size);
		goto unlock_return;
	}

	psize = atop(size);

	if (object->type != OBJT_VNODE ||
	    ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
	     (object->resident_page_count > MAX_INIT_PT))) {
		goto unlock_return;
	}

	if (psize + pindex > object->size) {
		if (object->size < pindex)
			goto unlock_return;
		psize = object->size - pindex;
	}

	mpte = NULL;

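	/*
	 * Find the first resident page at or after pindex; if the list
	 * head precedes pindex, splay the object's page tree to locate
	 * the successor quickly.
	 */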
	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < pindex) {
			p = vm_page_splay(pindex, object->root);
			if ((object->root = p)->pindex < pindex)
				p = TAILQ_NEXT(p, listq);
		}
	}
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
	     p = TAILQ_NEXT(p, listq)) {
		/*
		 * Don't let a madvise blow away our really free
		 * pages by allocating pv entries.
		 */
		if ((flags & MAP_PREFAULT_MADVISE) &&
		    cnt.v_free_count < cnt.v_free_reserved) {
			break;
		}
		vm_page_lock_queues();
		if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
		    (p->busy == 0) &&
		    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((p->queue - p->pc) == PQ_CACHE)
				vm_page_deactivate(p);
			vm_page_busy(p);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			mpte = pmap_enter_quick(map->pmap,
				addr + ptoa(tmpidx), p, mpte);
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			vm_page_wakeup(p);
		}
		vm_page_unlock_queues();
	}
unlock_return:
	VM_OBJECT_UNLOCK(object);
	mtx_unlock(&Giant);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */
		if (current->protection != old_prot) {
			mtx_lock(&Giant);
			vm_page_lock_queues();
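			/*
			 * For copy-on-write entries, mask out write
			 * permission in the pmap so that the first
			 * write still faults and triggers the copy.
			 */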
#define	MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
			vm_page_unlock_queues();
			mtx_unlock(&Giant);
		}
		vm_map_simplify_entry(map, current);
		current = current->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 *	vm_map_madvise:
 *
 *	This routine traverses a process's map handling the madvise
 *	system call.  Advisories are classified as either those affecting
 *	the vm_map_entry structure, or those affecting the underlying
 *	objects.
 */
int
vm_map_madvise(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */
	switch (behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		vm_map_lock_read(map);
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Locate starting entry and clip if necessary.
	 */
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		if (modify_map)
			vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	if (modify_map) {
		/*
		 * madvise behaviors that are implemented in the vm_map_entry.
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				break;
			}
			vm_map_simplify_entry(map, current);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pindex;
		int count;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			vm_offset_t useStart;

			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			pindex = OFF_TO_IDX(current->offset);
			count = atop(current->end - current->start);
			useStart = current->start;

			if (current->start < start) {
				pindex += atop(start - current->start);
				count -= atop(start - current->start);
				useStart = start;
			}
			if (current->end > end)
				count -= atop(current->end - end);

			if (count <= 0)
				continue;

			vm_object_madvise(current->object.vm_object,
					  pindex, count, behav);
			if (behav == MADV_WILLNEED) {
				vm_map_pmap_enter(map,
				    useStart,
				    current->object.vm_object,
				    pindex,
				    (count << PAGE_SHIFT),
				    MAP_PREFAULT_MADVISE
				);
			}
		}
		vm_map_unlock_read(map);
	}
	return (0);
}

/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_inherit_t new_inheritance)
{
	vm_map_entry_t entry;
	vm_map_entry_t temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);
		entry->inheritance = new_inheritance;
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 *	vm_map_unwire:
 *
 *	Implements both kernel and user unwiring.
 */
int
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_unwire;

	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = map->header.next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, user_unwire)) {
				/*
				 * Allow interruption of user unwiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (saved_start == start) {
						/*
						 * first_entry has been deleted.
						 */
						vm_map_unlock(map);
						return (KERN_INVALID_ADDRESS);
					}
					end = saved_start;
					rv = KERN_INVALID_ADDRESS;
					goto done;
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		/*
		 * Require that the entry is wired.
		 */
		if (entry->wired_count == 0 || (user_unwire &&
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) {
			end = entry->end;
			rv = KERN_INVALID_ARGUMENT;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		KASSERT(result, ("vm_map_unwire: lookup failed"));
	}
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (rv == KERN_SUCCESS) {
			if (user_unwire)
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			entry->wired_count--;
			if (entry->wired_count == 0) {
				/*
				 * Retain the map lock.
				 */
				vm_fault_unwire(map, entry->start, entry->end);
			}
		}
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
			("vm_map_unwire: in-transition flag missing"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}

/*
 *	vm_map_wire:
 *
 *	Implements both kernel and user wiring.
 */
int
vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_end, saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_wire;

	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = map->header.next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, user_wire)) {
				/*
				 * Allow interruption of user wiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (saved_start == start) {
						/*
						 * first_entry has been deleted.
						 */
						vm_map_unlock(map);
						return (KERN_INVALID_ADDRESS);
					}
					end = saved_start;
					rv = KERN_INVALID_ADDRESS;
					goto done;
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		/*
		 * Wire the entry if it is not already wired; the map
		 * lock is dropped while the pages are faulted in.
		 */
		if (entry->wired_count == 0) {
			entry->wired_count++;
			saved_start = entry->start;
			saved_end = entry->end;
			/*
			 * Release the map lock, relying on the in-transition
			 * mark.
			 */
			vm_map_unlock(map);
			rv = vm_fault_wire(map, saved_start, saved_end,
			    user_wire);
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.  The entry
				 * may have been clipped, but NOT merged or
				 * deleted.
				 */
				result = vm_map_lookup_entry(map, saved_start,
				    &tmp_entry);
				KASSERT(result, ("vm_map_wire: lookup failed"));
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
				while (entry->end < saved_end) {
					if (rv != KERN_SUCCESS) {
						KASSERT(entry->wired_count == 1,
						    ("vm_map_wire: bad count"));
						entry->wired_count = -1;
					}
					entry = entry->next;
				}
			}
			last_timestamp = map->timestamp;
			if (rv != KERN_SUCCESS) {
				KASSERT(entry->wired_count == 1,
				    ("vm_map_wire: bad count"));
				/*
				 * Assign an out-of-range value to represent
				 * the failure to wire this entry.
				 */
				entry->wired_count = -1;
				end = entry->end;
				goto done;
			}
		} else if (!user_wire ||
			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
			entry->wired_count++;
		}
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		KASSERT(result, ("vm_map_wire: lookup failed"));
	}
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (rv == KERN_SUCCESS) {
			if (user_wire)
				entry->eflags |= MAP_ENTRY_USER_WIRED;
		} else if (entry->wired_count == -1) {
			/*
			 * Wiring failed on this entry.  Thus, unwiring is
			 * unnecessary.
			 */
			entry->wired_count = 0;
		} else {
			if (!user_wire ||
			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
				entry->wired_count--;
			if (entry->wired_count == 0) {
				/*
				 * Retain the map lock.
				 */
				vm_fault_unwire(map, entry->start, entry->end);
			}
		}
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
			("vm_map_wire: in-transition flag missing"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}

1926 /*
1927  * vm_map_clean
1928  *
1929  * Push any dirty cached pages in the address range to their pager.
1930  * If syncio is TRUE, dirty pages are written synchronously.
1931  * If invalidate is TRUE, any cached pages are freed as well.
1932  *
1933  * Returns an error if any part of the specified range is not mapped.
1934  */
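/*
 * For example, an msync(2)-style caller might flush and discard the
 * cached pages of a range roughly as follows (an illustrative sketch,
 * not a quote of the actual syscall code):
 *
 *	rv = vm_map_clean(map, trunc_page(addr), round_page(addr + size),
 *	    (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0);
 */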
1935 int
1936 vm_map_clean(
1937 	vm_map_t map,
1938 	vm_offset_t start,
1939 	vm_offset_t end,
1940 	boolean_t syncio,
1941 	boolean_t invalidate)
1942 {
1943 	vm_map_entry_t current;
1944 	vm_map_entry_t entry;
1945 	vm_size_t size;
1946 	vm_object_t object;
1947 	vm_ooffset_t offset;
1948 
1949 	GIANT_REQUIRED;
1950 
1951 	vm_map_lock_read(map);
1952 	VM_MAP_RANGE_CHECK(map, start, end);
1953 	if (!vm_map_lookup_entry(map, start, &entry)) {
1954 		vm_map_unlock_read(map);
1955 		return (KERN_INVALID_ADDRESS);
1956 	}
1957 	/*
1958 	 * Make a first pass to check for holes.
1959 	 */
1960 	for (current = entry; current->start < end; current = current->next) {
1961 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1962 			vm_map_unlock_read(map);
1963 			return (KERN_INVALID_ARGUMENT);
1964 		}
1965 		if (end > current->end &&
1966 		    (current->next == &map->header ||
1967 			current->end != current->next->start)) {
1968 			vm_map_unlock_read(map);
1969 			return (KERN_INVALID_ADDRESS);
1970 		}
1971 	}
1972 
1973 	if (invalidate) {
1974 		vm_page_lock_queues();
1975 		pmap_remove(map->pmap, start, end);
1976 		vm_page_unlock_queues();
1977 	}
1978 	/*
1979 	 * Make a second pass, cleaning/uncaching pages from the indicated
1980 	 * objects as we go.
1981 	 */
1982 	for (current = entry; current->start < end; current = current->next) {
1983 		offset = current->offset + (start - current->start);
1984 		size = (end <= current->end ? end : current->end) - start;
1985 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1986 			vm_map_t smap;
1987 			vm_map_entry_t tentry;
1988 			vm_size_t tsize;
1989 
1990 			smap = current->object.sub_map;
1991 			vm_map_lock_read(smap);
1992 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1993 			tsize = tentry->end - offset;
1994 			if (tsize < size)
1995 				size = tsize;
1996 			object = tentry->object.vm_object;
1997 			offset = tentry->offset + (offset - tentry->start);
1998 			vm_map_unlock_read(smap);
1999 		} else {
2000 			object = current->object.vm_object;
2001 		}
2002 		/*
2003 		 * Note that there is absolutely no sense in writing out
2004 		 * anonymous objects, so we track down the vnode object
2005 		 * to write out.
2006 		 * We invalidate (remove) all pages from the address space
2007 		 * anyway, for semantic correctness.
2008 		 *
2009 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
2010 		 * may start out with a NULL object.
2011 		 */
2012 		while (object && object->backing_object) {
2013 			offset += object->backing_object_offset;
2014 			object = object->backing_object;
2015 			if (object->size < OFF_TO_IDX(offset + size))
2016 				size = IDX_TO_OFF(object->size) - offset;
2017 		}
2018 		if (object && (object->type == OBJT_VNODE) &&
2019 		    (current->protection & VM_PROT_WRITE)) {
2020 			/*
2021 			 * Flush pages if writing is allowed, invalidate them
2022 			 * if invalidation requested.  Pages undergoing I/O
2023 			 * will be ignored by vm_object_page_remove().
2024 			 *
2025 			 * We cannot lock the vnode and then wait for paging
2026 			 * to complete without deadlocking against vm_fault.
2027 			 * Instead we simply call vm_object_page_remove() and
2028 			 * allow it to block internally on a page-by-page
2029 			 * basis when it encounters pages undergoing async
2030 			 * I/O.
2031 			 */
2032 			int flags;
2033 
2034 			vm_object_reference(object);
2035 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
2036 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2037 			flags |= invalidate ? OBJPC_INVAL : 0;
2038 			VM_OBJECT_LOCK(object);
2039 			vm_object_page_clean(object,
2040 			    OFF_TO_IDX(offset),
2041 			    OFF_TO_IDX(offset + size + PAGE_MASK),
2042 			    flags);
2043 			VM_OBJECT_UNLOCK(object);
2044 			VOP_UNLOCK(object->handle, 0, curthread);
2045 			vm_object_deallocate(object);
2046 		}
2047 		if (object && invalidate &&
2048 		    ((object->type == OBJT_VNODE) ||
2049 		     (object->type == OBJT_DEVICE))) {
2050 			VM_OBJECT_LOCK(object);
2051 			vm_object_page_remove(object,
2052 			    OFF_TO_IDX(offset),
2053 			    OFF_TO_IDX(offset + size + PAGE_MASK),
2054 			    FALSE);
2055 			VM_OBJECT_UNLOCK(object);
2056 		}
2057 		start += size;
2058 	}
2059 
2060 	vm_map_unlock_read(map);
2061 	return (KERN_SUCCESS);
2062 }
2063 
2064 /*
2065  *	vm_map_entry_unwire:	[ internal use only ]
2066  *
2067  *	Make the region specified by this entry pageable.
2068  *
2069  *	The map in question should be locked.
2070  *	[This is the reason for this routine's existence.]
2071  */
2072 static void
2073 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2074 {
2075 	vm_fault_unwire(map, entry->start, entry->end);
2076 	entry->wired_count = 0;
2077 }
2078 
2079 /*
2080  *	vm_map_entry_delete:	[ internal use only ]
2081  *
2082  *	Deallocate the given entry from the target map.
2083  */
2084 static void
2085 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2086 {
2087 	vm_map_entry_unlink(map, entry);
2088 	map->size -= entry->end - entry->start;
2089 
2090 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2091 		vm_object_deallocate(entry->object.vm_object);
2092 	}
2093 
2094 	vm_map_entry_dispose(map, entry);
2095 }
2096 
2097 /*
2098  *	vm_map_delete:	[ internal use only ]
2099  *
2100  *	Deallocates the given address range from the target
2101  *	map.
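 *
 *	The map must be locked.  (vm_map_remove() is the exported,
 *	locking form.)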
2102  */
2103 int
2104 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2105 {
2106 	vm_object_t object;
2107 	vm_map_entry_t entry;
2108 	vm_map_entry_t first_entry;
2109 
2110 	/*
2111 	 * Find the start of the region, and clip it
2112 	 */
2113 	if (!vm_map_lookup_entry(map, start, &first_entry))
2114 		entry = first_entry->next;
2115 	else {
2116 		entry = first_entry;
2117 		vm_map_clip_start(map, entry, start);
2118 	}
2119 
2120 	/*
2121 	 * Save the free space hint
2122 	 */
2123 	if (entry == &map->header) {
2124 		map->first_free = &map->header;
2125 	} else if (map->first_free->start >= start) {
2126 		map->first_free = entry->prev;
2127 	}
2128 
2129 	/*
2130 	 * Step through all entries in this region
2131 	 */
2132 	while ((entry != &map->header) && (entry->start < end)) {
2133 		vm_map_entry_t next;
2134 		vm_offset_t s, e;
2135 		vm_pindex_t offidxstart, offidxend, count;
2136 
2137 		/*
2138 		 * Wait for wiring or unwiring of an entry to complete.
2139 		 */
2140 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
2141 			unsigned int last_timestamp;
2142 			vm_offset_t saved_start;
2143 			vm_map_entry_t tmp_entry;
2144 
2145 			saved_start = entry->start;
2146 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2147 			last_timestamp = map->timestamp;
2148 			(void) vm_map_unlock_and_wait(map, FALSE);
2149 			vm_map_lock(map);
2150 			if (last_timestamp + 1 != map->timestamp) {
2151 				/*
2152 				 * Look again for the entry because the map was
2153 				 * modified while it was unlocked.
2154 				 * Specifically, the entry may have been
2155 				 * clipped, merged, or deleted.
2156 				 */
2157 				if (!vm_map_lookup_entry(map, saved_start,
2158 							 &tmp_entry))
2159 					entry = tmp_entry->next;
2160 				else {
2161 					entry = tmp_entry;
2162 					vm_map_clip_start(map, entry,
2163 							  saved_start);
2164 				}
2165 			}
2166 			continue;
2167 		}
2168 		vm_map_clip_end(map, entry, end);
2169 
2170 		s = entry->start;
2171 		e = entry->end;
2172 		next = entry->next;
2173 
2174 		offidxstart = OFF_TO_IDX(entry->offset);
2175 		count = OFF_TO_IDX(e - s);
2176 		object = entry->object.vm_object;
2177 
2178 		/*
2179 		 * Unwire before removing addresses from the pmap; otherwise,
2180 		 * unwiring will put the entries back in the pmap.
2181 		 */
2182 		if (entry->wired_count != 0) {
2183 			vm_map_entry_unwire(map, entry);
2184 		}
2185 
2186 		offidxend = offidxstart + count;
2187 
2188 		if (object == kernel_object || object == kmem_object) {
2189 			VM_OBJECT_LOCK(object);
2190 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2191 			VM_OBJECT_UNLOCK(object);
2192 		} else {
2193 			mtx_lock(&Giant);
2194 			vm_page_lock_queues();
2195 			pmap_remove(map->pmap, s, e);
2196 			vm_page_unlock_queues();
2197 			if (object != NULL) {
2198 				VM_OBJECT_LOCK(object);
2199 				if (object->ref_count != 1 &&
2200 				    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2201 				    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2202 					vm_object_collapse(object);
2203 					vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2204 					if (object->type == OBJT_SWAP)
2205 						swap_pager_freespace(object, offidxstart, count);
2206 					if (offidxend >= object->size &&
2207 					    offidxstart < object->size)
2208 						object->size = offidxstart;
2209 				}
2210 				VM_OBJECT_UNLOCK(object);
2211 			}
2212 			mtx_unlock(&Giant);
2213 		}
2214 
2215 		/*
2216 		 * Delete the entry (which may delete the object) only after
2217 		 * removing all pmap entries pointing to its pages.
2218 		 * (Otherwise, its page frames may be reallocated, and any
2219 		 * modify bits will be set in the wrong object!)
2220 		 */
2221 		vm_map_entry_delete(map, entry);
2222 		entry = next;
2223 	}
2224 	return (KERN_SUCCESS);
2225 }
2226 
2227 /*
2228  *	vm_map_remove:
2229  *
2230  *	Remove the given address range from the target map.
2231  *	This is the exported form of vm_map_delete.
2232  */
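/*
 * A typical caller simply tears down a page-aligned range, e.g. (an
 * illustrative sketch):
 *
 *	(void) vm_map_remove(map, trunc_page(start), round_page(end));
 */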
2233 int
2234 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2235 {
2236 	int result, s = 0;
2237 
2238 	if (map == kmem_map)
2239 		s = splvm();
2240 
2241 	vm_map_lock(map);
2242 	VM_MAP_RANGE_CHECK(map, start, end);
2243 	result = vm_map_delete(map, start, end);
2244 	vm_map_unlock(map);
2245 
2246 	if (map == kmem_map)
2247 		splx(s);
2248 
2249 	return (result);
2250 }
2251 
2252 /*
2253  *	vm_map_check_protection:
2254  *
2255  *	Assert that the target map allows the specified privilege on the
2256  *	entire address region given.  The entire region must be allocated.
2257  *
2258  *	WARNING!  This code does not and should not check whether the
2259  *	contents of the region are accessible.  For example, a smaller file
2260  *	might be mapped into a larger address space.
2261  *
2262  *	NOTE!  This code is also called by munmap().
2263  */
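/*
 * For example, a caller validating that a user range is readable
 * might do (an illustrative sketch):
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ))
 *		return (EINVAL);
 */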
2264 boolean_t
2265 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2266 			vm_prot_t protection)
2267 {
2268 	vm_map_entry_t entry;
2269 	vm_map_entry_t tmp_entry;
2270 
2271 	vm_map_lock_read(map);
2272 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2273 		vm_map_unlock_read(map);
2274 		return (FALSE);
2275 	}
2276 	entry = tmp_entry;
2277 
2278 	while (start < end) {
2279 		if (entry == &map->header) {
2280 			vm_map_unlock_read(map);
2281 			return (FALSE);
2282 		}
2283 		/*
2284 		 * No holes allowed!
2285 		 */
2286 		if (start < entry->start) {
2287 			vm_map_unlock_read(map);
2288 			return (FALSE);
2289 		}
2290 		/*
2291 		 * Check protection associated with entry.
2292 		 */
2293 		if ((entry->protection & protection) != protection) {
2294 			vm_map_unlock_read(map);
2295 			return (FALSE);
2296 		}
2297 		/* go to next entry */
2298 		start = entry->end;
2299 		entry = entry->next;
2300 	}
2301 	vm_map_unlock_read(map);
2302 	return (TRUE);
2303 }
2304 
2305 /*
2306  *	vm_map_copy_entry:
2307  *
2308  *	Copies the contents of the source entry to the destination
2309  *	entry.  The entries *must* be aligned properly.
2310  */
2311 static void
2312 vm_map_copy_entry(
2313 	vm_map_t src_map,
2314 	vm_map_t dst_map,
2315 	vm_map_entry_t src_entry,
2316 	vm_map_entry_t dst_entry)
2317 {
2318 	vm_object_t src_object;
2319 
2320 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2321 		return;
2322 
2323 	if (src_entry->wired_count == 0) {
2324 
2325 		/*
2326 		 * If the source entry is marked needs_copy, it is already
2327 		 * write-protected.
2328 		 */
2329 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2330 			vm_page_lock_queues();
2331 			pmap_protect(src_map->pmap,
2332 			    src_entry->start,
2333 			    src_entry->end,
2334 			    src_entry->protection & ~VM_PROT_WRITE);
2335 			vm_page_unlock_queues();
2336 		}
2337 
2338 		/*
2339 		 * Make a copy of the object.
2340 		 */
2341 		if ((src_object = src_entry->object.vm_object) != NULL) {
2342 
2343 			if ((src_object->handle == NULL) &&
2344 				(src_object->type == OBJT_DEFAULT ||
2345 				 src_object->type == OBJT_SWAP)) {
2346 				VM_OBJECT_LOCK(src_object);
2347 				vm_object_collapse(src_object);
2348 				VM_OBJECT_UNLOCK(src_object);
2349 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2350 					vm_object_split(src_entry);
2351 					src_object = src_entry->object.vm_object;
2352 				}
2353 			}
2354 
2355 			vm_object_reference(src_object);
2356 			VM_OBJECT_LOCK(src_object);
2357 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2358 			VM_OBJECT_UNLOCK(src_object);
2359 			dst_entry->object.vm_object = src_object;
2360 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2361 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2362 			dst_entry->offset = src_entry->offset;
2363 		} else {
2364 			dst_entry->object.vm_object = NULL;
2365 			dst_entry->offset = 0;
2366 		}
2367 
2368 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2369 		    dst_entry->end - dst_entry->start, src_entry->start);
2370 	} else {
2371 		/*
2372 		 * Of course, wired-down pages can't be set copy-on-write.
2373 		 * Cause wired pages to be copied into the new map by
2374 		 * simulating faults (the new pages are pageable).
2375 		 */
2376 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2377 	}
2378 }
2379 
2380 /*
2381  * vmspace_fork:
2382  * Create a new process vmspace structure and vm_map
2383  * based on those of an existing process.  The new map
2384  * is based on the old map, according to the inheritance
2385  * values on the regions in that map.
2386  *
2387  * The source map must not be locked.
2388  */
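/*
 * The fork path uses this along the lines of (an illustrative sketch):
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 */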
2389 struct vmspace *
2390 vmspace_fork(struct vmspace *vm1)
2391 {
2392 	struct vmspace *vm2;
2393 	vm_map_t old_map = &vm1->vm_map;
2394 	vm_map_t new_map;
2395 	vm_map_entry_t old_entry;
2396 	vm_map_entry_t new_entry;
2397 	vm_object_t object;
2398 
2399 	GIANT_REQUIRED;
2400 
2401 	vm_map_lock(old_map);
2402 	old_map->infork = 1;
2403 
2404 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2405 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2406 	    (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
2407 	new_map = &vm2->vm_map;	/* XXX */
2408 	new_map->timestamp = 1;
2409 
2410 	/* Do not inherit the MAP_WIREFUTURE property. */
2411 	if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE)
2412 		new_map->flags &= ~MAP_WIREFUTURE;
2413 
2414 	old_entry = old_map->header.next;
2415 
2416 	while (old_entry != &old_map->header) {
2417 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2418 			panic("vm_map_fork: encountered a submap");
2419 
2420 		switch (old_entry->inheritance) {
2421 		case VM_INHERIT_NONE:
2422 			break;
2423 
2424 		case VM_INHERIT_SHARE:
2425 			/*
2426 			 * Clone the entry, creating the shared object if necessary.
2427 			 */
2428 			object = old_entry->object.vm_object;
2429 			if (object == NULL) {
2430 				object = vm_object_allocate(OBJT_DEFAULT,
2431 					atop(old_entry->end - old_entry->start));
2432 				old_entry->object.vm_object = object;
2433 				old_entry->offset = (vm_offset_t) 0;
2434 			}
2435 
2436 			/*
2437 			 * Add the reference before calling vm_object_shadow
2438 			 * to ensure that a shadow object is created.
2439 			 */
2440 			vm_object_reference(object);
2441 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2442 				vm_object_shadow(&old_entry->object.vm_object,
2443 					&old_entry->offset,
2444 					atop(old_entry->end - old_entry->start));
2445 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2446 				/* Transfer the second reference too. */
2447 				vm_object_reference(
2448 				    old_entry->object.vm_object);
2449 				vm_object_deallocate(object);
2450 				object = old_entry->object.vm_object;
2451 			}
2452 			VM_OBJECT_LOCK(object);
2453 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2454 			VM_OBJECT_UNLOCK(object);
2455 
2456 			/*
2457 			 * Clone the entry, referencing the shared object.
2458 			 */
2459 			new_entry = vm_map_entry_create(new_map);
2460 			*new_entry = *old_entry;
2461 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2462 			new_entry->wired_count = 0;
2463 
2464 			/*
2465 			 * Insert the entry into the new map -- we know we're
2466 			 * inserting at the end of the new map.
2467 			 */
2468 			vm_map_entry_link(new_map, new_map->header.prev,
2469 			    new_entry);
2470 
2471 			/*
2472 			 * Update the physical map
2473 			 */
2474 			pmap_copy(new_map->pmap, old_map->pmap,
2475 			    new_entry->start,
2476 			    (old_entry->end - old_entry->start),
2477 			    old_entry->start);
2478 			break;
2479 
2480 		case VM_INHERIT_COPY:
2481 			/*
2482 			 * Clone the entry and link into the map.
2483 			 */
2484 			new_entry = vm_map_entry_create(new_map);
2485 			*new_entry = *old_entry;
2486 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2487 			new_entry->wired_count = 0;
2488 			new_entry->object.vm_object = NULL;
2489 			vm_map_entry_link(new_map, new_map->header.prev,
2490 			    new_entry);
2491 			vm_map_copy_entry(old_map, new_map, old_entry,
2492 			    new_entry);
2493 			break;
2494 		}
2495 		old_entry = old_entry->next;
2496 	}
2497 
2498 	new_map->size = old_map->size;
2499 	old_map->infork = 0;
2500 	vm_map_unlock(old_map);
2501 
2502 	return (vm2);
2503 }
2504 
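/*
 *	vm_map_stack:
 *
 *	Map an initial portion (init_ssize, at most sgrowsiz) of a
 *	grow-down stack at the top of the range [addrbos, addrbos +
 *	max_ssize), recording the remaining room in avail_ssize so that
 *	vm_map_growstack() can extend the stack later.
 */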
2505 int
2506 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2507 	      vm_prot_t prot, vm_prot_t max, int cow)
2508 {
2509 	vm_map_entry_t prev_entry;
2510 	vm_map_entry_t new_stack_entry;
2511 	vm_size_t      init_ssize;
2512 	int            rv;
2513 
2514 	if (addrbos < vm_map_min(map))
2515 		return (KERN_NO_SPACE);
2516 	if (addrbos > map->max_offset)
2517 		return (KERN_NO_SPACE);
2518 	if (max_ssize < sgrowsiz)
2519 		init_ssize = max_ssize;
2520 	else
2521 		init_ssize = sgrowsiz;
2522 
2523 	vm_map_lock(map);
2524 
2525 	/* If addr is already mapped, no go */
2526 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2527 		vm_map_unlock(map);
2528 		return (KERN_NO_SPACE);
2529 	}
2530 
2531 	/* If we would blow our VMEM resource limit, no go */
2532 	if (map->size + init_ssize >
2533 	    curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2534 		vm_map_unlock(map);
2535 		return (KERN_NO_SPACE);
2536 	}
2537 
2538 	/* If we can't accommodate max_ssize in the current mapping,
2539 	 * no go.  However, we need to be aware that subsequent user
2540 	 * mappings might map into the space we have reserved for the
2541 	 * stack, and currently this space is not protected.
2542 	 *
2543 	 * Hopefully we will at least detect this condition
2544 	 * when we try to grow the stack.
2545 	 */
2546 	if ((prev_entry->next != &map->header) &&
2547 	    (prev_entry->next->start < addrbos + max_ssize)) {
2548 		vm_map_unlock(map);
2549 		return (KERN_NO_SPACE);
2550 	}
2551 
2552 	/* We initially map a stack of only init_ssize.  We will
2553 	 * grow as needed later.  Since this is to be a grow
2554 	 * down stack, we map at the top of the range.
2555 	 *
2556 	 * Note: we would normally expect prot and max to be
2557 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
2558 	 * eliminate these as input parameters, and just
2559 	 * pass these values here in the insert call.
2560 	 */
2561 	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
2562 	                   addrbos + max_ssize, prot, max, cow);
2563 
2564 	/* Now set the avail_ssize amount */
2565 	if (rv == KERN_SUCCESS){
2566 		if (prev_entry != &map->header)
2567 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
2568 		new_stack_entry = prev_entry->next;
2569 		if (new_stack_entry->end   != addrbos + max_ssize ||
2570 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
2571 			panic ("Bad entry start/end for new stack entry");
2572 
2573 		new_stack_entry->avail_ssize = max_ssize - init_ssize;
2574 		new_stack_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
2575 	}
2576 
2577 	vm_map_unlock(map);
2578 	return (rv);
2579 }
2580 
2581 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2582  * desired address is already mapped, or if we successfully grow
2583  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2584  * stack range (this is strange, but preserves compatibility with
2585  * the grow function in vm_machdep.c).
2586  */
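/*
 * The page-fault path typically consults this before failing a fault
 * near the stack, e.g. (an illustrative sketch):
 *
 *	if (vm_map_growstack(p, va) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 */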
2587 int
2588 vm_map_growstack(struct proc *p, vm_offset_t addr)
2589 {
2590 	vm_map_entry_t next_entry, prev_entry;
2591 	vm_map_entry_t new_entry, stack_entry;
2592 	struct vmspace *vm = p->p_vmspace;
2593 	vm_map_t map = &vm->vm_map;
2594 	vm_offset_t end;
2595 	size_t grow_amount, max_grow;
2596 	int is_procstack, rv;
2597 
2598 	GIANT_REQUIRED;
2599 
2600 Retry:
2601 	vm_map_lock_read(map);
2602 
2603 	/* If addr is already in the entry range, no need to grow. */
2604 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
2605 		vm_map_unlock_read(map);
2606 		return (KERN_SUCCESS);
2607 	}
2608 
2609 	next_entry = prev_entry->next;
2610 	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
2611 		/*
2612 		 * This entry does not grow upwards. Since the address lies
2613 		 * beyond this entry, the next entry (if one exists) has to
2614 		 * be a downward growable entry. The entry list header is
2615 		 * never a growable entry, so it suffices to check the flags.
2616 		 */
2617 		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
2618 			vm_map_unlock_read(map);
2619 			return (KERN_SUCCESS);
2620 		}
2621 		stack_entry = next_entry;
2622 	} else {
2623 		/*
2624 		 * This entry grows upward. If the next entry does not at
2625 		 * least grow downwards, this is the entry we need to grow.
2626 		 * Otherwise we have two possible choices and we have to
2627 		 * select one.
2628 		 */
2629 		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
2630 			/*
2631 			 * We have two choices: grow the entry closest to
2632 			 * the address to minimize the amount of growth.
2633 			 */
2634 			if (addr - prev_entry->end <= next_entry->start - addr)
2635 				stack_entry = prev_entry;
2636 			else
2637 				stack_entry = next_entry;
2638 		} else
2639 			stack_entry = prev_entry;
2640 	}
2641 
2642 	if (stack_entry == next_entry) {
2643 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
2644 		KASSERT(addr < stack_entry->start, ("foo"));
2645 		end = (prev_entry != &map->header) ? prev_entry->end :
2646 		    stack_entry->start - stack_entry->avail_ssize;
2647 		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
2648 		max_grow = stack_entry->start - end;
2649 	} else {
2650 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
2651 		KASSERT(addr > stack_entry->end, ("foo"));
2652 		end = (next_entry != &map->header) ? next_entry->start :
2653 		    stack_entry->end + stack_entry->avail_ssize;
2654 		grow_amount = roundup(addr - stack_entry->end, PAGE_SIZE);
2655 		max_grow = end - stack_entry->end;
2656 	}
2657 
2658 	if (grow_amount > stack_entry->avail_ssize) {
2659 		vm_map_unlock_read(map);
2660 		return (KERN_NO_SPACE);
2661 	}
2662 
2663 	/*
2664 	 * If there is no longer enough space between the entries, fail and
2665 	 * adjust the available space.  Note: this should only happen if the
2666 	 * user has mapped into the stack area after the stack was created,
2667 	 * and is probably an error.
2668 	 *
2669 	 * This also effectively destroys any guard page the user might have
2670 	 * intended by limiting the stack size.
2671 	 */
2672 	if (grow_amount > max_grow) {
2673 		if (vm_map_lock_upgrade(map))
2674 			goto Retry;
2675 
2676 		stack_entry->avail_ssize = max_grow;
2677 
2678 		vm_map_unlock(map);
2679 		return (KERN_NO_SPACE);
2680 	}
2681 
2682 	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
2683 
2684 	/*
2685 	 * If this is the main process stack, see if we're over the stack
2686 	 * limit.
2687 	 */
2688 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2689 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2690 		vm_map_unlock_read(map);
2691 		return (KERN_NO_SPACE);
2692 	}
2693 
2694 	/* Round up the grow amount to a multiple of sgrowsiz. */
2695 	grow_amount = roundup (grow_amount, sgrowsiz);
2696 	if (grow_amount > stack_entry->avail_ssize)
2697 		grow_amount = stack_entry->avail_ssize;
2698 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2699 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2700 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2701 		              ctob(vm->vm_ssize);
2702 	}
2703 
2704 	/* If we would blow our VMEM resource limit, no go */
2705 	if (map->size + grow_amount >
2706 	    curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2707 		vm_map_unlock_read(map);
2708 		return (KERN_NO_SPACE);
2709 	}
2710 
2711 	if (vm_map_lock_upgrade(map))
2712 		goto Retry;
2713 
2714 	if (stack_entry == next_entry) {
2715 		/*
2716 		 * Growing downward.
2717 		 */
2718 		/* Get the preliminary new entry start value */
2719 		addr = stack_entry->start - grow_amount;
2720 
2721 		/*
2722 		 * If this puts us into the previous entry, cut back our
2723 		 * growth to the available space. Also, see the note above.
2724 		 */
2725 		if (addr < end) {
2726 			stack_entry->avail_ssize = max_grow;
2727 			addr = end;
2728 		}
2729 
2730 		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2731 		    p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
2732 
2733 		/* Adjust the available stack space by the amount we grew. */
2734 		if (rv == KERN_SUCCESS) {
2735 			if (prev_entry != &map->header)
2736 				vm_map_clip_end(map, prev_entry, addr);
2737 			new_entry = prev_entry->next;
2738 			KASSERT(new_entry == stack_entry->prev, ("foo"));
2739 			KASSERT(new_entry->end == stack_entry->start, ("foo"));
2740 			KASSERT(new_entry->start == addr, ("foo"));
2741 			grow_amount = new_entry->end - new_entry->start;
2742 			new_entry->avail_ssize = stack_entry->avail_ssize -
2743 			    grow_amount;
2744 			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
2745 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
2746 		}
2747 	} else {
2748 		/*
2749 		 * Growing upward.
2750 		 */
2751 		addr = stack_entry->end + grow_amount;
2752 
2753 		/*
2754 		 * If this puts us into the next entry, cut back our growth
2755 		 * to the available space. Also, see the note above.
2756 		 */
2757 		if (addr > end) {
2758 			stack_entry->avail_ssize = end - stack_entry->end;
2759 			addr = end;
2760 		}
2761 
2762 		grow_amount = addr - stack_entry->end;
2763 
2764 		/* Grow the underlying object if applicable. */
2765 		if (stack_entry->object.vm_object == NULL ||
2766 		    vm_object_coalesce(stack_entry->object.vm_object,
2767 		    OFF_TO_IDX(stack_entry->offset),
2768 		    (vm_size_t)(stack_entry->end - stack_entry->start),
2769 		    (vm_size_t)grow_amount)) {
2770 			/* Update the current entry. */
2771 			stack_entry->end = addr;
2772 			rv = KERN_SUCCESS;
2773 
2774 			if (next_entry != &map->header)
2775 				vm_map_clip_start(map, next_entry, addr);
2776 		} else
2777 			rv = KERN_FAILURE;
2778 	}
2779 
2780 	if (rv == KERN_SUCCESS && is_procstack)
2781 		vm->vm_ssize += btoc(grow_amount);
2782 
2783 	vm_map_unlock(map);
2784 
2785 	/*
2786 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
2787 	 */
2788 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
2789 		vm_map_wire(map,
2790 		    (stack_entry == next_entry) ? addr : addr - grow_amount,
2791 		    (stack_entry == next_entry) ? stack_entry->start : addr,
2792 		    (p->p_flag & P_SYSTEM)
2793 		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
2794 		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
2795 	}
2796 
2797 	return (rv);
2798 }
2799 
2800 /*
2801  * Unshare the specified VM space for exec.  If other processes are
2802  * mapped to it, then create a new one.  The new vmspace is empty.
2803  */
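/*
 * exec(2) handling typically calls this with bounds taken from the
 * image's sysentvec, e.g. (an illustrative sketch; the field names
 * are from the executable-format descriptor):
 *
 *	vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
 */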
2804 void
2805 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
2806 {
2807 	struct vmspace *oldvmspace = p->p_vmspace;
2808 	struct vmspace *newvmspace;
2809 
2810 	GIANT_REQUIRED;
2811 	newvmspace = vmspace_alloc(minuser, maxuser);
2812 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2813 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2814 	/*
2815 	 * This code is written like this for prototype purposes.  The
2816 	 * goal is to avoid running down the vmspace here, but to let the
2817 	 * other processes that are still using the vmspace finally run
2818 	 * it down.  Even though there is little or no chance of blocking
2819 	 * here, it is a good idea to keep this form for future mods.
2820 	 */
2821 	p->p_vmspace = newvmspace;
2822 	pmap_pinit2(vmspace_pmap(newvmspace));
2823 	vmspace_free(oldvmspace);
2824 	if (p == curthread->td_proc)		/* XXXKSE ? */
2825 		pmap_activate(curthread);
2826 }
2827 
2828 /*
2829  * Unshare the specified VM space for forcing COW.  This
2830  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2831  */
2832 void
2833 vmspace_unshare(struct proc *p)
2834 {
2835 	struct vmspace *oldvmspace = p->p_vmspace;
2836 	struct vmspace *newvmspace;
2837 
2838 	GIANT_REQUIRED;
2839 	if (oldvmspace->vm_refcnt == 1)
2840 		return;
2841 	newvmspace = vmspace_fork(oldvmspace);
2842 	p->p_vmspace = newvmspace;
2843 	pmap_pinit2(vmspace_pmap(newvmspace));
2844 	vmspace_free(oldvmspace);
2845 	if (p == curthread->td_proc)		/* XXXKSE ? */
2846 		pmap_activate(curthread);
2847 }
2848 
2849 /*
2850  *	vm_map_lookup:
2851  *
2852  *	Finds the VM object, offset, and
2853  *	protection for a given virtual address in the
2854  *	specified map, assuming a page fault of the
2855  *	type specified.
2856  *
2857  *	Leaves the map in question locked for read; return
2858  *	values are guaranteed until a vm_map_lookup_done
2859  *	call is performed.  Note that the map argument
2860  *	is in/out; the returned map must be used in
2861  *	the call to vm_map_lookup_done.
2862  *
2863  *	A handle (out_entry) is returned for use in
2864  *	vm_map_lookup_done, to make that fast.
2865  *
2866  *	If a lookup is requested with "write protection"
2867  *	specified, the map may be changed to perform virtual
2868  *	copying operations, although the data referenced will
2869  *	remain the same.
2870  */
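/*
 * Callers pair this with vm_map_lookup_done(), e.g. (an illustrative
 * sketch of the expected usage, not a quote of the fault handler):
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... use object and pindex with the map read-locked ...
 *	vm_map_lookup_done(map, entry);
 */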
2871 int
2872 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2873 	      vm_offset_t vaddr,
2874 	      vm_prot_t fault_typea,
2875 	      vm_map_entry_t *out_entry,	/* OUT */
2876 	      vm_object_t *object,		/* OUT */
2877 	      vm_pindex_t *pindex,		/* OUT */
2878 	      vm_prot_t *out_prot,		/* OUT */
2879 	      boolean_t *wired)			/* OUT */
2880 {
2881 	vm_map_entry_t entry;
2882 	vm_map_t map = *var_map;
2883 	vm_prot_t prot;
2884 	vm_prot_t fault_type = fault_typea;
2885 
2886 RetryLookup:;
2887 	/*
2888 	 * Lookup the faulting address.
2889 	 */
2890 
2891 	vm_map_lock_read(map);
2892 #define	RETURN(why) \
2893 		{ \
2894 		vm_map_unlock_read(map); \
2895 		return (why); \
2896 		}
2897 
2898 	/*
2899 	 * If the map has an interesting hint, try it before calling the
2900 	 * full-blown lookup routine.
2901 	 */
2902 	entry = map->root;
2903 	*out_entry = entry;
2904 	if (entry == NULL ||
2905 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2906 		/*
2907 		 * Entry was either not a valid hint, or the vaddr was not
2908 		 * contained in the entry, so do a full lookup.
2909 		 */
2910 		if (!vm_map_lookup_entry(map, vaddr, out_entry))
2911 			RETURN(KERN_INVALID_ADDRESS);
2912 
2913 		entry = *out_entry;
2914 	}
2915 
2916 	/*
2917 	 * Handle submaps.
2918 	 */
2919 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2920 		vm_map_t old_map = map;
2921 
2922 		*var_map = map = entry->object.sub_map;
2923 		vm_map_unlock_read(old_map);
2924 		goto RetryLookup;
2925 	}
2926 
2927 	/*
2928 	 * Check whether this task is allowed to have this page.
2929 	 * Note the special case for MAP_ENTRY_COW
2930 	 * pages with an override.  This is to implement a forced
2931 	 * COW for debuggers.
2932 	 */
2933 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2934 		prot = entry->max_protection;
2935 	else
2936 		prot = entry->protection;
2937 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2938 	if ((fault_type & prot) != fault_type) {
2939 			RETURN(KERN_PROTECTION_FAILURE);
2940 	}
2941 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
2942 	    (entry->eflags & MAP_ENTRY_COW) &&
2943 	    (fault_type & VM_PROT_WRITE) &&
2944 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2945 		RETURN(KERN_PROTECTION_FAILURE);
2946 	}
2947 
2948 	/*
2949 	 * If this page is not pageable, we have to get it for all possible
2950 	 * accesses.
2951 	 */
2952 	*wired = (entry->wired_count != 0);
2953 	if (*wired)
2954 		prot = fault_type = entry->protection;
2955 
2956 	/*
2957 	 * If the entry was copy-on-write, shadow it or demote the permissions.
2958 	 */
2959 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2960 		/*
2961 		 * If we want to write the page, we may as well handle that
2962 		 * now since we've got the map locked.
2963 		 *
2964 		 * If we don't need to write the page, we just demote the
2965 		 * permissions allowed.
2966 		 */
2967 		if (fault_type & VM_PROT_WRITE) {
2968 			/*
2969 			 * Make a new object, and place it in the object
2970 			 * chain.  Note that no new references have appeared
2971 			 * -- one just moved from the map to the new
2972 			 * object.
2973 			 */
2974 			if (vm_map_lock_upgrade(map))
2975 				goto RetryLookup;
2976 
2977 			vm_object_shadow(
2978 			    &entry->object.vm_object,
2979 			    &entry->offset,
2980 			    atop(entry->end - entry->start));
2981 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2982 
2983 			vm_map_lock_downgrade(map);
2984 		} else {
2985 			/*
2986 			 * We're attempting to read a copy-on-write page --
2987 			 * don't allow writes.
2988 			 */
2989 			prot &= ~VM_PROT_WRITE;
2990 		}
2991 	}
2992 
2993 	/*
2994 	 * Create an object if necessary.
2995 	 */
2996 	if (entry->object.vm_object == NULL &&
2997 	    !map->system_map) {
2998 		if (vm_map_lock_upgrade(map))
2999 			goto RetryLookup;
3000 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3001 		    atop(entry->end - entry->start));
3002 		entry->offset = 0;
3003 		vm_map_lock_downgrade(map);
3004 	}
3005 
3006 	/*
3007 	 * Return the object/offset from this entry.  If the entry was
3008 	 * copy-on-write or empty, it has been fixed up.
3009 	 */
3010 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3011 	*object = entry->object.vm_object;
3012 
3013 	/*
3014 	 * Return the protection with which this access is permitted.
3015 	 */
3016 	*out_prot = prot;
3017 	return (KERN_SUCCESS);
3018 
3019 #undef	RETURN
3020 }
3021 
3022 /*
3023  *	vm_map_lookup_done:
3024  *
3025  *	Releases locks acquired by a vm_map_lookup
3026  *	(according to the handle returned by that lookup).
3027  */
3028 void
3029 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
3030 {
3031 	/*
3032 	 * Unlock the main-level map
3033 	 */
3034 	vm_map_unlock_read(map);
3035 }
3036 
3037 #include "opt_ddb.h"
3038 #ifdef DDB
3039 #include <sys/kernel.h>
3040 
3041 #include <ddb/ddb.h>
3042 
3043 /*
3044  *	vm_map_print:	[ debug ]
3045  */
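/*
 * Invoked from the kernel debugger as, e.g.:
 *
 *	db> show map <address-of-vm_map>
 */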
3046 DB_SHOW_COMMAND(map, vm_map_print)
3047 {
3048 	static int nlines;
3049 	/* XXX convert args. */
3050 	vm_map_t map = (vm_map_t)addr;
3051 	boolean_t full = have_addr;
3052 
3053 	vm_map_entry_t entry;
3054 
3055 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3056 	    (void *)map,
3057 	    (void *)map->pmap, map->nentries, map->timestamp);
3058 	nlines++;
3059 
3060 	if (!full && db_indent)
3061 		return;
3062 
3063 	db_indent += 2;
3064 	for (entry = map->header.next; entry != &map->header;
3065 	    entry = entry->next) {
3066 		db_iprintf("map entry %p: start=%p, end=%p\n",
3067 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3068 		nlines++;
3069 		{
3070 			static char *inheritance_name[4] =
3071 			{"share", "copy", "none", "donate_copy"};
3072 
3073 			db_iprintf(" prot=%x/%x/%s",
3074 			    entry->protection,
3075 			    entry->max_protection,
3076 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3077 			if (entry->wired_count != 0)
3078 				db_printf(", wired");
3079 		}
3080 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3081 			db_printf(", share=%p, offset=0x%jx\n",
3082 			    (void *)entry->object.sub_map,
3083 			    (uintmax_t)entry->offset);
3084 			nlines++;
3085 			if ((entry->prev == &map->header) ||
3086 			    (entry->prev->object.sub_map !=
3087 				entry->object.sub_map)) {
3088 				db_indent += 2;
3089 				vm_map_print((db_expr_t)(intptr_t)
3090 					     entry->object.sub_map,
3091 					     full, 0, (char *)0);
3092 				db_indent -= 2;
3093 			}
3094 		} else {
3095 			db_printf(", object=%p, offset=0x%jx",
3096 			    (void *)entry->object.vm_object,
3097 			    (uintmax_t)entry->offset);
3098 			if (entry->eflags & MAP_ENTRY_COW)
3099 				db_printf(", copy (%s)",
3100 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3101 			db_printf("\n");
3102 			nlines++;
3103 
3104 			if ((entry->prev == &map->header) ||
3105 			    (entry->prev->object.vm_object !=
3106 				entry->object.vm_object)) {
3107 				db_indent += 2;
3108 				vm_object_print((db_expr_t)(intptr_t)
3109 						entry->object.vm_object,
3110 						full, 0, (char *)0);
3111 				nlines += 4;
3112 				db_indent -= 2;
3113 			}
3114 		}
3115 	}
3116 	db_indent -= 2;
3117 	if (db_indent == 0)
3118 		nlines = 0;
3119 }
3120 
3121 
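/*
 * "show procvm [<proc address>]" prints the vmspace, map, and pmap
 * pointers of the given (or current) process, followed by its map.
 */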
3122 DB_SHOW_COMMAND(procvm, procvm)
3123 {
3124 	struct proc *p;
3125 
3126 	if (have_addr) {
3127 		p = (struct proc *) addr;
3128 	} else {
3129 		p = curproc;
3130 	}
3131 
3132 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3133 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3134 	    (void *)vmspace_pmap(p->p_vmspace));
3135 
3136 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3137 }
3138 
3139 #endif /* DDB */
3140