xref: /freebsd/sys/vm/vm_map.c (revision dce6e6518b85561495cff38a3074a69d29d58a55)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  */
64 
65 /*
66  *	Virtual memory mapping module.
67  */
68 
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD$");
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/ktr.h>
75 #include <sys/lock.h>
76 #include <sys/mutex.h>
77 #include <sys/proc.h>
78 #include <sys/vmmeter.h>
79 #include <sys/mman.h>
80 #include <sys/vnode.h>
81 #include <sys/resourcevar.h>
82 #include <sys/sysent.h>
83 #include <sys/shm.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_pager.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 #include <vm/swap_pager.h>
95 #include <vm/uma.h>
96 
97 /*
98  *	Virtual memory maps provide for the mapping, protection,
99  *	and sharing of virtual memory objects.  In addition,
100  *	this module provides for an efficient virtual copy of
101  *	memory from one map to another.
102  *
103  *	Synchronization is required prior to most operations.
104  *
105  *	Maps consist of an ordered doubly-linked list of simple
106  *	entries; a splay tree and a free-space hint speed up lookups.
107  *
108  *	Since portions of maps are specified by start/end addresses,
109  *	which may not align with existing map entries, all
110  *	routines merely "clip" entries to these start/end values.
111  *	[That is, an entry is split into two, bordering at a
112  *	start or end value.]  Note that these clippings may not
113  *	always be necessary (as the two resulting entries are then
114  *	not changed); however, the clipping is done for convenience.
115  *
116  *	As mentioned above, virtual copy operations are performed
117  *	by copying VM object references from one map to
118  *	another, and then marking both regions as copy-on-write.
119  */
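/*
 *	Example (illustrative): clipping an entry spanning
 *	[0x2000, 0x6000) at address 0x4000 yields two adjacent entries,
 *	[0x2000, 0x4000) and [0x4000, 0x6000), that reference the same
 *	backing object at successive offsets.
 */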
120 
121 /*
122  *	vm_map_startup:
123  *
124  *	Initialize the vm_map module.  Must be called before
125  *	any other vm_map routines.
126  *
127  *	Map and entry structures are allocated from the general
128  *	purpose memory pool with some exceptions:
129  *
130  *	- The kernel map and kmem submap are allocated statically.
131  *	- Kernel map entries are allocated out of a static pool.
132  *
133  *	These restrictions are necessary since malloc() uses the
134  *	maps and requires map entries.
135  */
136 
137 static struct mtx map_sleep_mtx;
138 static uma_zone_t mapentzone;
139 static uma_zone_t kmapentzone;
140 static uma_zone_t mapzone;
141 static uma_zone_t vmspace_zone;
142 static struct vm_object kmapentobj;
143 static void vmspace_zinit(void *mem, int size);
144 static void vmspace_zfini(void *mem, int size);
145 static void vm_map_zinit(void *mem, int size);
146 static void vm_map_zfini(void *mem, int size);
147 static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
148 
149 #ifdef INVARIANTS
150 static void vm_map_zdtor(void *mem, int size, void *arg);
151 static void vmspace_zdtor(void *mem, int size, void *arg);
152 #endif
153 
154 void
155 vm_map_startup(void)
156 {
157 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
158 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
159 #ifdef INVARIANTS
160 	    vm_map_zdtor,
161 #else
162 	    NULL,
163 #endif
164 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
165 	uma_prealloc(mapzone, MAX_KMAP);
166 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
167 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
168 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
169 	uma_prealloc(kmapentzone, MAX_KMAPENT);
170 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
171 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
172 	uma_prealloc(mapentzone, MAX_MAPENT);
173 }
174 
175 static void
176 vmspace_zfini(void *mem, int size)
177 {
178 	struct vmspace *vm;
179 
180 	vm = (struct vmspace *)mem;
181 
182 	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
183 }
184 
185 static void
186 vmspace_zinit(void *mem, int size)
187 {
188 	struct vmspace *vm;
189 
190 	vm = (struct vmspace *)mem;
191 
192 	vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
193 }
194 
195 static void
196 vm_map_zfini(void *mem, int size)
197 {
198 	vm_map_t map;
199 
200 	map = (vm_map_t)mem;
201 	mtx_destroy(&map->system_mtx);
202 	lockdestroy(&map->lock);
203 }
204 
205 static void
206 vm_map_zinit(void *mem, int size)
207 {
208 	vm_map_t map;
209 
210 	map = (vm_map_t)mem;
211 	map->nentries = 0;
212 	map->size = 0;
213 	map->infork = 0;
214 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
215 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
216 }
217 
218 #ifdef INVARIANTS
219 static void
220 vmspace_zdtor(void *mem, int size, void *arg)
221 {
222 	struct vmspace *vm;
223 
224 	vm = (struct vmspace *)mem;
225 
226 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
227 }
228 static void
229 vm_map_zdtor(void *mem, int size, void *arg)
230 {
231 	vm_map_t map;
232 
233 	map = (vm_map_t)mem;
234 	KASSERT(map->nentries == 0,
235 	    ("map %p nentries == %d on free.",
236 	    map, map->nentries));
237 	KASSERT(map->size == 0,
238 	    ("map %p size == %lu on free.",
239 	    map, (unsigned long)map->size));
240 	KASSERT(map->infork == 0,
241 	    ("map %p infork == %d on free.",
242 	    map, map->infork));
243 }
244 #endif	/* INVARIANTS */
245 
246 /*
247  * Allocate a vmspace structure, including a vm_map and pmap,
248  * and initialize those structures.  The refcnt is set to 1.
249  * The remaining fields must be initialized by the caller.
250  */
251 struct vmspace *
252 vmspace_alloc(vm_offset_t min, vm_offset_t max)
254 {
255 	struct vmspace *vm;
256 
257 	GIANT_REQUIRED;
258 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
259 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
260 	_vm_map_init(&vm->vm_map, min, max);
261 	pmap_pinit(vmspace_pmap(vm));
262 	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
263 	vm->vm_refcnt = 1;
264 	vm->vm_shm = NULL;
265 	vm->vm_exitingcnt = 0;
266 	return (vm);
267 }
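/*
 * Minimal usage sketch.  Illustrative only: the address bounds below are
 * the conventional user-space limits and are assumptions of this example,
 * not requirements of vmspace_alloc().
 */
#if 0
	struct vmspace *vm;

	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	/* ... populate vm->vm_map with vm_map_insert()/vm_map_find() ... */
	vmspace_free(vm);	/* drops the reference set to 1 above */
#endif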
268 
269 void
270 vm_init2(void)
271 {
272 	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
273 	    (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8);
274 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
275 #ifdef INVARIANTS
276 	    vmspace_zdtor,
277 #else
278 	    NULL,
279 #endif
280 	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
281 	pmap_init2();
282 }
283 
284 static __inline void
285 vmspace_dofree(struct vmspace *vm)
286 {
287 	CTR1(KTR_VM, "vmspace_free: %p", vm);
288 
289 	/*
290 	 * Make sure any SysV shm is freed, it might not have been in
291 	 * exit1().
292 	 */
293 	shmexit(vm);
294 
295 	/*
296 	 * Lock the map, to wait out all other references to it.
297 	 * Delete all of the mappings and pages they hold, then call
298 	 * the pmap module to reclaim anything left.
299 	 */
300 	vm_map_lock(&vm->vm_map);
301 	(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
302 	    vm->vm_map.max_offset);
303 	vm_map_unlock(&vm->vm_map);
304 
305 	pmap_release(vmspace_pmap(vm));
306 	uma_zfree(vmspace_zone, vm);
307 }
308 
309 void
310 vmspace_free(struct vmspace *vm)
311 {
312 	GIANT_REQUIRED;
313 
314 	if (vm->vm_refcnt == 0)
315 		panic("vmspace_free: attempt to free already freed vmspace");
316 
317 	if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
318 		vmspace_dofree(vm);
319 }
320 
321 void
322 vmspace_exitfree(struct proc *p)
323 {
324 	struct vmspace *vm;
325 
326 	GIANT_REQUIRED;
327 	vm = p->p_vmspace;
328 	p->p_vmspace = NULL;
329 
330 	/*
331 	 * cleanup by parent process wait()ing on exiting child.  vm_refcnt
332 	 * may not be 0 (e.g. fork() and child exits without exec()ing).
333 	 * exitingcnt may increment above 0 and drop back down to zero
334 	 * several times while vm_refcnt is held non-zero.  vm_refcnt
335 	 * may also increment above 0 and drop back down to zero several
336 	 * times while vm_exitingcnt is held non-zero.
337 	 *
338 	 * The last wait on the exiting child's vmspace will clean up
339 	 * the remainder of the vmspace.
340 	 */
341 	if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
342 		vmspace_dofree(vm);
343 }
344 
345 /*
346  * vmspace_swap_count() - count the approximate swap usage in pages for a
347  *			  vmspace.
348  *
349  *	The map must be locked.
350  *
351  *	Swap usage is determined by taking the proportional swap used by
352  *	VM objects backing the VM map.  To make up for fractional losses,
353  *	if the VM object has any swap use at all the associated map entries
354  *	count for at least 1 swap page.
355  */
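/*
 * Worked example (illustrative numbers, assuming the usual
 * SWAP_META_PAGES == 16): an object of 1024 pages with swp_bcount == 4
 * holds 4 * 16 == 64 swapped pages; a map entry covering n == 256 of
 * its pages is then charged 64 * 256 / 1024 + 1 == 17 pages.
 */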
356 int
357 vmspace_swap_count(struct vmspace *vmspace)
358 {
359 	vm_map_t map = &vmspace->vm_map;
360 	vm_map_entry_t cur;
361 	int count = 0;
362 
363 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
364 		vm_object_t object;
365 
366 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
367 		    (object = cur->object.vm_object) != NULL) {
368 			VM_OBJECT_LOCK(object);
369 			if (object->type == OBJT_SWAP &&
370 			    object->un_pager.swp.swp_bcount != 0) {
371 				int n = (cur->end - cur->start) / PAGE_SIZE;
372 
373 				count += object->un_pager.swp.swp_bcount *
374 				    SWAP_META_PAGES * n / object->size + 1;
375 			}
376 			VM_OBJECT_UNLOCK(object);
377 		}
378 	}
379 	return (count);
380 }
381 
382 void
383 _vm_map_lock(vm_map_t map, const char *file, int line)
384 {
385 	int error;
386 
387 	if (map->system_map)
388 		_mtx_lock_flags(&map->system_mtx, 0, file, line);
389 	else {
390 		error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
391 		KASSERT(error == 0, ("%s: failed to get lock", __func__));
392 	}
393 	map->timestamp++;
394 }
395 
396 void
397 _vm_map_unlock(vm_map_t map, const char *file, int line)
398 {
399 
400 	if (map->system_map)
401 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
402 	else
403 		lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
404 }
405 
406 void
407 _vm_map_lock_read(vm_map_t map, const char *file, int line)
408 {
409 	int error;
410 
411 	if (map->system_map)
412 		_mtx_lock_flags(&map->system_mtx, 0, file, line);
413 	else {
414 		error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
415 		KASSERT(error == 0, ("%s: failed to get lock", __func__));
416 	}
417 }
418 
419 void
420 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
421 {
422 
423 	if (map->system_map)
424 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
425 	else
426 		lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
427 }
428 
429 int
430 _vm_map_trylock(vm_map_t map, const char *file, int line)
431 {
432 	int error;
433 
434 	error = map->system_map ?
435 	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
436 	    lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
437 	if (error == 0)
438 		map->timestamp++;
439 	return (error == 0);
440 }
441 
442 int
443 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
444 {
445 	int error;
446 
447 	error = map->system_map ?
448 	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
449 	    lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
450 	return (error == 0);
451 }
452 
453 int
454 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
455 {
456 
457 	if (map->system_map) {
458 #ifdef INVARIANTS
459 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
460 #endif
461 	} else
462 		KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
463 		    ("%s: lock not held", __func__));
464 	map->timestamp++;
465 	return (0);
466 }
467 
468 void
469 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
470 {
471 
472 	if (map->system_map) {
473 #ifdef INVARIANTS
474 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
475 #endif
476 	} else
477 		KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
478 		    ("%s: lock not held", __func__));
479 }
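/*
 * Usage sketch: callers normally use the vm_map_lock() family of macros
 * from vm_map.h, which supply the file and line arguments to the
 * functions above.
 */
#if 0
	vm_map_lock(map);
	/* ... modify the entry list ... */
	vm_map_unlock(map);

	vm_map_lock_read(map);
	/* ... walk the entry list without modifying it ... */
	vm_map_unlock_read(map);
#endif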
480 
481 /*
482  *	vm_map_unlock_and_wait:  release the map lock and sleep; paired with vm_map_wakeup().
483  */
484 int
485 vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
486 {
487 
488 	mtx_lock(&map_sleep_mtx);
489 	vm_map_unlock(map);
490 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0));
491 }
492 
493 /*
494  *	vm_map_wakeup:  wake up any thread sleeping in vm_map_unlock_and_wait().
495  */
496 void
497 vm_map_wakeup(vm_map_t map)
498 {
499 
500 	/*
501 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
502 	 * from being performed (and lost) between the vm_map_unlock()
503 	 * and the msleep() in vm_map_unlock_and_wait().
504 	 */
505 	mtx_lock(&map_sleep_mtx);
506 	mtx_unlock(&map_sleep_mtx);
507 	wakeup(&map->root);
508 }
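/*
 * Sketch of the handshake; both halves appear in vm_map_wire() and
 * vm_map_unwire() below.
 */
#if 0
	/* Waiter, upon finding an in-transition entry: */
	entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
	(void) vm_map_unlock_and_wait(map, FALSE);

	/* Waker, after clearing the in-transition flag: */
	if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP)
		vm_map_wakeup(map);
#endif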
509 
510 long
511 vmspace_resident_count(struct vmspace *vmspace)
512 {
513 	return pmap_resident_count(vmspace_pmap(vmspace));
514 }
515 
516 /*
517  *	vm_map_create:
518  *
519  *	Creates and returns a new empty VM map with
520  *	the given physical map structure, and having
521  *	the given lower and upper address bounds.
522  */
523 vm_map_t
524 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
525 {
526 	vm_map_t result;
527 
528 	result = uma_zalloc(mapzone, M_WAITOK);
529 	CTR1(KTR_VM, "vm_map_create: %p", result);
530 	_vm_map_init(result, min, max);
531 	result->pmap = pmap;
532 	return (result);
533 }
534 
535 /*
536  * Initialize an existing vm_map structure
537  * such as that in the vmspace structure.
538  * The pmap is set elsewhere.
539  */
540 static void
541 _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
542 {
543 
544 	map->header.next = map->header.prev = &map->header;
545 	map->needs_wakeup = FALSE;
546 	map->system_map = 0;
547 	map->min_offset = min;
548 	map->max_offset = max;
549 	map->first_free = &map->header;
550 	map->root = NULL;
551 	map->timestamp = 0;
552 }
553 
554 void
555 vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
556 {
557 	_vm_map_init(map, min, max);
558 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
559 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
560 }
561 
562 /*
563  *	vm_map_entry_dispose:	[ internal use only ]
564  *
565  *	Inverse of vm_map_entry_create.
566  */
567 static void
568 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
569 {
570 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
571 }
572 
573 /*
574  *	vm_map_entry_create:	[ internal use only ]
575  *
576  *	Allocates a VM map entry for insertion.
577  *	No entry fields are filled in.
578  */
579 static vm_map_entry_t
580 vm_map_entry_create(vm_map_t map)
581 {
582 	vm_map_entry_t new_entry;
583 
584 	if (map->system_map)
585 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
586 	else
587 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
588 	if (new_entry == NULL)
589 		panic("vm_map_entry_create: kernel resources exhausted");
590 	return (new_entry);
591 }
592 
593 /*
594  *	vm_map_entry_set_behavior:
595  *
596  *	Set the expected access behavior, either normal, random, or
597  *	sequential.
598  */
599 static __inline void
600 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
601 {
602 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
603 	    (behavior & MAP_ENTRY_BEHAV_MASK);
604 }
605 
606 /*
607  *	vm_map_entry_splay:
608  *
609  *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
610  *	the vm_map_entry containing the given address.  If, however, that
611  *	address is not found in the vm_map, returns a vm_map_entry that is
612  *	adjacent to the address, coming before or after it.
613  */
614 static vm_map_entry_t
615 vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
616 {
617 	struct vm_map_entry dummy;
618 	vm_map_entry_t lefttreemax, righttreemin, y;
619 
620 	if (root == NULL)
621 		return (root);
622 	lefttreemax = righttreemin = &dummy;
623 	for (;; root = y) {
624 		if (address < root->start) {
625 			if ((y = root->left) == NULL)
626 				break;
627 			if (address < y->start) {
628 				/* Rotate right. */
629 				root->left = y->right;
630 				y->right = root;
631 				root = y;
632 				if ((y = root->left) == NULL)
633 					break;
634 			}
635 			/* Link into the new root's right tree. */
636 			righttreemin->left = root;
637 			righttreemin = root;
638 		} else if (address >= root->end) {
639 			if ((y = root->right) == NULL)
640 				break;
641 			if (address >= y->end) {
642 				/* Rotate left. */
643 				root->right = y->left;
644 				y->left = root;
645 				root = y;
646 				if ((y = root->right) == NULL)
647 					break;
648 			}
649 			/* Link into the new root's left tree. */
650 			lefttreemax->right = root;
651 			lefttreemax = root;
652 		} else
653 			break;
654 	}
655 	/* Assemble the new root. */
656 	lefttreemax->right = root->left;
657 	righttreemin->left = root->right;
658 	root->left = dummy.right;
659 	root->right = dummy.left;
660 	return (root);
661 }
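/*
 * Example (illustrative): when the address lies in the left subtree of
 * root's left child y, the "rotate right" step above transforms
 *
 *	      root                     y
 *	      /  \                   /   \
 *	     y    C      into       A    root
 *	    / \                          /  \
 *	   A   B                        B    C
 *
 * and the search continues from the new root y.
 */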
662 
663 /*
664  *	vm_map_entry_{un,}link:
665  *
666  *	Insert/remove entries from maps.
667  */
668 static void
669 vm_map_entry_link(vm_map_t map,
670 		  vm_map_entry_t after_where,
671 		  vm_map_entry_t entry)
672 {
673 
674 	CTR4(KTR_VM,
675 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
676 	    map->nentries, entry, after_where);
677 	map->nentries++;
678 	entry->prev = after_where;
679 	entry->next = after_where->next;
680 	entry->next->prev = entry;
681 	after_where->next = entry;
682 
683 	if (after_where != &map->header) {
684 		if (after_where != map->root)
685 			vm_map_entry_splay(after_where->start, map->root);
686 		entry->right = after_where->right;
687 		entry->left = after_where;
688 		after_where->right = NULL;
689 	} else {
690 		entry->right = map->root;
691 		entry->left = NULL;
692 	}
693 	map->root = entry;
694 }
695 
696 static void
697 vm_map_entry_unlink(vm_map_t map,
698 		    vm_map_entry_t entry)
699 {
700 	vm_map_entry_t next, prev, root;
701 
702 	if (entry != map->root)
703 		vm_map_entry_splay(entry->start, map->root);
704 	if (entry->left == NULL)
705 		root = entry->right;
706 	else {
707 		root = vm_map_entry_splay(entry->start, entry->left);
708 		root->right = entry->right;
709 	}
710 	map->root = root;
711 
712 	prev = entry->prev;
713 	next = entry->next;
714 	next->prev = prev;
715 	prev->next = next;
716 	map->nentries--;
717 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
718 	    map->nentries, entry);
719 }
720 
721 /*
722  *	vm_map_lookup_entry:	[ internal use only ]
723  *
724  *	Finds the map entry containing (or
725  *	immediately preceding) the specified address
726  *	in the given map; the entry is returned
727  *	in the "entry" parameter.  The boolean
728  *	result indicates whether the address is
729  *	actually contained in the map.
730  */
731 boolean_t
732 vm_map_lookup_entry(
733 	vm_map_t map,
734 	vm_offset_t address,
735 	vm_map_entry_t *entry)	/* OUT */
736 {
737 	vm_map_entry_t cur;
738 
739 	cur = vm_map_entry_splay(address, map->root);
740 	if (cur == NULL)
741 		*entry = &map->header;
742 	else {
743 		map->root = cur;
744 
745 		if (address >= cur->start) {
746 			*entry = cur;
747 			if (cur->end > address)
748 				return (TRUE);
749 		} else
750 			*entry = cur->prev;
751 	}
752 	return (FALSE);
753 }
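/*
 * Usage sketch:
 */
#if 0
	vm_map_entry_t entry;

	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* entry->start <= addr && addr < entry->end */
	} else {
		/*
		 * "entry" immediately precedes addr, or is &map->header
		 * if addr precedes every entry in the map.
		 */
	}
#endif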
754 
755 /*
756  *	vm_map_insert:
757  *
758  *	Inserts the given whole VM object into the target
759  *	map at the specified address range.  The object's
760  *	size should match that of the address range.
761  *
762  *	Requires that the map be locked, and leaves it so.
763  *
764  *	If object is non-NULL, ref count must be bumped by caller
765  *	prior to making call to account for the new entry.
766  */
767 int
768 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
769 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
770 	      int cow)
771 {
772 	vm_map_entry_t new_entry;
773 	vm_map_entry_t prev_entry;
774 	vm_map_entry_t temp_entry;
775 	vm_eflags_t protoeflags;
776 
777 	/*
778 	 * Check that the start and end points are not bogus.
779 	 */
780 	if ((start < map->min_offset) || (end > map->max_offset) ||
781 	    (start >= end))
782 		return (KERN_INVALID_ADDRESS);
783 
784 	/*
785 	 * Find the entry prior to the proposed starting address; if it's part
786 	 * of an existing entry, this range is bogus.
787 	 */
788 	if (vm_map_lookup_entry(map, start, &temp_entry))
789 		return (KERN_NO_SPACE);
790 
791 	prev_entry = temp_entry;
792 
793 	/*
794 	 * Assert that the next entry doesn't overlap the end point.
795 	 */
796 	if ((prev_entry->next != &map->header) &&
797 	    (prev_entry->next->start < end))
798 		return (KERN_NO_SPACE);
799 
800 	protoeflags = 0;
801 
802 	if (cow & MAP_COPY_ON_WRITE)
803 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
804 
805 	if (cow & MAP_NOFAULT) {
806 		protoeflags |= MAP_ENTRY_NOFAULT;
807 
808 		KASSERT(object == NULL,
809 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
810 	}
811 	if (cow & MAP_DISABLE_SYNCER)
812 		protoeflags |= MAP_ENTRY_NOSYNC;
813 	if (cow & MAP_DISABLE_COREDUMP)
814 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
815 
816 	if (object != NULL) {
817 		/*
818 		 * OBJ_ONEMAPPING must be cleared unless this mapping
819 		 * is trivially proven to be the only mapping for any
820 		 * of the object's pages.  (Object granularity
821 		 * reference counting is insufficient to recognize
822 		 * aliases with precision.)
823 		 */
824 		VM_OBJECT_LOCK(object);
825 		if (object->ref_count > 1 || object->shadow_count != 0)
826 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
827 		VM_OBJECT_UNLOCK(object);
828 	}
829 	else if ((prev_entry != &map->header) &&
830 		 (prev_entry->eflags == protoeflags) &&
831 		 (prev_entry->end == start) &&
832 		 (prev_entry->wired_count == 0) &&
833 		 ((prev_entry->object.vm_object == NULL) ||
834 		  vm_object_coalesce(prev_entry->object.vm_object,
835 				     OFF_TO_IDX(prev_entry->offset),
836 				     (vm_size_t)(prev_entry->end - prev_entry->start),
837 				     (vm_size_t)(end - prev_entry->end)))) {
838 		/*
839 		 * We were able to extend the object.  Determine if we
840 		 * can extend the previous map entry to include the
841 		 * new range as well.
842 		 */
843 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
844 		    (prev_entry->protection == prot) &&
845 		    (prev_entry->max_protection == max)) {
846 			map->size += (end - prev_entry->end);
847 			prev_entry->end = end;
848 			vm_map_simplify_entry(map, prev_entry);
849 			return (KERN_SUCCESS);
850 		}
851 
852 		/*
853 		 * If we can extend the object but cannot extend the
854 		 * map entry, we have to create a new map entry.  We
855 		 * must bump the ref count on the extended object to
856 		 * account for it.  object may be NULL.
857 		 */
858 		object = prev_entry->object.vm_object;
859 		offset = prev_entry->offset +
860 			(prev_entry->end - prev_entry->start);
861 		vm_object_reference(object);
862 	}
863 
864 	/*
865 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
866 	 * in things like the buffer map where we manage kva but do not manage
867 	 * backing objects.
868 	 */
869 
870 	/*
871 	 * Create a new entry
872 	 */
873 	new_entry = vm_map_entry_create(map);
874 	new_entry->start = start;
875 	new_entry->end = end;
876 
877 	new_entry->eflags = protoeflags;
878 	new_entry->object.vm_object = object;
879 	new_entry->offset = offset;
880 	new_entry->avail_ssize = 0;
881 
882 	new_entry->inheritance = VM_INHERIT_DEFAULT;
883 	new_entry->protection = prot;
884 	new_entry->max_protection = max;
885 	new_entry->wired_count = 0;
886 
887 	/*
888 	 * Insert the new entry into the list
889 	 */
890 	vm_map_entry_link(map, prev_entry, new_entry);
891 	map->size += new_entry->end - new_entry->start;
892 
893 	/*
894 	 * Update the free space hint
895 	 */
896 	if ((map->first_free == prev_entry) &&
897 	    (prev_entry->end >= new_entry->start)) {
898 		map->first_free = new_entry;
899 	}
900 
901 #if 0
902 	/*
903 	 * Temporarily removed to avoid MAP_STACK panic, due to
904 	 * MAP_STACK being a huge hack.  Will be added back in
905 	 * when MAP_STACK (and the user stack mapping) is fixed.
906 	 */
907 	/*
908 	 * It may be possible to simplify the entry
909 	 */
910 	vm_map_simplify_entry(map, new_entry);
911 #endif
912 
913 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
914 		vm_map_pmap_enter(map, start,
915 				    object, OFF_TO_IDX(offset), end - start,
916 				    cow & MAP_PREFAULT_PARTIAL);
917 	}
918 
919 	return (KERN_SUCCESS);
920 }
921 
922 /*
923  * Find sufficient space for `length' bytes in the given map, starting at
924  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
925  */
926 int
927 vm_map_findspace(
928 	vm_map_t map,
929 	vm_offset_t start,
930 	vm_size_t length,
931 	vm_offset_t *addr)
932 {
933 	vm_map_entry_t entry, next;
934 	vm_offset_t end;
935 
936 	if (start < map->min_offset)
937 		start = map->min_offset;
938 	if (start > map->max_offset)
939 		return (1);
940 
941 	/*
942 	 * Look for the first possible address; if there's already something
943 	 * at this address, we have to start after it.
944 	 */
945 	if (start == map->min_offset) {
946 		if ((entry = map->first_free) != &map->header)
947 			start = entry->end;
948 	} else {
949 		vm_map_entry_t tmp;
950 
951 		if (vm_map_lookup_entry(map, start, &tmp))
952 			start = tmp->end;
953 		entry = tmp;
954 	}
955 
956 	/*
957 	 * Look through the rest of the map, trying to fit a new region in the
958 	 * gap between existing regions, or after the very last region.
959 	 */
960 	for (;; start = (entry = next)->end) {
961 		/*
962 		 * Find the end of the proposed new region.  Be sure we didn't
963 		 * go beyond the end of the map, or wrap around the address;
964 		 * if so, we lose.  Otherwise, if this is the last entry, or
965 		 * if the proposed new region fits before the next entry, we
966 		 * win.
967 		 */
968 		end = start + length;
969 		if (end > map->max_offset || end < start)
970 			return (1);
971 		next = entry->next;
972 		if (next == &map->header || next->start >= end)
973 			break;
974 	}
975 	*addr = start;
976 	if (map == kernel_map) {
977 		vm_offset_t ksize;
978 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
979 			pmap_growkernel(ksize);
980 		}
981 	}
982 	return (0);
983 }
984 
985 /*
986  *	vm_map_find finds an unallocated region in the target address
987  *	map with the given length.  The search is defined to be
988  *	first-fit from the specified address; the region found is
989  *	returned in the same parameter.
990  *
991  *	If object is non-NULL, ref count must be bumped by caller
992  *	prior to making call to account for the new entry.
993  */
994 int
995 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
996 	    vm_offset_t *addr,	/* IN/OUT */
997 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
998 	    vm_prot_t max, int cow)
999 {
1000 	vm_offset_t start;
1001 	int result, s = 0;
1002 
1003 	start = *addr;
1004 
1005 	if (map == kmem_map)
1006 		s = splvm();
1007 
1008 	vm_map_lock(map);
1009 	if (find_space) {
1010 		if (vm_map_findspace(map, start, length, addr)) {
1011 			vm_map_unlock(map);
1012 			if (map == kmem_map)
1013 				splx(s);
1014 			return (KERN_NO_SPACE);
1015 		}
1016 		start = *addr;
1017 	}
1018 	result = vm_map_insert(map, object, offset,
1019 		start, start + length, prot, max, cow);
1020 	vm_map_unlock(map);
1021 
1022 	if (map == kmem_map)
1023 		splx(s);
1024 
1025 	return (result);
1026 }
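/*
 * Usage sketch (illustrative): first-fit allocation of one anonymous
 * page anywhere in the map.
 */
#if 0
	vm_offset_t addr;
	int rv;

	addr = vm_map_min(map);
	rv = vm_map_find(map, NULL, 0, &addr, PAGE_SIZE, TRUE,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (rv == KERN_SUCCESS) {
		/* [addr, addr + PAGE_SIZE) is now reserved in the map. */
	}
#endif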
1027 
1028 /*
1029  *	vm_map_simplify_entry:
1030  *
1031  *	Simplify the given map entry by merging with either neighbor.  This
1032  *	routine also has the ability to merge with both neighbors.
1033  *
1034  *	The map must be locked.
1035  *
1036  *	This routine guarantees that the passed entry remains valid (though
1037  *	possibly extended).  When merging, this routine may delete one or
1038  *	both neighbors.
1039  */
1040 void
1041 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1042 {
1043 	vm_map_entry_t next, prev;
1044 	vm_size_t prevsize, esize;
1045 
1046 	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1047 		return;
1048 
1049 	prev = entry->prev;
1050 	if (prev != &map->header) {
1051 		prevsize = prev->end - prev->start;
1052 		if ( (prev->end == entry->start) &&
1053 		     (prev->object.vm_object == entry->object.vm_object) &&
1054 		     (!prev->object.vm_object ||
1055 			(prev->offset + prevsize == entry->offset)) &&
1056 		     (prev->eflags == entry->eflags) &&
1057 		     (prev->protection == entry->protection) &&
1058 		     (prev->max_protection == entry->max_protection) &&
1059 		     (prev->inheritance == entry->inheritance) &&
1060 		     (prev->wired_count == entry->wired_count)) {
1061 			if (map->first_free == prev)
1062 				map->first_free = entry;
1063 			vm_map_entry_unlink(map, prev);
1064 			entry->start = prev->start;
1065 			entry->offset = prev->offset;
1066 			if (prev->object.vm_object)
1067 				vm_object_deallocate(prev->object.vm_object);
1068 			vm_map_entry_dispose(map, prev);
1069 		}
1070 	}
1071 
1072 	next = entry->next;
1073 	if (next != &map->header) {
1074 		esize = entry->end - entry->start;
1075 		if ((entry->end == next->start) &&
1076 		    (next->object.vm_object == entry->object.vm_object) &&
1077 		     (!entry->object.vm_object ||
1078 			(entry->offset + esize == next->offset)) &&
1079 		    (next->eflags == entry->eflags) &&
1080 		    (next->protection == entry->protection) &&
1081 		    (next->max_protection == entry->max_protection) &&
1082 		    (next->inheritance == entry->inheritance) &&
1083 		    (next->wired_count == entry->wired_count)) {
1084 			if (map->first_free == next)
1085 				map->first_free = entry;
1086 			vm_map_entry_unlink(map, next);
1087 			entry->end = next->end;
1088 			if (next->object.vm_object)
1089 				vm_object_deallocate(next->object.vm_object);
1090 			vm_map_entry_dispose(map, next);
1091 	        }
1092 	}
1093 }
1094 /*
1095  *	vm_map_clip_start:	[ internal use only ]
1096  *
1097  *	Asserts that the given entry begins at or after
1098  *	the specified address; if necessary,
1099  *	it splits the entry into two.
1100  */
1101 #define vm_map_clip_start(map, entry, startaddr) \
1102 { \
1103 	if (startaddr > entry->start) \
1104 		_vm_map_clip_start(map, entry, startaddr); \
1105 }
1106 
1107 /*
1108  *	This routine is called only when it is known that
1109  *	the entry must be split.
1110  */
1111 static void
1112 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1113 {
1114 	vm_map_entry_t new_entry;
1115 
1116 	/*
1117 	 * Split off the front portion -- note that we must insert the new
1118 	 * entry BEFORE this one, so that this entry has the specified
1119 	 * starting address.
1120 	 */
1121 	vm_map_simplify_entry(map, entry);
1122 
1123 	/*
1124 	 * If there is no object backing this entry, we might as well create
1125 	 * one now.  If we defer it, an object can get created after the map
1126 	 * is clipped, and individual objects will be created for the split-up
1127 	 * map.  This is a bit of a hack, but is also about the best place to
1128 	 * put this improvement.
1129 	 */
1130 	if (entry->object.vm_object == NULL && !map->system_map) {
1131 		vm_object_t object;
1132 		object = vm_object_allocate(OBJT_DEFAULT,
1133 				atop(entry->end - entry->start));
1134 		entry->object.vm_object = object;
1135 		entry->offset = 0;
1136 	}
1137 
1138 	new_entry = vm_map_entry_create(map);
1139 	*new_entry = *entry;
1140 
1141 	new_entry->end = start;
1142 	entry->offset += (start - entry->start);
1143 	entry->start = start;
1144 
1145 	vm_map_entry_link(map, entry->prev, new_entry);
1146 
1147 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1148 		vm_object_reference(new_entry->object.vm_object);
1149 	}
1150 }
1151 
1152 /*
1153  *	vm_map_clip_end:	[ internal use only ]
1154  *
1155  *	Asserts that the given entry ends at or before
1156  *	the specified address; if necessary,
1157  *	it splits the entry into two.
1158  */
1159 #define vm_map_clip_end(map, entry, endaddr) \
1160 { \
1161 	if ((endaddr) < (entry->end)) \
1162 		_vm_map_clip_end((map), (entry), (endaddr)); \
1163 }
1164 
1165 /*
1166  *	This routine is called only when it is known that
1167  *	the entry must be split.
1168  */
1169 static void
1170 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1171 {
1172 	vm_map_entry_t new_entry;
1173 
1174 	/*
1175 	 * If there is no object backing this entry, we might as well create
1176 	 * one now.  If we defer it, an object can get created after the map
1177 	 * is clipped, and individual objects will be created for the split-up
1178 	 * map.  This is a bit of a hack, but is also about the best place to
1179 	 * put this improvement.
1180 	 */
1181 	if (entry->object.vm_object == NULL && !map->system_map) {
1182 		vm_object_t object;
1183 		object = vm_object_allocate(OBJT_DEFAULT,
1184 				atop(entry->end - entry->start));
1185 		entry->object.vm_object = object;
1186 		entry->offset = 0;
1187 	}
1188 
1189 	/*
1190 	 * Create a new entry and insert it AFTER the specified entry
1191 	 */
1192 	new_entry = vm_map_entry_create(map);
1193 	*new_entry = *entry;
1194 
1195 	new_entry->start = entry->end = end;
1196 	new_entry->offset += (end - entry->start);
1197 
1198 	vm_map_entry_link(map, entry, new_entry);
1199 
1200 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1201 		vm_object_reference(new_entry->object.vm_object);
1202 	}
1203 }
1204 
1205 /*
1206  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1207  *
1208  *	Asserts that the starting and ending region
1209  *	addresses fall within the valid range of the map.
1210  */
1211 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1212 		{					\
1213 		if (start < vm_map_min(map))		\
1214 			start = vm_map_min(map);	\
1215 		if (end > vm_map_max(map))		\
1216 			end = vm_map_max(map);		\
1217 		if (start > end)			\
1218 			start = end;			\
1219 		}
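/*
 * Example (illustrative): for a map covering [0x1000, 0x10000), a
 * request for [0, 0x20000) is clamped to [0x1000, 0x10000), and a
 * request lying entirely below the map degenerates to the empty range
 * [0x1000, 0x1000).
 */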
1220 
1221 /*
1222  *	vm_map_submap:		[ kernel use only ]
1223  *
1224  *	Mark the given range as handled by a subordinate map.
1225  *
1226  *	This range must have been created with vm_map_find,
1227  *	and no other operations may have been performed on this
1228  *	range prior to calling vm_map_submap.
1229  *
1230  *	Only a limited number of operations can be performed
1231  *	within this range after calling vm_map_submap:
1232  *		vm_fault
1233  *	[Don't try vm_map_copy!]
1234  *
1235  *	To remove a submapping, one must first remove the
1236  *	range from the superior map, and then destroy the
1237  *	submap (if desired).  [Better yet, don't try it.]
1238  */
1239 int
1240 vm_map_submap(
1241 	vm_map_t map,
1242 	vm_offset_t start,
1243 	vm_offset_t end,
1244 	vm_map_t submap)
1245 {
1246 	vm_map_entry_t entry;
1247 	int result = KERN_INVALID_ARGUMENT;
1248 
1249 	vm_map_lock(map);
1250 
1251 	VM_MAP_RANGE_CHECK(map, start, end);
1252 
1253 	if (vm_map_lookup_entry(map, start, &entry)) {
1254 		vm_map_clip_start(map, entry, start);
1255 	} else
1256 		entry = entry->next;
1257 
1258 	vm_map_clip_end(map, entry, end);
1259 
1260 	if ((entry->start == start) && (entry->end == end) &&
1261 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1262 	    (entry->object.vm_object == NULL)) {
1263 		entry->object.sub_map = submap;
1264 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1265 		result = KERN_SUCCESS;
1266 	}
1267 	vm_map_unlock(map);
1268 
1269 	return (result);
1270 }
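/*
 * Usage sketch: a representative caller is kmem_suballoc() (vm_kern.c),
 * which carves a range out of the parent map, creates the child map
 * with vm_map_create(), and then installs it here.
 */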
1271 
1272 /*
1273  * The maximum number of pages to map
1274  */
1275 #define	MAX_INIT_PT	96
1276 
1277 /*
1278  *	vm_map_pmap_enter:
1279  *
1280  *	Preload the mappings for the given object into the specified
1281  *	map.  This eliminates the soft faults on process startup and
1282  *	immediately after an mmap(2).
1283  */
1284 void
1285 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
1286     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1287 {
1288 	vm_offset_t tmpidx;
1289 	int psize;
1290 	vm_page_t p, mpte;
1291 
1292 	if (object == NULL)
1293 		return;
1294 	mtx_lock(&Giant);
1295 	VM_OBJECT_LOCK(object);
1296 	if (object->type == OBJT_DEVICE) {
1297 		pmap_object_init_pt(map->pmap, addr, object, pindex, size);
1298 		goto unlock_return;
1299 	}
1300 
1301 	psize = atop(size);
1302 
1303 	if (object->type != OBJT_VNODE ||
1304 	    ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
1305 	     (object->resident_page_count > MAX_INIT_PT))) {
1306 		goto unlock_return;
1307 	}
1308 
1309 	if (psize + pindex > object->size) {
1310 		if (object->size < pindex)
1311 			goto unlock_return;
1312 		psize = object->size - pindex;
1313 	}
1314 
1315 	mpte = NULL;
1316 
1317 	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
1318 		if (p->pindex < pindex) {
1319 			p = vm_page_splay(pindex, object->root);
1320 			if ((object->root = p)->pindex < pindex)
1321 				p = TAILQ_NEXT(p, listq);
1322 		}
1323 	}
1324 	/*
1325 	 * Assert: the variable p is either (1) the page with the
1326 	 * least pindex greater than or equal to the parameter pindex
1327 	 * or (2) NULL.
1328 	 */
1329 	for (;
1330 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1331 	     p = TAILQ_NEXT(p, listq)) {
1332 		/*
1333 		 * Don't allow madvise to exhaust the truly free pages
1334 		 * by allocating pv entries.
1335 		 */
1336 		if ((flags & MAP_PREFAULT_MADVISE) &&
1337 		    cnt.v_free_count < cnt.v_free_reserved) {
1338 			break;
1339 		}
1340 		vm_page_lock_queues();
1341 		if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
1342 		    (p->busy == 0) &&
1343 		    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
1344 			if ((p->queue - p->pc) == PQ_CACHE)
1345 				vm_page_deactivate(p);
1346 			vm_page_busy(p);
1347 			vm_page_unlock_queues();
1348 			VM_OBJECT_UNLOCK(object);
1349 			mpte = pmap_enter_quick(map->pmap,
1350 				addr + ptoa(tmpidx), p, mpte);
1351 			VM_OBJECT_LOCK(object);
1352 			vm_page_lock_queues();
1353 			vm_page_wakeup(p);
1354 		}
1355 		vm_page_unlock_queues();
1356 	}
1357 unlock_return:
1358 	VM_OBJECT_UNLOCK(object);
1359 	mtx_unlock(&Giant);
1360 }
1361 
1362 /*
1363  *	vm_map_protect:
1364  *
1365  *	Sets the protection of the specified address
1366  *	region in the target map.  If "set_max" is
1367  *	specified, the maximum protection is to be set;
1368  *	otherwise, only the current protection is affected.
1369  */
1370 int
1371 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1372 	       vm_prot_t new_prot, boolean_t set_max)
1373 {
1374 	vm_map_entry_t current;
1375 	vm_map_entry_t entry;
1376 
1377 	vm_map_lock(map);
1378 
1379 	VM_MAP_RANGE_CHECK(map, start, end);
1380 
1381 	if (vm_map_lookup_entry(map, start, &entry)) {
1382 		vm_map_clip_start(map, entry, start);
1383 	} else {
1384 		entry = entry->next;
1385 	}
1386 
1387 	/*
1388 	 * Make a first pass to check for protection violations.
1389 	 */
1390 	current = entry;
1391 	while ((current != &map->header) && (current->start < end)) {
1392 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1393 			vm_map_unlock(map);
1394 			return (KERN_INVALID_ARGUMENT);
1395 		}
1396 		if ((new_prot & current->max_protection) != new_prot) {
1397 			vm_map_unlock(map);
1398 			return (KERN_PROTECTION_FAILURE);
1399 		}
1400 		current = current->next;
1401 	}
1402 
1403 	/*
1404 	 * Go back and fix up protections. [Note that clipping is not
1405 	 * necessary the second time.]
1406 	 */
1407 	current = entry;
1408 	while ((current != &map->header) && (current->start < end)) {
1409 		vm_prot_t old_prot;
1410 
1411 		vm_map_clip_end(map, current, end);
1412 
1413 		old_prot = current->protection;
1414 		if (set_max)
1415 			current->protection =
1416 			    (current->max_protection = new_prot) &
1417 			    old_prot;
1418 		else
1419 			current->protection = new_prot;
1420 
1421 		/*
1422 		 * Update physical map if necessary. Worry about copy-on-write
1423 		 * here -- CHECK THIS XXX
1424 		 */
1425 		if (current->protection != old_prot) {
1426 			mtx_lock(&Giant);
1427 			vm_page_lock_queues();
1428 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1429 							VM_PROT_ALL)
1430 			pmap_protect(map->pmap, current->start,
1431 			    current->end,
1432 			    current->protection & MASK(current));
1433 #undef	MASK
1434 			vm_page_unlock_queues();
1435 			mtx_unlock(&Giant);
1436 		}
1437 		vm_map_simplify_entry(map, current);
1438 		current = current->next;
1439 	}
1440 	vm_map_unlock(map);
1441 	return (KERN_SUCCESS);
1442 }
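/*
 * Usage sketch (illustrative): the mprotect(2) path reduces to a call
 * of roughly this shape.
 */
#if 0
	rv = vm_map_protect(&p->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), VM_PROT_READ, FALSE);
#endif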
1443 
1444 /*
1445  *	vm_map_madvise:
1446  *
1447  *	This routine traverses a process's map, handling the madvise
1448  *	system call.  Advisories are classified as either those affecting
1449  *	the vm_map_entry structure, or those affecting the underlying
1450  *	objects.
1451  */
1452 int
1453 vm_map_madvise(
1454 	vm_map_t map,
1455 	vm_offset_t start,
1456 	vm_offset_t end,
1457 	int behav)
1458 {
1459 	vm_map_entry_t current, entry;
1460 	int modify_map = 0;
1461 
1462 	/*
1463 	 * Some madvise calls directly modify the vm_map_entry, in which case
1464 	 * we need to use an exclusive lock on the map and we need to perform
1465 	 * various clipping operations.  Otherwise we only need a read-lock
1466 	 * on the map.
1467 	 */
1468 	switch(behav) {
1469 	case MADV_NORMAL:
1470 	case MADV_SEQUENTIAL:
1471 	case MADV_RANDOM:
1472 	case MADV_NOSYNC:
1473 	case MADV_AUTOSYNC:
1474 	case MADV_NOCORE:
1475 	case MADV_CORE:
1476 		modify_map = 1;
1477 		vm_map_lock(map);
1478 		break;
1479 	case MADV_WILLNEED:
1480 	case MADV_DONTNEED:
1481 	case MADV_FREE:
1482 		vm_map_lock_read(map);
1483 		break;
1484 	default:
1485 		return (KERN_INVALID_ARGUMENT);
1486 	}
1487 
1488 	/*
1489 	 * Locate starting entry and clip if necessary.
1490 	 */
1491 	VM_MAP_RANGE_CHECK(map, start, end);
1492 
1493 	if (vm_map_lookup_entry(map, start, &entry)) {
1494 		if (modify_map)
1495 			vm_map_clip_start(map, entry, start);
1496 	} else {
1497 		entry = entry->next;
1498 	}
1499 
1500 	if (modify_map) {
1501 		/*
1502 		 * madvise behaviors that are implemented in the vm_map_entry.
1503 		 *
1504 		 * We clip the vm_map_entry so that behavioral changes are
1505 		 * limited to the specified address range.
1506 		 */
1507 		for (current = entry;
1508 		     (current != &map->header) && (current->start < end);
1509 		     current = current->next
1510 		) {
1511 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1512 				continue;
1513 
1514 			vm_map_clip_end(map, current, end);
1515 
1516 			switch (behav) {
1517 			case MADV_NORMAL:
1518 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1519 				break;
1520 			case MADV_SEQUENTIAL:
1521 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1522 				break;
1523 			case MADV_RANDOM:
1524 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1525 				break;
1526 			case MADV_NOSYNC:
1527 				current->eflags |= MAP_ENTRY_NOSYNC;
1528 				break;
1529 			case MADV_AUTOSYNC:
1530 				current->eflags &= ~MAP_ENTRY_NOSYNC;
1531 				break;
1532 			case MADV_NOCORE:
1533 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
1534 				break;
1535 			case MADV_CORE:
1536 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1537 				break;
1538 			default:
1539 				break;
1540 			}
1541 			vm_map_simplify_entry(map, current);
1542 		}
1543 		vm_map_unlock(map);
1544 	} else {
1545 		vm_pindex_t pindex;
1546 		int count;
1547 
1548 		/*
1549 		 * madvise behaviors that are implemented in the underlying
1550 		 * vm_object.
1551 		 *
1552 		 * Since we don't clip the vm_map_entry, we have to clip
1553 		 * the vm_object pindex and count.
1554 		 */
1555 		for (current = entry;
1556 		     (current != &map->header) && (current->start < end);
1557 		     current = current->next
1558 		) {
1559 			vm_offset_t useStart;
1560 
1561 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1562 				continue;
1563 
1564 			pindex = OFF_TO_IDX(current->offset);
1565 			count = atop(current->end - current->start);
1566 			useStart = current->start;
1567 
1568 			if (current->start < start) {
1569 				pindex += atop(start - current->start);
1570 				count -= atop(start - current->start);
1571 				useStart = start;
1572 			}
1573 			if (current->end > end)
1574 				count -= atop(current->end - end);
1575 
1576 			if (count <= 0)
1577 				continue;
1578 
1579 			vm_object_madvise(current->object.vm_object,
1580 					  pindex, count, behav);
1581 			if (behav == MADV_WILLNEED) {
1582 				vm_map_pmap_enter(map,
1583 				    useStart,
1584 				    current->object.vm_object,
1585 				    pindex,
1586 				    (count << PAGE_SHIFT),
1587 				    MAP_PREFAULT_MADVISE
1588 				);
1589 			}
1590 		}
1591 		vm_map_unlock_read(map);
1592 	}
1593 	return (0);
1594 }
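/*
 * Example of the two classes above: MADV_NOSYNC sets MAP_ENTRY_NOSYNC
 * on the clipped entries under the exclusive lock, while MADV_WILLNEED
 * leaves the entries untouched and instead faults pages in through
 * vm_map_pmap_enter() under the read lock.
 */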
1595 
1596 
1597 /*
1598  *	vm_map_inherit:
1599  *
1600  *	Sets the inheritance of the specified address
1601  *	range in the target map.  Inheritance
1602  *	affects how the map will be shared with
1603  *	child maps at the time of vm_map_fork.
1604  */
1605 int
1606 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1607 	       vm_inherit_t new_inheritance)
1608 {
1609 	vm_map_entry_t entry;
1610 	vm_map_entry_t temp_entry;
1611 
1612 	switch (new_inheritance) {
1613 	case VM_INHERIT_NONE:
1614 	case VM_INHERIT_COPY:
1615 	case VM_INHERIT_SHARE:
1616 		break;
1617 	default:
1618 		return (KERN_INVALID_ARGUMENT);
1619 	}
1620 	vm_map_lock(map);
1621 	VM_MAP_RANGE_CHECK(map, start, end);
1622 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1623 		entry = temp_entry;
1624 		vm_map_clip_start(map, entry, start);
1625 	} else
1626 		entry = temp_entry->next;
1627 	while ((entry != &map->header) && (entry->start < end)) {
1628 		vm_map_clip_end(map, entry, end);
1629 		entry->inheritance = new_inheritance;
1630 		vm_map_simplify_entry(map, entry);
1631 		entry = entry->next;
1632 	}
1633 	vm_map_unlock(map);
1634 	return (KERN_SUCCESS);
1635 }
1636 
1637 /*
1638  *	vm_map_unwire:
1639  *
1640  *	Implements both kernel and user unwiring.
1641  */
1642 int
1643 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1644 	boolean_t user_unwire)
1645 {
1646 	vm_map_entry_t entry, first_entry, tmp_entry;
1647 	vm_offset_t saved_start;
1648 	unsigned int last_timestamp;
1649 	int rv;
1650 	boolean_t need_wakeup, result;
1651 
1652 	vm_map_lock(map);
1653 	VM_MAP_RANGE_CHECK(map, start, end);
1654 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
1655 		vm_map_unlock(map);
1656 		return (KERN_INVALID_ADDRESS);
1657 	}
1658 	last_timestamp = map->timestamp;
1659 	entry = first_entry;
1660 	while (entry != &map->header && entry->start < end) {
1661 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1662 			/*
1663 			 * We have not yet clipped the entry.
1664 			 */
1665 			saved_start = (start >= entry->start) ? start :
1666 			    entry->start;
1667 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1668 			if (vm_map_unlock_and_wait(map, user_unwire)) {
1669 				/*
1670 				 * Allow interruption of user unwiring?
1671 				 */
1672 			}
1673 			vm_map_lock(map);
1674 			if (last_timestamp+1 != map->timestamp) {
1675 				/*
1676 				 * Look again for the entry because the map was
1677 				 * modified while it was unlocked.
1678 				 * Specifically, the entry may have been
1679 				 * clipped, merged, or deleted.
1680 				 */
1681 				if (!vm_map_lookup_entry(map, saved_start,
1682 				    &tmp_entry)) {
1683 					if (saved_start == start) {
1684 						/*
1685 						 * First_entry has been deleted.
1686 						 * first_entry has been deleted.
1687 						vm_map_unlock(map);
1688 						return (KERN_INVALID_ADDRESS);
1689 					}
1690 					end = saved_start;
1691 					rv = KERN_INVALID_ADDRESS;
1692 					goto done;
1693 				}
1694 				if (entry == first_entry)
1695 					first_entry = tmp_entry;
1696 				else
1697 					first_entry = NULL;
1698 				entry = tmp_entry;
1699 			}
1700 			last_timestamp = map->timestamp;
1701 			continue;
1702 		}
1703 		vm_map_clip_start(map, entry, start);
1704 		vm_map_clip_end(map, entry, end);
1705 		/*
1706 		 * Mark the entry in case the map lock is released.  (See
1707 		 * above.)
1708 		 */
1709 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1710 		/*
1711 		 * Check the map for holes in the specified region.
1712 		 */
1713 		if (entry->end < end && (entry->next == &map->header ||
1714 		    entry->next->start > entry->end)) {
1715 			end = entry->end;
1716 			rv = KERN_INVALID_ADDRESS;
1717 			goto done;
1718 		}
1719 		/*
1720 		 * Require that the entry is wired.
1721 		 */
1722 		if (entry->wired_count == 0 || (user_unwire &&
1723 		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) {
1724 			end = entry->end;
1725 			rv = KERN_INVALID_ARGUMENT;
1726 			goto done;
1727 		}
1728 		entry = entry->next;
1729 	}
1730 	rv = KERN_SUCCESS;
1731 done:
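	/*
	 * Pass 2: clear the in-transition marks set above, perform the
	 * actual unwiring if pass 1 succeeded, and wake any threads
	 * sleeping in vm_map_unlock_and_wait() on these entries.
	 */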
1732 	need_wakeup = FALSE;
1733 	if (first_entry == NULL) {
1734 		result = vm_map_lookup_entry(map, start, &first_entry);
1735 		KASSERT(result, ("vm_map_unwire: lookup failed"));
1736 	}
1737 	entry = first_entry;
1738 	while (entry != &map->header && entry->start < end) {
1739 		if (rv == KERN_SUCCESS) {
1740 			if (user_unwire)
1741 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1742 			entry->wired_count--;
1743 			if (entry->wired_count == 0) {
1744 				/*
1745 				 * Retain the map lock.
1746 				 */
1747 				vm_fault_unwire(map, entry->start, entry->end);
1748 			}
1749 		}
1750 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1751 			("vm_map_unwire: in-transition flag missing"));
1752 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1753 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1754 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1755 			need_wakeup = TRUE;
1756 		}
1757 		vm_map_simplify_entry(map, entry);
1758 		entry = entry->next;
1759 	}
1760 	vm_map_unlock(map);
1761 	if (need_wakeup)
1762 		vm_map_wakeup(map);
1763 	return (rv);
1764 }
1765 
1766 /*
1767  *	vm_map_wire:
1768  *
1769  *	Implements both kernel and user wiring.
1770  */
1771 int
1772 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1773 	boolean_t user_wire)
1774 {
1775 	vm_map_entry_t entry, first_entry, tmp_entry;
1776 	vm_offset_t saved_end, saved_start;
1777 	unsigned int last_timestamp;
1778 	int rv;
1779 	boolean_t need_wakeup, result;
1780 
1781 	vm_map_lock(map);
1782 	VM_MAP_RANGE_CHECK(map, start, end);
1783 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
1784 		vm_map_unlock(map);
1785 		return (KERN_INVALID_ADDRESS);
1786 	}
1787 	last_timestamp = map->timestamp;
1788 	entry = first_entry;
1789 	while (entry != &map->header && entry->start < end) {
1790 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1791 			/*
1792 			 * We have not yet clipped the entry.
1793 			 */
1794 			saved_start = (start >= entry->start) ? start :
1795 			    entry->start;
1796 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1797 			if (vm_map_unlock_and_wait(map, user_wire)) {
1798 				/*
1799 				 * Allow interruption of user wiring?
1800 				 */
1801 			}
1802 			vm_map_lock(map);
1803 			if (last_timestamp + 1 != map->timestamp) {
1804 				/*
1805 				 * Look again for the entry because the map was
1806 				 * modified while it was unlocked.
1807 				 * Specifically, the entry may have been
1808 				 * clipped, merged, or deleted.
1809 				 */
1810 				if (!vm_map_lookup_entry(map, saved_start,
1811 				    &tmp_entry)) {
1812 					if (saved_start == start) {
1813 						/*
1814 						 * first_entry has been deleted.
1815 						 */
1816 						vm_map_unlock(map);
1817 						return (KERN_INVALID_ADDRESS);
1818 					}
1819 					end = saved_start;
1820 					rv = KERN_INVALID_ADDRESS;
1821 					goto done;
1822 				}
1823 				if (entry == first_entry)
1824 					first_entry = tmp_entry;
1825 				else
1826 					first_entry = NULL;
1827 				entry = tmp_entry;
1828 			}
1829 			last_timestamp = map->timestamp;
1830 			continue;
1831 		}
1832 		vm_map_clip_start(map, entry, start);
1833 		vm_map_clip_end(map, entry, end);
1834 		/*
1835 		 * Mark the entry in case the map lock is released.  (See
1836 		 * above.)
1837 		 */
1838 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1839 		/*
1840 	 * Wire the entry if it is not already wired.
1841 		 */
1842 		if (entry->wired_count == 0) {
1843 			entry->wired_count++;
1844 			saved_start = entry->start;
1845 			saved_end = entry->end;
1846 			/*
1847 			 * Release the map lock, relying on the in-transition
1848 			 * mark.
1849 			 */
1850 			vm_map_unlock(map);
1851 			rv = vm_fault_wire(map, saved_start, saved_end,
1852 			    user_wire);
1853 			vm_map_lock(map);
1854 			if (last_timestamp + 1 != map->timestamp) {
1855 				/*
1856 				 * Look again for the entry because the map was
1857 				 * modified while it was unlocked.  The entry
1858 				 * may have been clipped, but NOT merged or
1859 				 * deleted.
1860 				 */
1861 				result = vm_map_lookup_entry(map, saved_start,
1862 				    &tmp_entry);
1863 				KASSERT(result, ("vm_map_wire: lookup failed"));
1864 				if (entry == first_entry)
1865 					first_entry = tmp_entry;
1866 				else
1867 					first_entry = NULL;
1868 				entry = tmp_entry;
1869 				while (entry->end < saved_end) {
1870 					if (rv != KERN_SUCCESS) {
1871 						KASSERT(entry->wired_count == 1,
1872 						    ("vm_map_wire: bad count"));
1873 						entry->wired_count = -1;
1874 					}
1875 					entry = entry->next;
1876 				}
1877 			}
1878 			last_timestamp = map->timestamp;
1879 			if (rv != KERN_SUCCESS) {
1880 				KASSERT(entry->wired_count == 1,
1881 				    ("vm_map_wire: bad count"));
1882 				/*
1883 				 * Assign an out-of-range value to represent
1884 				 * the failure to wire this entry.
1885 				 */
1886 				entry->wired_count = -1;
1887 				end = entry->end;
1888 				goto done;
1889 			}
1890 		} else if (!user_wire ||
1891 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1892 			entry->wired_count++;
1893 		}
1894 		/*
1895 		 * Check the map for holes in the specified region.
1896 		 */
1897 		if (entry->end < end && (entry->next == &map->header ||
1898 		    entry->next->start > entry->end)) {
1899 			end = entry->end;
1900 			rv = KERN_INVALID_ADDRESS;
1901 			goto done;
1902 		}
1903 		entry = entry->next;
1904 	}
1905 	rv = KERN_SUCCESS;
1906 done:
1907 	need_wakeup = FALSE;
1908 	if (first_entry == NULL) {
1909 		result = vm_map_lookup_entry(map, start, &first_entry);
1910 		KASSERT(result, ("vm_map_wire: lookup failed"));
1911 	}
1912 	entry = first_entry;
1913 	while (entry != &map->header && entry->start < end) {
1914 		if (rv == KERN_SUCCESS) {
1915 			if (user_wire)
1916 				entry->eflags |= MAP_ENTRY_USER_WIRED;
1917 		} else if (entry->wired_count == -1) {
1918 			/*
1919 			 * Wiring failed on this entry.  Thus, unwiring is
1920 			 * unnecessary.
1921 			 */
1922 			entry->wired_count = 0;
1923 		} else {
1924 			if (!user_wire ||
1925 			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
1926 				entry->wired_count--;
1927 			if (entry->wired_count == 0) {
1928 				/*
1929 				 * Retain the map lock.
1930 				 */
1931 				vm_fault_unwire(map, entry->start, entry->end);
1932 			}
1933 		}
1934 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1935 			("vm_map_wire: in-transition flag missing"));
1936 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1937 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1938 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1939 			need_wakeup = TRUE;
1940 		}
1941 		vm_map_simplify_entry(map, entry);
1942 		entry = entry->next;
1943 	}
1944 	vm_map_unlock(map);
1945 	if (need_wakeup)
1946 		vm_map_wakeup(map);
1947 	return (rv);
1948 }
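
/*
 * Illustrative sketch (not compiled): how an mlock()-style caller might wire
 * a user range with the routine above.  The vm_map_wire() signature is
 * assumed from the parameter names used in its body, and the helper name and
 * errno mapping are hypothetical.
 */
#if 0
static int
example_wire_user_range(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	vm_offset_t start, end;

	/* Wire whole pages; the map code works on page-aligned ranges. */
	start = trunc_page(addr);
	end = round_page(addr + len);
	return (vm_map_wire(map, start, end, TRUE /* user_wire */) ==
	    KERN_SUCCESS ? 0 : ENOMEM);
}
#endif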
1949 
1950 /*
1951  * vm_map_clean
1952  *
1953  * Push any dirty cached pages in the address range to their pager.
1954  * If syncio is TRUE, dirty pages are written synchronously.
1955  * If invalidate is TRUE, any cached pages are freed as well.
1956  *
1957  * Returns an error if any part of the specified range is not mapped.
1958  */
1959 int
1960 vm_map_clean(
1961 	vm_map_t map,
1962 	vm_offset_t start,
1963 	vm_offset_t end,
1964 	boolean_t syncio,
1965 	boolean_t invalidate)
1966 {
1967 	vm_map_entry_t current;
1968 	vm_map_entry_t entry;
1969 	vm_size_t size;
1970 	vm_object_t object;
1971 	vm_ooffset_t offset;
1972 
1973 	GIANT_REQUIRED;
1974 
1975 	vm_map_lock_read(map);
1976 	VM_MAP_RANGE_CHECK(map, start, end);
1977 	if (!vm_map_lookup_entry(map, start, &entry)) {
1978 		vm_map_unlock_read(map);
1979 		return (KERN_INVALID_ADDRESS);
1980 	}
1981 	/*
1982 	 * Make a first pass to check for holes.
1983 	 */
1984 	for (current = entry; current->start < end; current = current->next) {
1985 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1986 			vm_map_unlock_read(map);
1987 			return (KERN_INVALID_ARGUMENT);
1988 		}
1989 		if (end > current->end &&
1990 		    (current->next == &map->header ||
1991 			current->end != current->next->start)) {
1992 			vm_map_unlock_read(map);
1993 			return (KERN_INVALID_ADDRESS);
1994 		}
1995 	}
1996 
1997 	if (invalidate) {
1998 		vm_page_lock_queues();
1999 		pmap_remove(map->pmap, start, end);
2000 		vm_page_unlock_queues();
2001 	}
2002 	/*
2003 	 * Make a second pass, cleaning/uncaching pages from the indicated
2004 	 * objects as we go.
2005 	 */
2006 	for (current = entry; current->start < end; current = current->next) {
2007 		offset = current->offset + (start - current->start);
2008 		size = (end <= current->end ? end : current->end) - start;
2009 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2010 			vm_map_t smap;
2011 			vm_map_entry_t tentry;
2012 			vm_size_t tsize;
2013 
2014 			smap = current->object.sub_map;
2015 			vm_map_lock_read(smap);
2016 			(void) vm_map_lookup_entry(smap, offset, &tentry);
2017 			tsize = tentry->end - offset;
2018 			if (tsize < size)
2019 				size = tsize;
2020 			object = tentry->object.vm_object;
2021 			offset = tentry->offset + (offset - tentry->start);
2022 			vm_map_unlock_read(smap);
2023 		} else {
2024 			object = current->object.vm_object;
2025 		}
2026 		/*
2027 		 * Note that there is absolutely no sense in writing out
2028 		 * anonymous objects, so we track down the vnode object
2029 		 * to write out.
2030 		 * We invalidate (remove) all pages from the address space
2031 		 * anyway, for semantic correctness.
2032 		 *
2033 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
2034 		 * may start out with a NULL object.
2035 		 */
2036 		while (object && object->backing_object) {
2037 			offset += object->backing_object_offset;
2038 			object = object->backing_object;
2039 			if (object->size < OFF_TO_IDX(offset + size))
2040 				size = IDX_TO_OFF(object->size) - offset;
2041 		}
2042 		if (object && (object->type == OBJT_VNODE) &&
2043 		    (current->protection & VM_PROT_WRITE)) {
2044 			/*
2045 			 * Flush pages if writing is allowed, invalidate them
2046 			 * if invalidation requested.  Pages undergoing I/O
2047 			 * will be ignored by vm_object_page_remove().
2048 			 *
2049 			 * We cannot lock the vnode and then wait for paging
2050 			 * to complete without deadlocking against vm_fault.
2051 			 * Instead we simply call vm_object_page_remove() and
2052 			 * allow it to block internally on a page-by-page
2053 			 * basis when it encounters pages undergoing async
2054 			 * I/O.
2055 			 */
2056 			int flags;
2057 
2058 			vm_object_reference(object);
2059 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
2060 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2061 			flags |= invalidate ? OBJPC_INVAL : 0;
2062 			VM_OBJECT_LOCK(object);
2063 			vm_object_page_clean(object,
2064 			    OFF_TO_IDX(offset),
2065 			    OFF_TO_IDX(offset + size + PAGE_MASK),
2066 			    flags);
2067 			VM_OBJECT_UNLOCK(object);
2068 			VOP_UNLOCK(object->handle, 0, curthread);
2069 			vm_object_deallocate(object);
2070 		}
2071 		if (object && invalidate &&
2072 		    ((object->type == OBJT_VNODE) ||
2073 		     (object->type == OBJT_DEVICE))) {
2074 			VM_OBJECT_LOCK(object);
2075 			vm_object_page_remove(object,
2076 			    OFF_TO_IDX(offset),
2077 			    OFF_TO_IDX(offset + size + PAGE_MASK),
2078 			    FALSE);
2079 			VM_OBJECT_UNLOCK(object);
2080                 }
2081 		start += size;
2082 	}
2083 
2084 	vm_map_unlock_read(map);
2085 	return (KERN_SUCCESS);
2086 }
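
/*
 * Illustrative sketch (not compiled): an msync(2)-style caller flushing a
 * range synchronously and invalidating cached pages, per the description
 * above.  Giant must be held (GIANT_REQUIRED in vm_map_clean()).  The helper
 * name and errno mapping are hypothetical.
 */
#if 0
static int
example_sync_and_invalidate(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	int rv;

	rv = vm_map_clean(map, trunc_page(addr), round_page(addr + len),
	    TRUE /* syncio */, TRUE /* invalidate */);
	return (rv == KERN_SUCCESS ? 0 : EINVAL);
}
#endif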
2087 
2088 /*
2089  *	vm_map_entry_unwire:	[ internal use only ]
2090  *
2091  *	Make the region specified by this entry pageable.
2092  *
2093  *	The map in question should be locked.
2094  *	[This is the reason for this routine's existence.]
2095  */
2096 static void
2097 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2098 {
2099 	vm_fault_unwire(map, entry->start, entry->end);
2100 	entry->wired_count = 0;
2101 }
2102 
2103 /*
2104  *	vm_map_entry_delete:	[ internal use only ]
2105  *
2106  *	Deallocate the given entry from the target map.
2107  */
2108 static void
2109 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2110 {
2111 	vm_map_entry_unlink(map, entry);
2112 	map->size -= entry->end - entry->start;
2113 
2114 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2115 		vm_object_deallocate(entry->object.vm_object);
2116 	}
2117 
2118 	vm_map_entry_dispose(map, entry);
2119 }
2120 
2121 /*
2122  *	vm_map_delete:	[ internal use only ]
2123  *
2124  *	Deallocates the given address range from the target
2125  *	map.
2126  */
2127 int
2128 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2129 {
2130 	vm_object_t object;
2131 	vm_map_entry_t entry;
2132 	vm_map_entry_t first_entry;
2133 
2134 	/*
2135 	 * Find the start of the region, and clip it
2136 	 */
2137 	if (!vm_map_lookup_entry(map, start, &first_entry))
2138 		entry = first_entry->next;
2139 	else {
2140 		entry = first_entry;
2141 		vm_map_clip_start(map, entry, start);
2142 	}
2143 
2144 	/*
2145 	 * Save the free space hint
2146 	 */
2147 	if (entry == &map->header) {
2148 		map->first_free = &map->header;
2149 	} else if (map->first_free->start >= start) {
2150 		map->first_free = entry->prev;
2151 	}
2152 
2153 	/*
2154 	 * Step through all entries in this region
2155 	 */
2156 	while ((entry != &map->header) && (entry->start < end)) {
2157 		vm_map_entry_t next;
2158 		vm_offset_t s, e;
2159 		vm_pindex_t offidxstart, offidxend, count;
2160 
2161 		/*
2162 		 * Wait for wiring or unwiring of an entry to complete.
2163 		 */
2164 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
2165 			unsigned int last_timestamp;
2166 			vm_offset_t saved_start;
2167 			vm_map_entry_t tmp_entry;
2168 
2169 			saved_start = entry->start;
2170 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2171 			last_timestamp = map->timestamp;
2172 			(void) vm_map_unlock_and_wait(map, FALSE);
2173 			vm_map_lock(map);
2174 			if (last_timestamp + 1 != map->timestamp) {
2175 				/*
2176 				 * Look again for the entry because the map was
2177 				 * modified while it was unlocked.
2178 				 * Specifically, the entry may have been
2179 				 * clipped, merged, or deleted.
2180 				 */
2181 				if (!vm_map_lookup_entry(map, saved_start,
2182 							 &tmp_entry))
2183 					entry = tmp_entry->next;
2184 				else {
2185 					entry = tmp_entry;
2186 					vm_map_clip_start(map, entry,
2187 							  saved_start);
2188 				}
2189 			}
2190 			continue;
2191 		}
2192 		vm_map_clip_end(map, entry, end);
2193 
2194 		s = entry->start;
2195 		e = entry->end;
2196 		next = entry->next;
2197 
2198 		offidxstart = OFF_TO_IDX(entry->offset);
2199 		count = OFF_TO_IDX(e - s);
2200 		object = entry->object.vm_object;
2201 
2202 		/*
2203 		 * Unwire before removing addresses from the pmap; otherwise,
2204 		 * unwiring will put the entries back in the pmap.
2205 		 */
2206 		if (entry->wired_count != 0) {
2207 			vm_map_entry_unwire(map, entry);
2208 		}
2209 
2210 		offidxend = offidxstart + count;
2211 
2212 		if (object == kernel_object || object == kmem_object) {
2213 			VM_OBJECT_LOCK(object);
2214 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2215 			VM_OBJECT_UNLOCK(object);
2216 		} else {
2217 			mtx_lock(&Giant);
2218 			vm_page_lock_queues();
2219 			pmap_remove(map->pmap, s, e);
2220 			vm_page_unlock_queues();
2221 			if (object != NULL) {
2222 				VM_OBJECT_LOCK(object);
2223 				if (object->ref_count != 1 &&
2224 				    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2225 				    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2226 					vm_object_collapse(object);
2227 					vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2228 					if (object->type == OBJT_SWAP)
2229 						swap_pager_freespace(object, offidxstart, count);
2230 					if (offidxend >= object->size &&
2231 					    offidxstart < object->size)
2232 						object->size = offidxstart;
2233 				}
2234 				VM_OBJECT_UNLOCK(object);
2235 			}
2236 			mtx_unlock(&Giant);
2237 		}
2238 
2239 		/*
2240 		 * Delete the entry (which may delete the object) only after
2241 		 * removing all pmap entries pointing to its pages.
2242 		 * (Otherwise, its page frames may be reallocated, and any
2243 		 * modify bits will be set in the wrong object!)
2244 		 */
2245 		vm_map_entry_delete(map, entry);
2246 		entry = next;
2247 	}
2248 	return (KERN_SUCCESS);
2249 }
2250 
2251 /*
2252  *	vm_map_remove:
2253  *
2254  *	Remove the given address range from the target map.
2255  *	This is the exported form of vm_map_delete.
2256  */
2257 int
2258 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2259 {
2260 	int result, s = 0;
2261 
2262 	if (map == kmem_map)
2263 		s = splvm();
2264 
2265 	vm_map_lock(map);
2266 	VM_MAP_RANGE_CHECK(map, start, end);
2267 	result = vm_map_delete(map, start, end);
2268 	vm_map_unlock(map);
2269 
2270 	if (map == kmem_map)
2271 		splx(s);
2272 
2273 	return (result);
2274 }
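
/*
 * Illustrative sketch (not compiled): a munmap()-style caller tearing down a
 * page-aligned range through the exported interface above.  vm_map_remove()
 * takes and drops the map lock itself.  The helper name is hypothetical.
 */
#if 0
static int
example_unmap_range(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	return (vm_map_remove(map, trunc_page(addr),
	    round_page(addr + len)) == KERN_SUCCESS ? 0 : EINVAL);
}
#endif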
2275 
2276 /*
2277  *	vm_map_check_protection:
2278  *
2279  *	Assert that the target map allows the specified privilege on the
2280  *	entire address region given.  The entire region must be allocated.
2281  *
2282  *	WARNING!  This code does not and should not check whether the
2283  *	contents of the region are accessible.  For example, a smaller file
2284  *	might be mapped into a larger address space.
2285  *
2286  *	NOTE!  This code is also called by munmap().
2287  */
2288 boolean_t
2289 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2290 			vm_prot_t protection)
2291 {
2292 	vm_map_entry_t entry;
2293 	vm_map_entry_t tmp_entry;
2294 
2295 	vm_map_lock_read(map);
2296 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2297 		vm_map_unlock_read(map);
2298 		return (FALSE);
2299 	}
2300 	entry = tmp_entry;
2301 
2302 	while (start < end) {
2303 		if (entry == &map->header) {
2304 			vm_map_unlock_read(map);
2305 			return (FALSE);
2306 		}
2307 		/*
2308 		 * No holes allowed!
2309 		 */
2310 		if (start < entry->start) {
2311 			vm_map_unlock_read(map);
2312 			return (FALSE);
2313 		}
2314 		/*
2315 		 * Check protection associated with entry.
2316 		 */
2317 		if ((entry->protection & protection) != protection) {
2318 			vm_map_unlock_read(map);
2319 			return (FALSE);
2320 		}
2321 		/* go to next entry */
2322 		start = entry->end;
2323 		entry = entry->next;
2324 	}
2325 	vm_map_unlock_read(map);
2326 	return (TRUE);
2327 }
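
/*
 * Illustrative sketch (not compiled): verifying that an entire user buffer is
 * mapped with at least read and write permission before committing to an
 * operation on it, per the semantics documented above.  The helper name is
 * hypothetical.
 */
#if 0
static boolean_t
example_range_is_readwrite(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	return (vm_map_check_protection(map, trunc_page(addr),
	    round_page(addr + len), VM_PROT_READ | VM_PROT_WRITE));
}
#endif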
2328 
2329 /*
2330  *	vm_map_copy_entry:
2331  *
2332  *	Copies the contents of the source entry to the destination
2333  *	entry.  The entries *must* be aligned properly.
2334  */
2335 static void
2336 vm_map_copy_entry(
2337 	vm_map_t src_map,
2338 	vm_map_t dst_map,
2339 	vm_map_entry_t src_entry,
2340 	vm_map_entry_t dst_entry)
2341 {
2342 	vm_object_t src_object;
2343 
2344 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2345 		return;
2346 
2347 	if (src_entry->wired_count == 0) {
2348 
2349 		/*
2350 		 * If the source entry is marked needs_copy, it is already
2351 		 * write-protected.
2352 		 */
2353 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2354 			vm_page_lock_queues();
2355 			pmap_protect(src_map->pmap,
2356 			    src_entry->start,
2357 			    src_entry->end,
2358 			    src_entry->protection & ~VM_PROT_WRITE);
2359 			vm_page_unlock_queues();
2360 		}
2361 
2362 		/*
2363 		 * Make a copy of the object.
2364 		 */
2365 		if ((src_object = src_entry->object.vm_object) != NULL) {
2366 
2367 			if ((src_object->handle == NULL) &&
2368 				(src_object->type == OBJT_DEFAULT ||
2369 				 src_object->type == OBJT_SWAP)) {
2370 				VM_OBJECT_LOCK(src_object);
2371 				vm_object_collapse(src_object);
2372 				VM_OBJECT_UNLOCK(src_object);
2373 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2374 					vm_object_split(src_entry);
2375 					src_object = src_entry->object.vm_object;
2376 				}
2377 			}
2378 
2379 			vm_object_reference(src_object);
2380 			VM_OBJECT_LOCK(src_object);
2381 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2382 			VM_OBJECT_UNLOCK(src_object);
2383 			dst_entry->object.vm_object = src_object;
2384 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2385 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2386 			dst_entry->offset = src_entry->offset;
2387 		} else {
2388 			dst_entry->object.vm_object = NULL;
2389 			dst_entry->offset = 0;
2390 		}
2391 
2392 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2393 		    dst_entry->end - dst_entry->start, src_entry->start);
2394 	} else {
2395 		/*
2396 		 * Of course, wired-down pages can't be set copy-on-write.
2397 		 * Cause wired pages to be copied into the new map by
2398 		 * simulating faults (the new pages are pageable).
2399 		 */
2400 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2401 	}
2402 }
2403 
2404 /*
2405  * vmspace_fork:
2406  * Create a new process vmspace structure and vm_map
2407  * based on those of an existing process.  The new map
2408  * is based on the old map, according to the inheritance
2409  * values on the regions in that map.
2410  *
2411  * The source map must not be locked.
2412  */
2413 struct vmspace *
2414 vmspace_fork(struct vmspace *vm1)
2415 {
2416 	struct vmspace *vm2;
2417 	vm_map_t old_map = &vm1->vm_map;
2418 	vm_map_t new_map;
2419 	vm_map_entry_t old_entry;
2420 	vm_map_entry_t new_entry;
2421 	vm_object_t object;
2422 
2423 	GIANT_REQUIRED;
2424 
2425 	vm_map_lock(old_map);
2426 	old_map->infork = 1;
2427 
2428 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2429 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2430 	    (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
2431 	new_map = &vm2->vm_map;	/* XXX */
2432 	new_map->timestamp = 1;
2433 
2434 	old_entry = old_map->header.next;
2435 
2436 	while (old_entry != &old_map->header) {
2437 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2438 			panic("vm_map_fork: encountered a submap");
2439 
2440 		switch (old_entry->inheritance) {
2441 		case VM_INHERIT_NONE:
2442 			break;
2443 
2444 		case VM_INHERIT_SHARE:
2445 			/*
2446 			 * Clone the entry, creating the shared object if necessary.
2447 			 */
2448 			object = old_entry->object.vm_object;
2449 			if (object == NULL) {
2450 				object = vm_object_allocate(OBJT_DEFAULT,
2451 					atop(old_entry->end - old_entry->start));
2452 				old_entry->object.vm_object = object;
2453 				old_entry->offset = (vm_offset_t) 0;
2454 			}
2455 
2456 			/*
2457 			 * Add the reference before calling vm_object_shadow
2458 			 * to ensure that a shadow object is created.
2459 			 */
2460 			vm_object_reference(object);
2461 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2462 				vm_object_shadow(&old_entry->object.vm_object,
2463 					&old_entry->offset,
2464 					atop(old_entry->end - old_entry->start));
2465 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2466 				/* Transfer the second reference too. */
2467 				vm_object_reference(
2468 				    old_entry->object.vm_object);
2469 				vm_object_deallocate(object);
2470 				object = old_entry->object.vm_object;
2471 			}
2472 			VM_OBJECT_LOCK(object);
2473 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2474 			VM_OBJECT_UNLOCK(object);
2475 
2476 			/*
2477 			 * Clone the entry, referencing the shared object.
2478 			 */
2479 			new_entry = vm_map_entry_create(new_map);
2480 			*new_entry = *old_entry;
2481 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2482 			new_entry->wired_count = 0;
2483 
2484 			/*
2485 			 * Insert the entry into the new map -- we know we're
2486 			 * inserting at the end of the new map.
2487 			 */
2488 			vm_map_entry_link(new_map, new_map->header.prev,
2489 			    new_entry);
2490 
2491 			/*
2492 			 * Update the physical map
2493 			 */
2494 			pmap_copy(new_map->pmap, old_map->pmap,
2495 			    new_entry->start,
2496 			    (old_entry->end - old_entry->start),
2497 			    old_entry->start);
2498 			break;
2499 
2500 		case VM_INHERIT_COPY:
2501 			/*
2502 			 * Clone the entry and link into the map.
2503 			 */
2504 			new_entry = vm_map_entry_create(new_map);
2505 			*new_entry = *old_entry;
2506 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2507 			new_entry->wired_count = 0;
2508 			new_entry->object.vm_object = NULL;
2509 			vm_map_entry_link(new_map, new_map->header.prev,
2510 			    new_entry);
2511 			vm_map_copy_entry(old_map, new_map, old_entry,
2512 			    new_entry);
2513 			break;
2514 		}
2515 		old_entry = old_entry->next;
2516 	}
2517 
2518 	new_map->size = old_map->size;
2519 	old_map->infork = 0;
2520 	vm_map_unlock(old_map);
2521 
2522 	return (vm2);
2523 }
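
/*
 * Illustrative sketch (not compiled): how a fork()-style path might duplicate
 * a parent's address space with the routine above.  Giant must be held
 * (GIANT_REQUIRED in vmspace_fork()); the late pmap initialization mirrors
 * what vmspace_exec() and vmspace_unshare() below do.  The helper name is
 * hypothetical.
 */
#if 0
static void
example_fork_vmspace(struct proc *parent, struct proc *child)
{
	child->p_vmspace = vmspace_fork(parent->p_vmspace);
	pmap_pinit2(vmspace_pmap(child->p_vmspace));
}
#endif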
2524 
2525 int
2526 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2527 	      vm_prot_t prot, vm_prot_t max, int cow)
2528 {
2529 	vm_map_entry_t prev_entry;
2530 	vm_map_entry_t new_stack_entry;
2531 	vm_size_t      init_ssize;
2532 	int            rv;
2533 
2534 	if (addrbos < vm_map_min(map))
2535 		return (KERN_NO_SPACE);
2536 	if (addrbos > map->max_offset)
2537 		return (KERN_NO_SPACE);
2538 	if (max_ssize < sgrowsiz)
2539 		init_ssize = max_ssize;
2540 	else
2541 		init_ssize = sgrowsiz;
2542 
2543 	vm_map_lock(map);
2544 
2545 	/* If addr is already mapped, no go */
2546 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2547 		vm_map_unlock(map);
2548 		return (KERN_NO_SPACE);
2549 	}
2550 
2551 	/* If we would blow our VMEM resource limit, no go */
2552 	if (map->size + init_ssize >
2553 	    curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2554 		vm_map_unlock(map);
2555 		return (KERN_NO_SPACE);
2556 	}
2557 
2558 	/* If we can't accommodate max_ssize in the current mapping,
2559 	 * no go.  However, we need to be aware that subsequent user
2560 	 * mappings might map into the space we have reserved for
2561 	 * stack, and currently this space is not protected.
2562 	 *
2563 	 * Hopefully we will at least detect this condition
2564 	 * when we try to grow the stack.
2565 	 */
2566 	if ((prev_entry->next != &map->header) &&
2567 	    (prev_entry->next->start < addrbos + max_ssize)) {
2568 		vm_map_unlock(map);
2569 		return (KERN_NO_SPACE);
2570 	}
2571 
2572 	/* We initially map a stack of only init_ssize.  We will
2573 	 * grow as needed later.  Since this is to be a grow-down
2574 	 * stack, we map at the top of the range.
2575 	 *
2576 	 * Note: we would normally expect prot and max to be
2577 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
2578 	 * eliminate these as input parameters, and just
2579 	 * pass these values here in the insert call.
2580 	 */
2581 	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
2582 	                   addrbos + max_ssize, prot, max, cow);
2583 
2584 	/* Now set the avail_ssize amount */
2585 	if (rv == KERN_SUCCESS){
2586 		if (prev_entry != &map->header)
2587 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
2588 		new_stack_entry = prev_entry->next;
2589 		if (new_stack_entry->end   != addrbos + max_ssize ||
2590 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
2591 			panic ("Bad entry start/end for new stack entry");
2592 		else
2593 			new_stack_entry->avail_ssize = max_ssize - init_ssize;
2594 	}
2595 
2596 	vm_map_unlock(map);
2597 	return (rv);
2598 }
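
/*
 * Illustrative sketch (not compiled): reserving a grow-down stack of up to
 * max_ssize bytes ending at "top", as an exec-time stack setup might.  Per
 * the note in vm_map_stack(), prot/max of VM_PROT_ALL and cow of 0 are the
 * expected arguments.  The helper name is hypothetical.
 */
#if 0
static int
example_create_stack(vm_map_t map, vm_offset_t top, vm_size_t max_ssize)
{
	/* addrbos is the bottom of the reserved stack range. */
	return (vm_map_stack(map, top - max_ssize, max_ssize,
	    VM_PROT_ALL, VM_PROT_ALL, 0));
}
#endif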
2599 
2600 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2601  * desired address is already mapped, or if we successfully grow
2602  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2603  * stack range (this is strange, but preserves compatibility with
2604  * the grow function in vm_machdep.c).
2605  */
2606 int
2607 vm_map_growstack (struct proc *p, vm_offset_t addr)
2608 {
2609 	vm_map_entry_t prev_entry;
2610 	vm_map_entry_t stack_entry;
2611 	vm_map_entry_t new_stack_entry;
2612 	struct vmspace *vm = p->p_vmspace;
2613 	vm_map_t map = &vm->vm_map;
2614 	vm_offset_t    end;
2615 	int      grow_amount;
2616 	int      rv;
2617 	int      is_procstack;
2618 
2619 	GIANT_REQUIRED;
2620 
2621 Retry:
2622 	vm_map_lock_read(map);
2623 
2624 	/* If addr is already in the entry range, no need to grow. */
2625 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
2626 		vm_map_unlock_read(map);
2627 		return (KERN_SUCCESS);
2628 	}
2629 
2630 	if ((stack_entry = prev_entry->next) == &map->header) {
2631 		vm_map_unlock_read(map);
2632 		return (KERN_SUCCESS);
2633 	}
2634 	if (prev_entry == &map->header)
2635 		end = stack_entry->start - stack_entry->avail_ssize;
2636 	else
2637 		end = prev_entry->end;
2638 
2639 	/* This next test mimics the old grow function in vm_machdep.c.
2640 	 * It really doesn't quite make sense, but we do it anyway
2641 	 * for compatibility.
2642 	 *
2643 	 * If the stack is not growable, return success.  This signals the
2644 	 * caller to proceed as it normally would with ordinary VM.
2645 	 */
2646 	if (stack_entry->avail_ssize < 1 ||
2647 	    addr >= stack_entry->start ||
2648 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
2649 		vm_map_unlock_read(map);
2650 		return (KERN_SUCCESS);
2651 	}
2652 
2653 	/* Find the minimum grow amount */
2654 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
2655 	if (grow_amount > stack_entry->avail_ssize) {
2656 		vm_map_unlock_read(map);
2657 		return (KERN_NO_SPACE);
2658 	}
2659 
2660 	/* If there is no longer enough space between the entries,
2661 	 * no go; adjust the available space and fail.  Note: this
2662 	 * should only happen if the user has mapped into the
2663 	 * stack area after the stack was created, and is
2664 	 * probably an error.
2665 	 *
2666 	 * This also effectively destroys any guard page the user
2667 	 * might have intended by limiting the stack size.
2668 	 */
2669 	if (grow_amount > stack_entry->start - end) {
2670 		if (vm_map_lock_upgrade(map))
2671 			goto Retry;
2672 
2673 		stack_entry->avail_ssize = stack_entry->start - end;
2674 
2675 		vm_map_unlock(map);
2676 		return (KERN_NO_SPACE);
2677 	}
2678 
2679 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
2680 
2681 	/* If this is the main process stack, see if we're over the
2682 	 * stack limit.
2683 	 */
2684 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2685 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2686 		vm_map_unlock_read(map);
2687 		return (KERN_NO_SPACE);
2688 	}
2689 
2690 	/* Round up the grow amount to a multiple of sgrowsiz. */
2691 	grow_amount = roundup (grow_amount, sgrowsiz);
2692 	if (grow_amount > stack_entry->avail_ssize) {
2693 		grow_amount = stack_entry->avail_ssize;
2694 	}
2695 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2696 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2697 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2698 		              ctob(vm->vm_ssize);
2699 	}
2700 
2701 	/* If we would blow our VMEM resource limit, no go */
2702 	if (map->size + grow_amount >
2703 	    curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2704 		vm_map_unlock_read(map);
2705 		return (KERN_NO_SPACE);
2706 	}
2707 
2708 	if (vm_map_lock_upgrade(map))
2709 		goto Retry;
2710 
2711 	/* Get the preliminary new entry start value */
2712 	addr = stack_entry->start - grow_amount;
2713 
2714 	/* If this puts us into the previous entry, cut back our growth
2715 	 * to the available space.  Also, see the note above.
2716 	 */
2717 	if (addr < end) {
2718 		stack_entry->avail_ssize = stack_entry->start - end;
2719 		addr = end;
2720 	}
2721 
2722 	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2723 	    p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
2724 
2725 	/* Adjust the available stack space by the amount we grew. */
2726 	if (rv == KERN_SUCCESS) {
2727 		if (prev_entry != &map->header)
2728 			vm_map_clip_end(map, prev_entry, addr);
2729 		new_stack_entry = prev_entry->next;
2730 		if (new_stack_entry->end   != stack_entry->start  ||
2731 		    new_stack_entry->start != addr)
2732 			panic ("Bad stack grow start/end in new stack entry");
2733 		else {
2734 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
2735 							(new_stack_entry->end -
2736 							 new_stack_entry->start);
2737 			if (is_procstack)
2738 				vm->vm_ssize += btoc(new_stack_entry->end -
2739 						     new_stack_entry->start);
2740 		}
2741 	}
2742 
2743 	vm_map_unlock(map);
2744 	return (rv);
2745 }
2746 
2747 /*
2748  * Unshare the specified VM space for exec.  If other processes
2749  * share it, then create a new one.  The new vmspace is empty.
2750  */
2751 void
2752 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
2753 {
2754 	struct vmspace *oldvmspace = p->p_vmspace;
2755 	struct vmspace *newvmspace;
2756 
2757 	GIANT_REQUIRED;
2758 	newvmspace = vmspace_alloc(minuser, maxuser);
2759 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2760 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2761 	/*
2762 	 * This code is written this way for prototype purposes.  The
2763 	 * goal is to avoid running down the vmspace here, but to let the
2764 	 * other processes that are still using the vmspace finally
2765 	 * run it down.  Even though there is little or no chance of blocking
2766 	 * here, it is a good idea to keep this form for future modifications.
2767 	 */
2768 	p->p_vmspace = newvmspace;
2769 	pmap_pinit2(vmspace_pmap(newvmspace));
2770 	vmspace_free(oldvmspace);
2771 	if (p == curthread->td_proc)		/* XXXKSE ? */
2772 		pmap_activate(curthread);
2773 }
2774 
2775 /*
2776  * Unshare the specified VM space for forcing COW.  This
2777  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2778  */
2779 void
2780 vmspace_unshare(struct proc *p)
2781 {
2782 	struct vmspace *oldvmspace = p->p_vmspace;
2783 	struct vmspace *newvmspace;
2784 
2785 	GIANT_REQUIRED;
2786 	if (oldvmspace->vm_refcnt == 1)
2787 		return;
2788 	newvmspace = vmspace_fork(oldvmspace);
2789 	p->p_vmspace = newvmspace;
2790 	pmap_pinit2(vmspace_pmap(newvmspace));
2791 	vmspace_free(oldvmspace);
2792 	if (p == curthread->td_proc)		/* XXXKSE ? */
2793 		pmap_activate(curthread);
2794 }
2795 
2796 /*
2797  *	vm_map_lookup:
2798  *
2799  *	Finds the VM object, offset, and
2800  *	protection for a given virtual address in the
2801  *	specified map, assuming a page fault of the
2802  *	type specified.
2803  *
2804  *	Leaves the map in question locked for read; return
2805  *	values are guaranteed until a vm_map_lookup_done
2806  *	call is performed.  Note that the map argument
2807  *	is in/out; the returned map must be used in
2808  *	the call to vm_map_lookup_done.
2809  *
2810  *	A handle (out_entry) is returned for use in
2811  *	vm_map_lookup_done, to make that fast.
2812  *
2813  *	If a lookup is requested with "write protection"
2814  *	specified, the map may be changed to perform virtual
2815  *	copying operations, although the data referenced will
2816  *	remain the same.
2817  */
2818 int
2819 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2820 	      vm_offset_t vaddr,
2821 	      vm_prot_t fault_typea,
2822 	      vm_map_entry_t *out_entry,	/* OUT */
2823 	      vm_object_t *object,		/* OUT */
2824 	      vm_pindex_t *pindex,		/* OUT */
2825 	      vm_prot_t *out_prot,		/* OUT */
2826 	      boolean_t *wired)			/* OUT */
2827 {
2828 	vm_map_entry_t entry;
2829 	vm_map_t map = *var_map;
2830 	vm_prot_t prot;
2831 	vm_prot_t fault_type = fault_typea;
2832 
2833 RetryLookup:;
2834 	/*
2835 	 * Lookup the faulting address.
2836 	 */
2837 
2838 	vm_map_lock_read(map);
2839 #define	RETURN(why) \
2840 		{ \
2841 		vm_map_unlock_read(map); \
2842 		return (why); \
2843 		}
2844 
2845 	/*
2846 	 * If the map has an interesting hint, try it before calling the
2847 	 * full-blown lookup routine.
2848 	 */
2849 	entry = map->root;
2850 	*out_entry = entry;
2851 	if (entry == NULL ||
2852 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2853 		/*
2854 		 * Entry was either not a valid hint, or the vaddr was not
2855 		 * contained in the entry, so do a full lookup.
2856 		 */
2857 		if (!vm_map_lookup_entry(map, vaddr, out_entry))
2858 			RETURN(KERN_INVALID_ADDRESS);
2859 
2860 		entry = *out_entry;
2861 	}
2862 
2863 	/*
2864 	 * Handle submaps.
2865 	 */
2866 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2867 		vm_map_t old_map = map;
2868 
2869 		*var_map = map = entry->object.sub_map;
2870 		vm_map_unlock_read(old_map);
2871 		goto RetryLookup;
2872 	}
2873 
2874 	/*
2875 	 * Check whether this task is allowed to have this page.
2876 	 * Note the special case for MAP_ENTRY_COW
2877 	 * pages with an override.  This is to implement a forced
2878 	 * COW for debuggers.
2879 	 */
2880 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2881 		prot = entry->max_protection;
2882 	else
2883 		prot = entry->protection;
2884 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2885 	if ((fault_type & prot) != fault_type) {
2886 		RETURN(KERN_PROTECTION_FAILURE);
2887 	}
2888 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
2889 	    (entry->eflags & MAP_ENTRY_COW) &&
2890 	    (fault_type & VM_PROT_WRITE) &&
2891 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2892 		RETURN(KERN_PROTECTION_FAILURE);
2893 	}
2894 
2895 	/*
2896 	 * If this page is not pageable, we have to get it for all possible
2897 	 * accesses.
2898 	 */
2899 	*wired = (entry->wired_count != 0);
2900 	if (*wired)
2901 		prot = fault_type = entry->protection;
2902 
2903 	/*
2904 	 * If the entry was copy-on-write, we either shadow it (write) or demote the protection (read).
2905 	 */
2906 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2907 		/*
2908 		 * If we want to write the page, we may as well handle that
2909 		 * now since we've got the map locked.
2910 		 *
2911 		 * If we don't need to write the page, we just demote the
2912 		 * permissions allowed.
2913 		 */
2914 		if (fault_type & VM_PROT_WRITE) {
2915 			/*
2916 			 * Make a new object, and place it in the object
2917 			 * chain.  Note that no new references have appeared
2918 			 * -- one just moved from the map to the new
2919 			 * object.
2920 			 */
2921 			if (vm_map_lock_upgrade(map))
2922 				goto RetryLookup;
2923 
2924 			vm_object_shadow(
2925 			    &entry->object.vm_object,
2926 			    &entry->offset,
2927 			    atop(entry->end - entry->start));
2928 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2929 
2930 			vm_map_lock_downgrade(map);
2931 		} else {
2932 			/*
2933 			 * We're attempting to read a copy-on-write page --
2934 			 * don't allow writes.
2935 			 */
2936 			prot &= ~VM_PROT_WRITE;
2937 		}
2938 	}
2939 
2940 	/*
2941 	 * Create an object if necessary.
2942 	 */
2943 	if (entry->object.vm_object == NULL &&
2944 	    !map->system_map) {
2945 		if (vm_map_lock_upgrade(map))
2946 			goto RetryLookup;
2947 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2948 		    atop(entry->end - entry->start));
2949 		entry->offset = 0;
2950 		vm_map_lock_downgrade(map);
2951 	}
2952 
2953 	/*
2954 	 * Return the object/offset from this entry.  If the entry was
2955 	 * copy-on-write or empty, it has been fixed up.
2956 	 */
2957 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
2958 	*object = entry->object.vm_object;
2959 
2960 	/*
2961 	 * Return the protection that the caller may use for this access.
2962 	 */
2963 	*out_prot = prot;
2964 	return (KERN_SUCCESS);
2965 
2966 #undef	RETURN
2967 }
2968 
2969 /*
2970  *	vm_map_lookup_done:
2971  *
2972  *	Releases locks acquired by a vm_map_lookup
2973  *	(according to the handle returned by that lookup).
2974  */
2975 void
2976 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
2977 {
2978 	/*
2979 	 * Unlock the main-level map
2980 	 */
2981 	vm_map_unlock_read(map);
2982 }
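
/*
 * Illustrative sketch (not compiled): the lookup/lookup_done pairing
 * described above, as a fault-handling caller would use it.  The map is
 * passed by reference because vm_map_lookup() may substitute a submap; the
 * same (possibly updated) map and the returned entry handle must be given to
 * vm_map_lookup_done().  The helper name is hypothetical.
 */
#if 0
static int
example_resolve_address(vm_map_t map, vm_offset_t va)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... use object and pindex while the read lock is held ... */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif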
2983 
2984 #include "opt_ddb.h"
2985 #ifdef DDB
2986 #include <sys/kernel.h>
2987 
2988 #include <ddb/ddb.h>
2989 
2990 /*
2991  *	vm_map_print:	[ debug ]
2992  */
2993 DB_SHOW_COMMAND(map, vm_map_print)
2994 {
2995 	static int nlines;
2996 	/* XXX convert args. */
2997 	vm_map_t map = (vm_map_t)addr;
2998 	boolean_t full = have_addr;
2999 
3000 	vm_map_entry_t entry;
3001 
3002 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3003 	    (void *)map,
3004 	    (void *)map->pmap, map->nentries, map->timestamp);
3005 	nlines++;
3006 
3007 	if (!full && db_indent)
3008 		return;
3009 
3010 	db_indent += 2;
3011 	for (entry = map->header.next; entry != &map->header;
3012 	    entry = entry->next) {
3013 		db_iprintf("map entry %p: start=%p, end=%p\n",
3014 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3015 		nlines++;
3016 		{
3017 			static char *inheritance_name[4] =
3018 			{"share", "copy", "none", "donate_copy"};
3019 
3020 			db_iprintf(" prot=%x/%x/%s",
3021 			    entry->protection,
3022 			    entry->max_protection,
3023 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3024 			if (entry->wired_count != 0)
3025 				db_printf(", wired");
3026 		}
3027 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3028 			db_printf(", share=%p, offset=0x%jx\n",
3029 			    (void *)entry->object.sub_map,
3030 			    (uintmax_t)entry->offset);
3031 			nlines++;
3032 			if ((entry->prev == &map->header) ||
3033 			    (entry->prev->object.sub_map !=
3034 				entry->object.sub_map)) {
3035 				db_indent += 2;
3036 				vm_map_print((db_expr_t)(intptr_t)
3037 					     entry->object.sub_map,
3038 					     full, 0, (char *)0);
3039 				db_indent -= 2;
3040 			}
3041 		} else {
3042 			db_printf(", object=%p, offset=0x%jx",
3043 			    (void *)entry->object.vm_object,
3044 			    (uintmax_t)entry->offset);
3045 			if (entry->eflags & MAP_ENTRY_COW)
3046 				db_printf(", copy (%s)",
3047 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3048 			db_printf("\n");
3049 			nlines++;
3050 
3051 			if ((entry->prev == &map->header) ||
3052 			    (entry->prev->object.vm_object !=
3053 				entry->object.vm_object)) {
3054 				db_indent += 2;
3055 				vm_object_print((db_expr_t)(intptr_t)
3056 						entry->object.vm_object,
3057 						full, 0, (char *)0);
3058 				nlines += 4;
3059 				db_indent -= 2;
3060 			}
3061 		}
3062 	}
3063 	db_indent -= 2;
3064 	if (db_indent == 0)
3065 		nlines = 0;
3066 }
3067 
3068 
3069 DB_SHOW_COMMAND(procvm, procvm)
3070 {
3071 	struct proc *p;
3072 
3073 	if (have_addr) {
3074 		p = (struct proc *) addr;
3075 	} else {
3076 		p = curproc;
3077 	}
3078 
3079 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3080 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3081 	    (void *)vmspace_pmap(p->p_vmspace));
3082 
3083 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3084 }
3085 
3086 #endif /* DDB */
3087