1 /*-
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60 
61 /*
62  *	Virtual memory mapping module.
63  */
64 
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/kernel.h>
71 #include <sys/ktr.h>
72 #include <sys/lock.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #include <sys/racct.h>
79 #include <sys/resourcevar.h>
80 #include <sys/rwlock.h>
81 #include <sys/file.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysent.h>
84 #include <sys/shm.h>
85 
86 #include <vm/vm.h>
87 #include <vm/vm_param.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_object.h>
92 #include <vm/vm_pager.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_extern.h>
95 #include <vm/vnode_pager.h>
96 #include <vm/swap_pager.h>
97 #include <vm/uma.h>
98 
99 /*
100  *	Virtual memory maps provide for the mapping, protection,
101  *	and sharing of virtual memory objects.  In addition,
102  *	this module provides for an efficient virtual copy of
103  *	memory from one map to another.
104  *
105  *	Synchronization is required prior to most operations.
106  *
107  *	Maps consist of an ordered doubly-linked list of simple
108  *	entries; a self-adjusting binary search tree of these
109  *	entries is used to speed up lookups.
110  *
111  *	Since portions of maps are specified by start/end addresses,
112  *	which may not align with existing map entries, all
113  *	routines merely "clip" entries to these start/end values.
114  *	[That is, an entry is split into two, bordering at a
115  *	start or end value.]  Note that these clippings may not
116  *	always be necessary (as the two resulting entries are then
117  *	not changed); however, the clipping is done for convenience.
118  *
119  *	As mentioned above, virtual copy operations are performed
120  *	by copying VM object references from one map to
121  *	another, and then marking both regions as copy-on-write.
122  */
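
/*
 *	For illustration: clipping an entry spanning [A, C) at an address B
 *	in its interior yields two adjacent entries [A, B) and [B, C) that
 *	initially reference the same backing object at consecutive offsets,
 *	so no data moves; only the bookkeeping is split.
 */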
123 
124 static struct mtx map_sleep_mtx;
125 static uma_zone_t mapentzone;
126 static uma_zone_t kmapentzone;
127 static uma_zone_t mapzone;
128 static uma_zone_t vmspace_zone;
129 static int vmspace_zinit(void *mem, int size, int flags);
130 static void vmspace_zfini(void *mem, int size);
131 static int vm_map_zinit(void *mem, int size, int flags);
132 static void vm_map_zfini(void *mem, int size);
133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
134     vm_offset_t max);
135 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
136 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
137 #ifdef INVARIANTS
138 static void vm_map_zdtor(void *mem, int size, void *arg);
139 static void vmspace_zdtor(void *mem, int size, void *arg);
140 #endif
141 
142 #define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
143     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
144      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
145 
146 /*
147  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
148  * stable.
149  */
150 #define PROC_VMSPACE_LOCK(p) do { } while (0)
151 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
152 
153 /*
154  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
155  *
156  *	Asserts that the starting and ending region
157  *	addresses fall within the valid range of the map.
158  */
159 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
160 		{					\
161 		if (start < vm_map_min(map))		\
162 			start = vm_map_min(map);	\
163 		if (end > vm_map_max(map))		\
164 			end = vm_map_max(map);		\
165 		if (start > end)			\
166 			start = end;			\
167 		}
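
/*
 *	For example, on a hypothetical map spanning [0x1000, 0x8000), a
 *	caller-supplied range is silently clamped rather than rejected:
 *
 *		vm_offset_t start = 0x0, end = 0x9000;
 *		VM_MAP_RANGE_CHECK(map, start, end);
 *
 *	leaves start == 0x1000 and end == 0x8000; a range lying entirely
 *	outside the map degenerates to an empty one.
 */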
168 
169 /*
170  *	vm_map_startup:
171  *
172  *	Initialize the vm_map module.  Must be called before
173  *	any other vm_map routines.
174  *
175  *	Map and entry structures are allocated from the general
176  *	purpose memory pool with some exceptions:
177  *
178  *	- The kernel map and kmem submap are allocated statically.
179  *	- Kernel map entries are allocated out of a static pool.
180  *
181  *	These restrictions are necessary since malloc() uses the
182  *	maps and requires map entries.
183  */
184 
185 void
186 vm_map_startup(void)
187 {
188 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
189 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
190 #ifdef INVARIANTS
191 	    vm_map_zdtor,
192 #else
193 	    NULL,
194 #endif
195 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
196 	uma_prealloc(mapzone, MAX_KMAP);
197 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
198 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
199 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
200 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
201 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
202 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
203 #ifdef INVARIANTS
204 	    vmspace_zdtor,
205 #else
206 	    NULL,
207 #endif
208 	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
209 }
210 
211 static void
212 vmspace_zfini(void *mem, int size)
213 {
214 	struct vmspace *vm;
215 
216 	vm = (struct vmspace *)mem;
217 	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
218 }
219 
220 static int
221 vmspace_zinit(void *mem, int size, int flags)
222 {
223 	struct vmspace *vm;
224 
225 	vm = (struct vmspace *)mem;
226 
227 	vm->vm_map.pmap = NULL;
228 	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
229 	return (0);
230 }
231 
232 static void
233 vm_map_zfini(void *mem, int size)
234 {
235 	vm_map_t map;
236 
237 	map = (vm_map_t)mem;
238 	mtx_destroy(&map->system_mtx);
239 	sx_destroy(&map->lock);
240 }
241 
242 static int
243 vm_map_zinit(void *mem, int size, int flags)
244 {
245 	vm_map_t map;
246 
247 	map = (vm_map_t)mem;
248 	memset(map, 0, sizeof(*map));
249 	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
250 	sx_init(&map->lock, "vm map (user)");
251 	return (0);
252 }
253 
254 #ifdef INVARIANTS
255 static void
256 vmspace_zdtor(void *mem, int size, void *arg)
257 {
258 	struct vmspace *vm;
259 
260 	vm = (struct vmspace *)mem;
261 
262 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
263 }
264 static void
265 vm_map_zdtor(void *mem, int size, void *arg)
266 {
267 	vm_map_t map;
268 
269 	map = (vm_map_t)mem;
270 	KASSERT(map->nentries == 0,
271 	    ("map %p nentries == %d on free.",
272 	    map, map->nentries));
273 	KASSERT(map->size == 0,
274 	    ("map %p size == %lu on free.",
275 	    map, (unsigned long)map->size));
276 }
277 #endif	/* INVARIANTS */
278 
279 /*
280  * Allocate a vmspace structure, including a vm_map and pmap,
281  * and initialize those structures.  The refcnt is set to 1.
282  */
283 struct vmspace *
284 vmspace_alloc(vm_offset_t min, vm_offset_t max)
286 {
287 	struct vmspace *vm;
288 
289 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
290 	if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
291 		uma_zfree(vmspace_zone, vm);
292 		return (NULL);
293 	}
294 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
295 	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
296 	vm->vm_refcnt = 1;
297 	vm->vm_shm = NULL;
298 	vm->vm_swrss = 0;
299 	vm->vm_tsize = 0;
300 	vm->vm_dsize = 0;
301 	vm->vm_ssize = 0;
302 	vm->vm_taddr = 0;
303 	vm->vm_daddr = 0;
304 	vm->vm_maxsaddr = 0;
305 	return (vm);
306 }
307 
308 static void
309 vmspace_container_reset(struct proc *p)
310 {
311 
312 #ifdef RACCT
313 	PROC_LOCK(p);
314 	racct_set(p, RACCT_DATA, 0);
315 	racct_set(p, RACCT_STACK, 0);
316 	racct_set(p, RACCT_RSS, 0);
317 	racct_set(p, RACCT_MEMLOCK, 0);
318 	racct_set(p, RACCT_VMEM, 0);
319 	PROC_UNLOCK(p);
320 #endif
321 }
322 
323 static inline void
324 vmspace_dofree(struct vmspace *vm)
325 {
326 
327 	CTR1(KTR_VM, "vmspace_free: %p", vm);
328 
329 	/*
330 	 * Make sure any SysV shm is freed, it might not have been in
331 	 * exit1().
332 	 */
333 	shmexit(vm);
334 
335 	/*
336 	 * Lock the map, to wait out all other references to it.
337 	 * Delete all of the mappings and pages they hold, then call
338 	 * the pmap module to reclaim anything left.
339 	 */
340 	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
341 	    vm->vm_map.max_offset);
342 
343 	pmap_release(vmspace_pmap(vm));
344 	vm->vm_map.pmap = NULL;
345 	uma_zfree(vmspace_zone, vm);
346 }
347 
348 void
349 vmspace_free(struct vmspace *vm)
350 {
351 
352 	if (vm->vm_refcnt == 0)
353 		panic("vmspace_free: attempt to free already freed vmspace");
354 
355 	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
356 		vmspace_dofree(vm);
357 }
358 
359 void
360 vmspace_exitfree(struct proc *p)
361 {
362 	struct vmspace *vm;
363 
364 	PROC_VMSPACE_LOCK(p);
365 	vm = p->p_vmspace;
366 	p->p_vmspace = NULL;
367 	PROC_VMSPACE_UNLOCK(p);
368 	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
369 	vmspace_free(vm);
370 }
371 
372 void
373 vmspace_exit(struct thread *td)
374 {
375 	int refcnt;
376 	struct vmspace *vm;
377 	struct proc *p;
378 
379 	/*
380 	 * Release user portion of address space.
381 	 * This releases references to vnodes,
382 	 * which could cause I/O if the file has been unlinked.
383 	 * Need to do this early enough that we can still sleep.
384 	 *
385 	 * The last exiting process to reach this point releases as
386 	 * much of the environment as it can. vmspace_dofree() is the
387 	 * slower fallback in case another process had a temporary
388 	 * reference to the vmspace.
389 	 */
390 
391 	p = td->td_proc;
392 	vm = p->p_vmspace;
393 	atomic_add_int(&vmspace0.vm_refcnt, 1);
394 	do {
395 		refcnt = vm->vm_refcnt;
396 		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
397 			/* Switch now since other proc might free vmspace */
398 			PROC_VMSPACE_LOCK(p);
399 			p->p_vmspace = &vmspace0;
400 			PROC_VMSPACE_UNLOCK(p);
401 			pmap_activate(td);
402 		}
403 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
404 	if (refcnt == 1) {
405 		if (p->p_vmspace != vm) {
406 			/* vmspace not yet freed, switch back */
407 			PROC_VMSPACE_LOCK(p);
408 			p->p_vmspace = vm;
409 			PROC_VMSPACE_UNLOCK(p);
410 			pmap_activate(td);
411 		}
412 		pmap_remove_pages(vmspace_pmap(vm));
413 		/* Switch now since this proc will free vmspace */
414 		PROC_VMSPACE_LOCK(p);
415 		p->p_vmspace = &vmspace0;
416 		PROC_VMSPACE_UNLOCK(p);
417 		pmap_activate(td);
418 		vmspace_dofree(vm);
419 	}
420 	vmspace_container_reset(p);
421 }
422 
423 /* Acquire reference to vmspace owned by another process. */
424 
425 struct vmspace *
426 vmspace_acquire_ref(struct proc *p)
427 {
428 	struct vmspace *vm;
429 	int refcnt;
430 
431 	PROC_VMSPACE_LOCK(p);
432 	vm = p->p_vmspace;
433 	if (vm == NULL) {
434 		PROC_VMSPACE_UNLOCK(p);
435 		return (NULL);
436 	}
437 	do {
438 		refcnt = vm->vm_refcnt;
439 		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
440 			PROC_VMSPACE_UNLOCK(p);
441 			return (NULL);
442 		}
443 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
444 	if (vm != p->p_vmspace) {
445 		PROC_VMSPACE_UNLOCK(p);
446 		vmspace_free(vm);
447 		return (NULL);
448 	}
449 	PROC_VMSPACE_UNLOCK(p);
450 	return (vm);
451 }
452 
453 void
454 _vm_map_lock(vm_map_t map, const char *file, int line)
455 {
456 
457 	if (map->system_map)
458 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
459 	else
460 		sx_xlock_(&map->lock, file, line);
461 	map->timestamp++;
462 }
463 
464 static void
465 vm_map_process_deferred(void)
466 {
467 	struct thread *td;
468 	vm_map_entry_t entry, next;
469 	vm_object_t object;
470 
471 	td = curthread;
472 	entry = td->td_map_def_user;
473 	td->td_map_def_user = NULL;
474 	while (entry != NULL) {
475 		next = entry->next;
476 		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
477 			/*
478 			 * Decrement the object's writemappings and
479 			 * possibly the vnode's v_writecount.
480 			 */
481 			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
482 			    ("Submap with writecount"));
483 			object = entry->object.vm_object;
484 			KASSERT(object != NULL, ("No object for writecount"));
485 			vnode_pager_release_writecount(object, entry->start,
486 			    entry->end);
487 		}
488 		vm_map_entry_deallocate(entry, FALSE);
489 		entry = next;
490 	}
491 }
492 
493 void
494 _vm_map_unlock(vm_map_t map, const char *file, int line)
495 {
496 
497 	if (map->system_map)
498 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
499 	else {
500 		sx_xunlock_(&map->lock, file, line);
501 		vm_map_process_deferred();
502 	}
503 }
504 
505 void
506 _vm_map_lock_read(vm_map_t map, const char *file, int line)
507 {
508 
509 	if (map->system_map)
510 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
511 	else
512 		sx_slock_(&map->lock, file, line);
513 }
514 
515 void
516 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
517 {
518 
519 	if (map->system_map)
520 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
521 	else {
522 		sx_sunlock_(&map->lock, file, line);
523 		vm_map_process_deferred();
524 	}
525 }
526 
527 int
528 _vm_map_trylock(vm_map_t map, const char *file, int line)
529 {
530 	int error;
531 
532 	error = map->system_map ?
533 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
534 	    !sx_try_xlock_(&map->lock, file, line);
535 	if (error == 0)
536 		map->timestamp++;
537 	return (error == 0);
538 }
539 
540 int
541 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
542 {
543 	int error;
544 
545 	error = map->system_map ?
546 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
547 	    !sx_try_slock_(&map->lock, file, line);
548 	return (error == 0);
549 }
550 
551 /*
552  *	_vm_map_lock_upgrade:	[ internal use only ]
553  *
554  *	Tries to upgrade a read (shared) lock on the specified map to a write
555  *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
556  *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
557  *	returned without a read or write lock held.
558  *
559  *	Requires that the map be read locked.
560  */
561 int
562 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
563 {
564 	unsigned int last_timestamp;
565 
566 	if (map->system_map) {
567 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
568 	} else {
569 		if (!sx_try_upgrade_(&map->lock, file, line)) {
570 			last_timestamp = map->timestamp;
571 			sx_sunlock_(&map->lock, file, line);
572 			vm_map_process_deferred();
573 			/*
574 			 * If the map's timestamp does not change while the
575 			 * map is unlocked, then the upgrade succeeds.
576 			 */
577 			sx_xlock_(&map->lock, file, line);
578 			if (last_timestamp != map->timestamp) {
579 				sx_xunlock_(&map->lock, file, line);
580 				return (1);
581 			}
582 		}
583 	}
584 	map->timestamp++;
585 	return (0);
586 }
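
/*
 *	A typical caller pattern (sketch; names illustrative): because a
 *	failed upgrade leaves the map unlocked, any entry pointers must be
 *	looked up again before retrying:
 *
 *		vm_map_lock_read(map);
 *		...look up entry...
 *		if (vm_map_lock_upgrade(map)) {
 *			vm_map_lock(map);
 *			...re-look up entry; the map may have changed...
 *		}
 */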
587 
588 void
589 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
590 {
591 
592 	if (map->system_map) {
593 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
594 	} else
595 		sx_downgrade_(&map->lock, file, line);
596 }
597 
598 /*
599  *	vm_map_locked:
600  *
601  *	Returns a non-zero value if the caller holds a write (exclusive) lock
602  *	on the specified map and the value "0" otherwise.
603  */
604 int
605 vm_map_locked(vm_map_t map)
606 {
607 
608 	if (map->system_map)
609 		return (mtx_owned(&map->system_mtx));
610 	else
611 		return (sx_xlocked(&map->lock));
612 }
613 
614 #ifdef INVARIANTS
615 static void
616 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
617 {
618 
619 	if (map->system_map)
620 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
621 	else
622 		sx_assert_(&map->lock, SA_XLOCKED, file, line);
623 }
624 
625 #define	VM_MAP_ASSERT_LOCKED(map) \
626     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
627 #else
628 #define	VM_MAP_ASSERT_LOCKED(map)
629 #endif
630 
631 /*
632  *	_vm_map_unlock_and_wait:
633  *
634  *	Atomically releases the lock on the specified map and puts the calling
635  *	thread to sleep.  The calling thread will remain asleep until either
636  *	vm_map_wakeup() is performed on the map or the specified timeout is
637  *	exceeded.
638  *
639  *	WARNING!  This function does not perform deferred deallocations of
640  *	objects and map	entries.  Therefore, the calling thread is expected to
641  *	reacquire the map lock after reawakening and later perform an ordinary
642  *	unlock operation, such as vm_map_unlock(), before completing its
643  *	operation on the map.
644  */
645 int
646 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
647 {
648 
649 	mtx_lock(&map_sleep_mtx);
650 	if (map->system_map)
651 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
652 	else
653 		sx_xunlock_(&map->lock, file, line);
654 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
655 	    timo));
656 }
657 
658 /*
659  *	vm_map_wakeup:
660  *
661  *	Awaken any threads that have slept on the map using
662  *	vm_map_unlock_and_wait().
663  */
664 void
665 vm_map_wakeup(vm_map_t map)
666 {
667 
668 	/*
669 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
670 	 * from being performed (and lost) between the map unlock
671 	 * and the msleep() in _vm_map_unlock_and_wait().
672 	 */
673 	mtx_lock(&map_sleep_mtx);
674 	mtx_unlock(&map_sleep_mtx);
675 	wakeup(&map->root);
676 }
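
/*
 *	Sketch of the intended pairing (illustrative, not a verbatim caller):
 *
 *		(waiter, map locked)
 *		map->needs_wakeup = TRUE;
 *		(void)vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);	(reacquire; unlock normally later)
 *
 *		(updater, after modifying the map)
 *		if (map->needs_wakeup) {
 *			map->needs_wakeup = FALSE;
 *			vm_map_wakeup(map);
 *		}
 */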
677 
678 void
679 vm_map_busy(vm_map_t map)
680 {
681 
682 	VM_MAP_ASSERT_LOCKED(map);
683 	map->busy++;
684 }
685 
686 void
687 vm_map_unbusy(vm_map_t map)
688 {
689 
690 	VM_MAP_ASSERT_LOCKED(map);
691 	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
692 	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
693 		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
694 		wakeup(&map->busy);
695 	}
696 }
697 
698 void
699 vm_map_wait_busy(vm_map_t map)
700 {
701 
702 	VM_MAP_ASSERT_LOCKED(map);
703 	while (map->busy) {
704 		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
705 		if (map->system_map)
706 			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
707 		else
708 			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
709 	}
710 	map->timestamp++;
711 }
712 
713 long
714 vmspace_resident_count(struct vmspace *vmspace)
715 {
716 	return pmap_resident_count(vmspace_pmap(vmspace));
717 }
718 
719 /*
720  *	vm_map_create:
721  *
722  *	Creates and returns a new empty VM map with
723  *	the given physical map structure, and having
724  *	the given lower and upper address bounds.
725  */
726 vm_map_t
727 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
728 {
729 	vm_map_t result;
730 
731 	result = uma_zalloc(mapzone, M_WAITOK);
732 	CTR1(KTR_VM, "vm_map_create: %p", result);
733 	_vm_map_init(result, pmap, min, max);
734 	return (result);
735 }
736 
737 /*
738  * Initialize an existing vm_map structure
739  * such as that in the vmspace structure.
740  */
741 static void
742 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
743 {
744 
745 	map->header.next = map->header.prev = &map->header;
746 	map->needs_wakeup = FALSE;
747 	map->system_map = 0;
748 	map->pmap = pmap;
749 	map->min_offset = min;
750 	map->max_offset = max;
751 	map->flags = 0;
752 	map->root = NULL;
753 	map->timestamp = 0;
754 	map->busy = 0;
755 }
756 
757 void
758 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
759 {
760 
761 	_vm_map_init(map, pmap, min, max);
762 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
763 	sx_init(&map->lock, "user map");
764 }
765 
766 /*
767  *	vm_map_entry_dispose:	[ internal use only ]
768  *
769  *	Inverse of vm_map_entry_create.
770  */
771 static void
772 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
773 {
774 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
775 }
776 
777 /*
778  *	vm_map_entry_create:	[ internal use only ]
779  *
780  *	Allocates a VM map entry for insertion.
781  *	No entry fields are filled in.
782  */
783 static vm_map_entry_t
784 vm_map_entry_create(vm_map_t map)
785 {
786 	vm_map_entry_t new_entry;
787 
788 	if (map->system_map)
789 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
790 	else
791 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
792 	if (new_entry == NULL)
793 		panic("vm_map_entry_create: kernel resources exhausted");
794 	return (new_entry);
795 }
796 
797 /*
798  *	vm_map_entry_set_behavior:
799  *
800  *	Set the expected access behavior, either normal, random, or
801  *	sequential.
802  */
803 static inline void
804 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
805 {
806 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
807 	    (behavior & MAP_ENTRY_BEHAV_MASK);
808 }
809 
810 /*
811  *	vm_map_entry_set_max_free:
812  *
813  *	Set the max_free field in a vm_map_entry.
814  */
815 static inline void
816 vm_map_entry_set_max_free(vm_map_entry_t entry)
817 {
818 
819 	entry->max_free = entry->adj_free;
820 	if (entry->left != NULL && entry->left->max_free > entry->max_free)
821 		entry->max_free = entry->left->max_free;
822 	if (entry->right != NULL && entry->right->max_free > entry->max_free)
823 		entry->max_free = entry->right->max_free;
824 }
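
/*
 *	For example (hypothetical values): an entry with adj_free of 2 pages
 *	whose left and right subtrees carry max_free of 8 and 4 pages ends up
 *	with max_free == 8, the largest gap anywhere in its subtree.
 */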
825 
826 /*
827  *	vm_map_entry_splay:
828  *
829  *	The Sleator and Tarjan top-down splay algorithm with the
830  *	following variation.  Max_free must be computed bottom-up, so
831  *	on the downward pass, maintain the left and right spines in
832  *	reverse order.  Then, make a second pass up each side to fix
833  *	the pointers and compute max_free.  The time bound is O(log n)
834  *	amortized.
835  *
836  *	The new root is the vm_map_entry containing "addr", or else an
837  *	adjacent entry (lower or higher) if addr is not in the tree.
838  *
839  *	The map must be locked, and leaves it so.
840  *
841  *	Returns: the new root.
842  */
843 static vm_map_entry_t
844 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
845 {
846 	vm_map_entry_t llist, rlist;
847 	vm_map_entry_t ltree, rtree;
848 	vm_map_entry_t y;
849 
850 	/* Special case of empty tree. */
851 	if (root == NULL)
852 		return (root);
853 
854 	/*
855 	 * Pass One: Splay down the tree until we find addr or a NULL
856 	 * pointer where addr would go.  llist and rlist are the two
857 	 * sides in reverse order (bottom-up), with llist linked by
858 	 * the right pointer and rlist linked by the left pointer in
859 	 * the vm_map_entry.  Wait until Pass Two to set max_free on
860 	 * the two spines.
861 	 */
862 	llist = NULL;
863 	rlist = NULL;
864 	for (;;) {
865 		/* root is never NULL in here. */
866 		if (addr < root->start) {
867 			y = root->left;
868 			if (y == NULL)
869 				break;
870 			if (addr < y->start && y->left != NULL) {
871 				/* Rotate right and put y on rlist. */
872 				root->left = y->right;
873 				y->right = root;
874 				vm_map_entry_set_max_free(root);
875 				root = y->left;
876 				y->left = rlist;
877 				rlist = y;
878 			} else {
879 				/* Put root on rlist. */
880 				root->left = rlist;
881 				rlist = root;
882 				root = y;
883 			}
884 		} else if (addr >= root->end) {
885 			y = root->right;
886 			if (y == NULL)
887 				break;
888 			if (addr >= y->end && y->right != NULL) {
889 				/* Rotate left and put y on llist. */
890 				root->right = y->left;
891 				y->left = root;
892 				vm_map_entry_set_max_free(root);
893 				root = y->right;
894 				y->right = llist;
895 				llist = y;
896 			} else {
897 				/* Put root on llist. */
898 				root->right = llist;
899 				llist = root;
900 				root = y;
901 			}
902 		} else
903 			break;
904 	}
905 
906 	/*
907 	 * Pass Two: Walk back up the two spines, flip the pointers
908 	 * and set max_free.  The subtrees of the root go at the
909 	 * bottom of llist and rlist.
910 	 */
911 	ltree = root->left;
912 	while (llist != NULL) {
913 		y = llist->right;
914 		llist->right = ltree;
915 		vm_map_entry_set_max_free(llist);
916 		ltree = llist;
917 		llist = y;
918 	}
919 	rtree = root->right;
920 	while (rlist != NULL) {
921 		y = rlist->left;
922 		rlist->left = rtree;
923 		vm_map_entry_set_max_free(rlist);
924 		rtree = rlist;
925 		rlist = y;
926 	}
927 
928 	/*
929 	 * Final assembly: add ltree and rtree as subtrees of root.
930 	 */
931 	root->left = ltree;
932 	root->right = rtree;
933 	vm_map_entry_set_max_free(root);
934 
935 	return (root);
936 }
937 
938 /*
939  *	vm_map_entry_{un,}link:
940  *
941  *	Insert/remove entries from maps.
942  */
943 static void
944 vm_map_entry_link(vm_map_t map,
945 		  vm_map_entry_t after_where,
946 		  vm_map_entry_t entry)
947 {
948 
949 	CTR4(KTR_VM,
950 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
951 	    map->nentries, entry, after_where);
952 	VM_MAP_ASSERT_LOCKED(map);
953 	map->nentries++;
954 	entry->prev = after_where;
955 	entry->next = after_where->next;
956 	entry->next->prev = entry;
957 	after_where->next = entry;
958 
959 	if (after_where != &map->header) {
960 		if (after_where != map->root)
961 			vm_map_entry_splay(after_where->start, map->root);
962 		entry->right = after_where->right;
963 		entry->left = after_where;
964 		after_where->right = NULL;
965 		after_where->adj_free = entry->start - after_where->end;
966 		vm_map_entry_set_max_free(after_where);
967 	} else {
968 		entry->right = map->root;
969 		entry->left = NULL;
970 	}
971 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
972 	    entry->next->start) - entry->end;
973 	vm_map_entry_set_max_free(entry);
974 	map->root = entry;
975 }
976 
977 static void
978 vm_map_entry_unlink(vm_map_t map,
979 		    vm_map_entry_t entry)
980 {
981 	vm_map_entry_t next, prev, root;
982 
983 	VM_MAP_ASSERT_LOCKED(map);
984 	if (entry != map->root)
985 		vm_map_entry_splay(entry->start, map->root);
986 	if (entry->left == NULL)
987 		root = entry->right;
988 	else {
989 		root = vm_map_entry_splay(entry->start, entry->left);
990 		root->right = entry->right;
991 		root->adj_free = (entry->next == &map->header ? map->max_offset :
992 		    entry->next->start) - root->end;
993 		vm_map_entry_set_max_free(root);
994 	}
995 	map->root = root;
996 
997 	prev = entry->prev;
998 	next = entry->next;
999 	next->prev = prev;
1000 	prev->next = next;
1001 	map->nentries--;
1002 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1003 	    map->nentries, entry);
1004 }
1005 
1006 /*
1007  *	vm_map_entry_resize_free:
1008  *
1009  *	Recompute the amount of free space following a vm_map_entry
1010  *	and propagate that value up the tree.  Call this function after
1011  *	resizing a map entry in-place, that is, without a call to
1012  *	vm_map_entry_link() or _unlink().
1013  *
1014  *	The map must be locked, and leaves it so.
1015  */
1016 static void
1017 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1018 {
1019 
1020 	/*
1021 	 * Using splay trees without parent pointers, propagating
1022 	 * max_free up the tree is done by moving the entry to the
1023 	 * root and making the change there.
1024 	 */
1025 	if (entry != map->root)
1026 		map->root = vm_map_entry_splay(entry->start, map->root);
1027 
1028 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
1029 	    entry->next->start) - entry->end;
1030 	vm_map_entry_set_max_free(entry);
1031 }
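
/*
 *	E.g., after growing an entry in place, as vm_map_insert() does when
 *	extending a previous entry:
 *
 *		entry->end = new_end;
 *		vm_map_entry_resize_free(map, entry);
 */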
1032 
1033 /*
1034  *	vm_map_lookup_entry:	[ internal use only ]
1035  *
1036  *	Finds the map entry containing (or
1037  *	immediately preceding) the specified address
1038  *	in the given map; the entry is returned
1039  *	in the "entry" parameter.  The boolean
1040  *	result indicates whether the address is
1041  *	actually contained in the map.
1042  */
1043 boolean_t
1044 vm_map_lookup_entry(
1045 	vm_map_t map,
1046 	vm_offset_t address,
1047 	vm_map_entry_t *entry)	/* OUT */
1048 {
1049 	vm_map_entry_t cur;
1050 	boolean_t locked;
1051 
1052 	/*
1053 	 * If the map is empty, then the map entry immediately preceding
1054 	 * "address" is the map's header.
1055 	 */
1056 	cur = map->root;
1057 	if (cur == NULL)
1058 		*entry = &map->header;
1059 	else if (address >= cur->start && cur->end > address) {
1060 		*entry = cur;
1061 		return (TRUE);
1062 	} else if ((locked = vm_map_locked(map)) ||
1063 	    sx_try_upgrade(&map->lock)) {
1064 		/*
1065 		 * Splay requires a write lock on the map.  However, it only
1066 		 * restructures the binary search tree; it does not otherwise
1067 		 * change the map.  Thus, the map's timestamp need not change
1068 		 * on a temporary upgrade.
1069 		 */
1070 		map->root = cur = vm_map_entry_splay(address, cur);
1071 		if (!locked)
1072 			sx_downgrade(&map->lock);
1073 
1074 		/*
1075 		 * If "address" is contained within a map entry, the new root
1076 		 * is that map entry.  Otherwise, the new root is a map entry
1077 		 * immediately before or after "address".
1078 		 */
1079 		if (address >= cur->start) {
1080 			*entry = cur;
1081 			if (cur->end > address)
1082 				return (TRUE);
1083 		} else
1084 			*entry = cur->prev;
1085 	} else
1086 		/*
1087 		 * Since the map is only locked for read access, perform a
1088 		 * standard binary search tree lookup for "address".
1089 		 */
1090 		for (;;) {
1091 			if (address < cur->start) {
1092 				if (cur->left == NULL) {
1093 					*entry = cur->prev;
1094 					break;
1095 				}
1096 				cur = cur->left;
1097 			} else if (cur->end > address) {
1098 				*entry = cur;
1099 				return (TRUE);
1100 			} else {
1101 				if (cur->right == NULL) {
1102 					*entry = cur;
1103 					break;
1104 				}
1105 				cur = cur->right;
1106 			}
1107 		}
1108 	return (FALSE);
1109 }
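
/*
 *	Typical use when modifying a range (cf. vm_map_submap() and
 *	vm_map_protect() below):
 *
 *		if (vm_map_lookup_entry(map, start, &entry))
 *			vm_map_clip_start(map, entry, start);
 *		else
 *			entry = entry->next;
 */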
1110 
1111 /*
1112  *	vm_map_insert:
1113  *
1114  *	Inserts the given whole VM object into the target
1115  *	map at the specified address range.  The object's
1116  *	size should match that of the address range.
1117  *
1118  *	Requires that the map be locked, and leaves it so.
1119  *
1120  *	If object is non-NULL, ref count must be bumped by caller
1121  *	prior to making call to account for the new entry.
1122  */
1123 int
1124 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1125 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
1126 	      int cow)
1127 {
1128 	vm_map_entry_t new_entry;
1129 	vm_map_entry_t prev_entry;
1130 	vm_map_entry_t temp_entry;
1131 	vm_eflags_t protoeflags;
1132 	struct ucred *cred;
1133 	vm_inherit_t inheritance;
1134 	boolean_t charge_prev_obj;
1135 
1136 	VM_MAP_ASSERT_LOCKED(map);
1137 
1138 	/*
1139 	 * Check that the start and end points are not bogus.
1140 	 */
1141 	if ((start < map->min_offset) || (end > map->max_offset) ||
1142 	    (start >= end))
1143 		return (KERN_INVALID_ADDRESS);
1144 
1145 	/*
1146 	 * Find the entry prior to the proposed starting address; if it's part
1147 	 * of an existing entry, this range is bogus.
1148 	 */
1149 	if (vm_map_lookup_entry(map, start, &temp_entry))
1150 		return (KERN_NO_SPACE);
1151 
1152 	prev_entry = temp_entry;
1153 
1154 	/*
1155 	 * Assert that the next entry doesn't overlap the end point.
1156 	 */
1157 	if ((prev_entry->next != &map->header) &&
1158 	    (prev_entry->next->start < end))
1159 		return (KERN_NO_SPACE);
1160 
1161 	protoeflags = 0;
1162 	charge_prev_obj = FALSE;
1163 
1164 	if (cow & MAP_COPY_ON_WRITE)
1165 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1166 
1167 	if (cow & MAP_NOFAULT) {
1168 		protoeflags |= MAP_ENTRY_NOFAULT;
1169 
1170 		KASSERT(object == NULL,
1171 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1172 	}
1173 	if (cow & MAP_DISABLE_SYNCER)
1174 		protoeflags |= MAP_ENTRY_NOSYNC;
1175 	if (cow & MAP_DISABLE_COREDUMP)
1176 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1177 	if (cow & MAP_VN_WRITECOUNT)
1178 		protoeflags |= MAP_ENTRY_VN_WRITECNT;
1179 	if (cow & MAP_INHERIT_SHARE)
1180 		inheritance = VM_INHERIT_SHARE;
1181 	else
1182 		inheritance = VM_INHERIT_DEFAULT;
1183 
1184 	cred = NULL;
1185 	KASSERT((object != kmem_object && object != kernel_object) ||
1186 	    ((object == kmem_object || object == kernel_object) &&
1187 		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
1188 	    ("kmem or kernel object and cow"));
1189 	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
1190 		goto charged;
1191 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1192 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1193 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1194 			return (KERN_RESOURCE_SHORTAGE);
1195 		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
1196 		    object->cred == NULL,
1197 		    ("OVERCOMMIT: vm_map_insert o %p", object));
1198 		cred = curthread->td_ucred;
1199 		crhold(cred);
1200 		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
1201 			charge_prev_obj = TRUE;
1202 	}
1203 
1204 charged:
1205 	/* Expand the kernel pmap, if necessary. */
1206 	if (map == kernel_map && end > kernel_vm_end)
1207 		pmap_growkernel(end);
1208 	if (object != NULL) {
1209 		/*
1210 		 * OBJ_ONEMAPPING must be cleared unless this mapping
1211 		 * is trivially proven to be the only mapping for any
1212 		 * of the object's pages.  (Object granularity
1213 		 * reference counting is insufficient to recognize
1214 		 * aliases with precision.)
1215 		 */
1216 		VM_OBJECT_WLOCK(object);
1217 		if (object->ref_count > 1 || object->shadow_count != 0)
1218 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1219 		VM_OBJECT_WUNLOCK(object);
1220 	}
1221 	else if ((prev_entry != &map->header) &&
1222 		 (prev_entry->eflags == protoeflags) &&
1223 		 (prev_entry->end == start) &&
1224 		 (prev_entry->wired_count == 0) &&
1225 		 (prev_entry->cred == cred ||
1226 		  (prev_entry->object.vm_object != NULL &&
1227 		   (prev_entry->object.vm_object->cred == cred))) &&
1228 		   vm_object_coalesce(prev_entry->object.vm_object,
1229 		       prev_entry->offset,
1230 		       (vm_size_t)(prev_entry->end - prev_entry->start),
1231 		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
1232 		/*
1233 		 * We were able to extend the object.  Determine if we
1234 		 * can extend the previous map entry to include the
1235 		 * new range as well.
1236 		 */
1237 		if ((prev_entry->inheritance == inheritance) &&
1238 		    (prev_entry->protection == prot) &&
1239 		    (prev_entry->max_protection == max)) {
1240 			map->size += (end - prev_entry->end);
1241 			prev_entry->end = end;
1242 			vm_map_entry_resize_free(map, prev_entry);
1243 			vm_map_simplify_entry(map, prev_entry);
1244 			if (cred != NULL)
1245 				crfree(cred);
1246 			return (KERN_SUCCESS);
1247 		}
1248 
1249 		/*
1250 		 * If we can extend the object but cannot extend the
1251 		 * map entry, we have to create a new map entry.  We
1252 		 * must bump the ref count on the extended object to
1253 		 * account for it.  object may be NULL.
1254 		 */
1255 		object = prev_entry->object.vm_object;
1256 		offset = prev_entry->offset +
1257 			(prev_entry->end - prev_entry->start);
1258 		vm_object_reference(object);
1259 		if (cred != NULL && object != NULL && object->cred != NULL &&
1260 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1261 			/* Object already accounts for this uid. */
1262 			crfree(cred);
1263 			cred = NULL;
1264 		}
1265 	}
1266 
1267 	/*
1268 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1269 	 * in things like the buffer map where we manage kva but do not manage
1270 	 * backing objects.
1271 	 */
1272 
1273 	/*
1274 	 * Create a new entry
1275 	 */
1276 	new_entry = vm_map_entry_create(map);
1277 	new_entry->start = start;
1278 	new_entry->end = end;
1279 	new_entry->cred = NULL;
1280 
1281 	new_entry->eflags = protoeflags;
1282 	new_entry->object.vm_object = object;
1283 	new_entry->offset = offset;
1284 	new_entry->avail_ssize = 0;
1285 
1286 	new_entry->inheritance = inheritance;
1287 	new_entry->protection = prot;
1288 	new_entry->max_protection = max;
1289 	new_entry->wired_count = 0;
1290 	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1291 	new_entry->next_read = OFF_TO_IDX(offset);
1292 
1293 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1294 	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
1295 	new_entry->cred = cred;
1296 
1297 	/*
1298 	 * Insert the new entry into the list
1299 	 */
1300 	vm_map_entry_link(map, prev_entry, new_entry);
1301 	map->size += new_entry->end - new_entry->start;
1302 
1303 	/*
1304 	 * It may be possible to merge the new entry with the next and/or
1305 	 * previous entries.  However, due to MAP_STACK_* being a hack, a
1306 	 * panic can result from merging such entries.
1307 	 */
1308 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
1309 		vm_map_simplify_entry(map, new_entry);
1310 
1311 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1312 		vm_map_pmap_enter(map, start, prot,
1313 				    object, OFF_TO_IDX(offset), end - start,
1314 				    cow & MAP_PREFAULT_PARTIAL);
1315 	}
1316 
1317 	return (KERN_SUCCESS);
1318 }
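
/*
 *	Sketch of a call site honoring the reference-count contract above
 *	(names illustrative):
 *
 *		vm_object_reference(object);
 *		rv = vm_map_insert(map, object, 0, start, end,
 *		    VM_PROT_RW, VM_PROT_ALL, 0);
 *		if (rv != KERN_SUCCESS)
 *			vm_object_deallocate(object);
 */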
1319 
1320 /*
1321  *	vm_map_findspace:
1322  *
1323  *	Find the first fit (lowest VM address) for "length" free bytes
1324  *	beginning at address >= start in the given map.
1325  *
1326  *	In a vm_map_entry, "adj_free" is the amount of free space
1327  *	adjacent (higher address) to this entry, and "max_free" is the
1328  *	maximum amount of contiguous free space in its subtree.  This
1329  *	allows finding a free region in one path down the tree, so
1330  *	O(log n) amortized with splay trees.
1331  *
1332  *	The map must be locked, and leaves it so.
1333  *
1334  *	Returns: 0 on success, and starting address in *addr,
1335  *		 1 if insufficient space.
1336  */
1337 int
1338 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1339     vm_offset_t *addr)	/* OUT */
1340 {
1341 	vm_map_entry_t entry;
1342 	vm_offset_t st;
1343 
1344 	/*
1345 	 * Request must fit within min/max VM address and must avoid
1346 	 * address wrap.
1347 	 */
1348 	if (start < map->min_offset)
1349 		start = map->min_offset;
1350 	if (start + length > map->max_offset || start + length < start)
1351 		return (1);
1352 
1353 	/* Empty tree means wide open address space. */
1354 	if (map->root == NULL) {
1355 		*addr = start;
1356 		return (0);
1357 	}
1358 
1359 	/*
1360 	 * After splay, if start comes before root node, then there
1361 	 * must be a gap from start to the root.
1362 	 */
1363 	map->root = vm_map_entry_splay(start, map->root);
1364 	if (start + length <= map->root->start) {
1365 		*addr = start;
1366 		return (0);
1367 	}
1368 
1369 	/*
1370 	 * Root is the last node that might begin its gap before
1371 	 * start, and this is the last comparison where address
1372 	 * wrap might be a problem.
1373 	 */
1374 	st = (start > map->root->end) ? start : map->root->end;
1375 	if (length <= map->root->end + map->root->adj_free - st) {
1376 		*addr = st;
1377 		return (0);
1378 	}
1379 
1380 	/* With max_free, can immediately tell if no solution. */
1381 	entry = map->root->right;
1382 	if (entry == NULL || length > entry->max_free)
1383 		return (1);
1384 
1385 	/*
1386 	 * Search the right subtree in the order: left subtree, root,
1387 	 * right subtree (first fit).  The previous splay implies that
1388 	 * all regions in the right subtree have addresses > start.
1389 	 */
1390 	while (entry != NULL) {
1391 		if (entry->left != NULL && entry->left->max_free >= length)
1392 			entry = entry->left;
1393 		else if (entry->adj_free >= length) {
1394 			*addr = entry->end;
1395 			return (0);
1396 		} else
1397 			entry = entry->right;
1398 	}
1399 
1400 	/* Can't get here, so panic if we do. */
1401 	panic("vm_map_findspace: max_free corrupt");
1402 }
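
/*
 *	Worked example (hypothetical): if the root entry ends at 0x2000 with
 *	adj_free 0x1000, a request for 0x3000 bytes cannot use that gap; the
 *	search then descends the right subtree, where any subtree whose
 *	max_free is below 0x3000 is skipped without being visited.
 */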
1403 
1404 int
1405 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1406     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1407     vm_prot_t max, int cow)
1408 {
1409 	vm_offset_t end;
1410 	int result;
1411 
1412 	end = start + length;
1413 	vm_map_lock(map);
1414 	VM_MAP_RANGE_CHECK(map, start, end);
1415 	(void) vm_map_delete(map, start, end);
1416 	result = vm_map_insert(map, object, offset, start, end, prot,
1417 	    max, cow);
1418 	vm_map_unlock(map);
1419 	return (result);
1420 }
1421 
1422 /*
1423  *	vm_map_find finds an unallocated region in the target address
1424  *	map with the given length.  The search is defined to be
1425  *	first-fit from the specified address; the region found is
1426  *	returned in the same parameter.
1427  *
1428  *	If object is non-NULL, ref count must be bumped by caller
1429  *	prior to making call to account for the new entry.
1430  */
1431 int
1432 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1433 	    vm_offset_t *addr,	/* IN/OUT */
1434 	    vm_size_t length, int find_space, vm_prot_t prot,
1435 	    vm_prot_t max, int cow)
1436 {
1437 	vm_offset_t start, initial_addr;
1438 	int result;
1439 
1440 	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1441 	    (object->flags & OBJ_COLORED) == 0))
1442 		find_space = VMFS_ANY_SPACE;
1443 	initial_addr = *addr;
1444 again:
1445 	start = initial_addr;
1446 	vm_map_lock(map);
1447 	do {
1448 		if (find_space != VMFS_NO_SPACE) {
1449 			if (vm_map_findspace(map, start, length, addr)) {
1450 				vm_map_unlock(map);
1451 				if (find_space == VMFS_OPTIMAL_SPACE) {
1452 					find_space = VMFS_ANY_SPACE;
1453 					goto again;
1454 				}
1455 				return (KERN_NO_SPACE);
1456 			}
1457 			switch (find_space) {
1458 			case VMFS_ALIGNED_SPACE:
1459 			case VMFS_OPTIMAL_SPACE:
1460 				pmap_align_superpage(object, offset, addr,
1461 				    length);
1462 				break;
1463 			default:
1464 				break;
1465 			}
1466 
1467 			start = *addr;
1468 		}
1469 		result = vm_map_insert(map, object, offset, start, start +
1470 		    length, prot, max, cow);
1471 	} while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE ||
1472 	    find_space == VMFS_OPTIMAL_SPACE));
1473 	vm_map_unlock(map);
1474 	return (result);
1475 }
1476 
1477 /*
1478  *	vm_map_simplify_entry:
1479  *
1480  *	Simplify the given map entry by merging with either neighbor.  This
1481  *	routine also has the ability to merge with both neighbors.
1482  *
1483  *	The map must be locked.
1484  *
1485  *	This routine guarantees that the passed entry remains valid (though
1486  *	possibly extended).  When merging, this routine may delete one or
1487  *	both neighbors.
1488  */
1489 void
1490 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1491 {
1492 	vm_map_entry_t next, prev;
1493 	vm_size_t prevsize, esize;
1494 
1495 	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1496 		return;
1497 
1498 	prev = entry->prev;
1499 	if (prev != &map->header) {
1500 		prevsize = prev->end - prev->start;
1501 		if ( (prev->end == entry->start) &&
1502 		     (prev->object.vm_object == entry->object.vm_object) &&
1503 		     (!prev->object.vm_object ||
1504 			(prev->offset + prevsize == entry->offset)) &&
1505 		     (prev->eflags == entry->eflags) &&
1506 		     (prev->protection == entry->protection) &&
1507 		     (prev->max_protection == entry->max_protection) &&
1508 		     (prev->inheritance == entry->inheritance) &&
1509 		     (prev->wired_count == entry->wired_count) &&
1510 		     (prev->cred == entry->cred)) {
1511 			vm_map_entry_unlink(map, prev);
1512 			entry->start = prev->start;
1513 			entry->offset = prev->offset;
1514 			if (entry->prev != &map->header)
1515 				vm_map_entry_resize_free(map, entry->prev);
1516 
1517 			/*
1518 			 * If the backing object is a vnode object,
1519 			 * vm_object_deallocate() calls vrele().
1520 			 * However, vrele() does not lock the vnode
1521 			 * because the vnode has additional
1522 			 * references.  Thus, the map lock can be kept
1523 			 * without causing a lock-order reversal with
1524 			 * the vnode lock.
1525 			 *
1526 			 * Since we count the number of virtual page
1527 			 * mappings in object->un_pager.vnp.writemappings,
1528 			 * the writemappings value should not be adjusted
1529 			 * when the entry is disposed of.
1530 			 */
1531 			if (prev->object.vm_object)
1532 				vm_object_deallocate(prev->object.vm_object);
1533 			if (prev->cred != NULL)
1534 				crfree(prev->cred);
1535 			vm_map_entry_dispose(map, prev);
1536 		}
1537 	}
1538 
1539 	next = entry->next;
1540 	if (next != &map->header) {
1541 		esize = entry->end - entry->start;
1542 		if ((entry->end == next->start) &&
1543 		    (next->object.vm_object == entry->object.vm_object) &&
1544 		     (!entry->object.vm_object ||
1545 			(entry->offset + esize == next->offset)) &&
1546 		    (next->eflags == entry->eflags) &&
1547 		    (next->protection == entry->protection) &&
1548 		    (next->max_protection == entry->max_protection) &&
1549 		    (next->inheritance == entry->inheritance) &&
1550 		    (next->wired_count == entry->wired_count) &&
1551 		    (next->cred == entry->cred)) {
1552 			vm_map_entry_unlink(map, next);
1553 			entry->end = next->end;
1554 			vm_map_entry_resize_free(map, entry);
1555 
1556 			/*
1557 			 * See comment above.
1558 			 */
1559 			if (next->object.vm_object)
1560 				vm_object_deallocate(next->object.vm_object);
1561 			if (next->cred != NULL)
1562 				crfree(next->cred);
1563 			vm_map_entry_dispose(map, next);
1564 		}
1565 	}
1566 }
1567 /*
1568  *	vm_map_clip_start:	[ internal use only ]
1569  *
1570  *	Asserts that the given entry begins at or after
1571  *	the specified address; if necessary,
1572  *	it splits the entry into two.
1573  */
1574 #define vm_map_clip_start(map, entry, startaddr) \
1575 { \
1576 	if (startaddr > entry->start) \
1577 		_vm_map_clip_start(map, entry, startaddr); \
1578 }
1579 
1580 /*
1581  *	This routine is called only when it is known that
1582  *	the entry must be split.
1583  */
1584 static void
1585 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1586 {
1587 	vm_map_entry_t new_entry;
1588 
1589 	VM_MAP_ASSERT_LOCKED(map);
1590 
1591 	/*
1592 	 * Split off the front portion -- note that we must insert the new
1593 	 * entry BEFORE this one, so that this entry has the specified
1594 	 * starting address.
1595 	 */
1596 	vm_map_simplify_entry(map, entry);
1597 
1598 	/*
1599 	 * If there is no object backing this entry, we might as well create
1600 	 * one now.  If we defer it, an object can get created after the map
1601 	 * is clipped, and individual objects will be created for the split-up
1602 	 * map.  This is a bit of a hack, but is also about the best place to
1603 	 * put this improvement.
1604 	 */
1605 	if (entry->object.vm_object == NULL && !map->system_map) {
1606 		vm_object_t object;
1607 		object = vm_object_allocate(OBJT_DEFAULT,
1608 				atop(entry->end - entry->start));
1609 		entry->object.vm_object = object;
1610 		entry->offset = 0;
1611 		if (entry->cred != NULL) {
1612 			object->cred = entry->cred;
1613 			object->charge = entry->end - entry->start;
1614 			entry->cred = NULL;
1615 		}
1616 	} else if (entry->object.vm_object != NULL &&
1617 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1618 		   entry->cred != NULL) {
1619 		VM_OBJECT_WLOCK(entry->object.vm_object);
1620 		KASSERT(entry->object.vm_object->cred == NULL,
1621 		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1622 		entry->object.vm_object->cred = entry->cred;
1623 		entry->object.vm_object->charge = entry->end - entry->start;
1624 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1625 		entry->cred = NULL;
1626 	}
1627 
1628 	new_entry = vm_map_entry_create(map);
1629 	*new_entry = *entry;
1630 
1631 	new_entry->end = start;
1632 	entry->offset += (start - entry->start);
1633 	entry->start = start;
1634 	if (new_entry->cred != NULL)
1635 		crhold(entry->cred);
1636 
1637 	vm_map_entry_link(map, entry->prev, new_entry);
1638 
1639 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1640 		vm_object_reference(new_entry->object.vm_object);
1641 		/*
1642 		 * The object->un_pager.vnp.writemappings for the
1643 		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1644 		 * kept as is here.  The virtual pages are
1645 		 * re-distributed among the clipped entries, so the sum is
1646 		 * left the same.
1647 		 */
1648 	}
1649 }
1650 
1651 /*
1652  *	vm_map_clip_end:	[ internal use only ]
1653  *
1654  *	Asserts that the given entry ends at or before
1655  *	the specified address; if necessary,
1656  *	it splits the entry into two.
1657  */
1658 #define vm_map_clip_end(map, entry, endaddr) \
1659 { \
1660 	if ((endaddr) < (entry->end)) \
1661 		_vm_map_clip_end((map), (entry), (endaddr)); \
1662 }
1663 
1664 /*
1665  *	This routine is called only when it is known that
1666  *	the entry must be split.
1667  */
1668 static void
1669 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1670 {
1671 	vm_map_entry_t new_entry;
1672 
1673 	VM_MAP_ASSERT_LOCKED(map);
1674 
1675 	/*
1676 	 * If there is no object backing this entry, we might as well create
1677 	 * one now.  If we defer it, an object can get created after the map
1678 	 * is clipped, and individual objects will be created for the split-up
1679 	 * map.  This is a bit of a hack, but is also about the best place to
1680 	 * put this improvement.
1681 	 */
1682 	if (entry->object.vm_object == NULL && !map->system_map) {
1683 		vm_object_t object;
1684 		object = vm_object_allocate(OBJT_DEFAULT,
1685 				atop(entry->end - entry->start));
1686 		entry->object.vm_object = object;
1687 		entry->offset = 0;
1688 		if (entry->cred != NULL) {
1689 			object->cred = entry->cred;
1690 			object->charge = entry->end - entry->start;
1691 			entry->cred = NULL;
1692 		}
1693 	} else if (entry->object.vm_object != NULL &&
1694 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1695 		   entry->cred != NULL) {
1696 		VM_OBJECT_WLOCK(entry->object.vm_object);
1697 		KASSERT(entry->object.vm_object->cred == NULL,
1698 		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1699 		entry->object.vm_object->cred = entry->cred;
1700 		entry->object.vm_object->charge = entry->end - entry->start;
1701 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1702 		entry->cred = NULL;
1703 	}
1704 
1705 	/*
1706 	 * Create a new entry and insert it AFTER the specified entry
1707 	 */
1708 	new_entry = vm_map_entry_create(map);
1709 	*new_entry = *entry;
1710 
1711 	new_entry->start = entry->end = end;
1712 	new_entry->offset += (end - entry->start);
1713 	if (new_entry->cred != NULL)
1714 		crhold(entry->cred);
1715 
1716 	vm_map_entry_link(map, entry, new_entry);
1717 
1718 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1719 		vm_object_reference(new_entry->object.vm_object);
1720 	}
1721 }
1722 
1723 /*
1724  *	vm_map_submap:		[ kernel use only ]
1725  *
1726  *	Mark the given range as handled by a subordinate map.
1727  *
1728  *	This range must have been created with vm_map_find,
1729  *	and no other operations may have been performed on this
1730  *	range prior to calling vm_map_submap.
1731  *
1732  *	Only a limited number of operations can be performed
1733  *	within this range after calling vm_map_submap:
1734  *		vm_fault
1735  *	[Don't try vm_map_copy!]
1736  *
1737  *	To remove a submapping, one must first remove the
1738  *	range from the superior map, and then destroy the
1739  *	submap (if desired).  [Better yet, don't try it.]
1740  */
1741 int
1742 vm_map_submap(
1743 	vm_map_t map,
1744 	vm_offset_t start,
1745 	vm_offset_t end,
1746 	vm_map_t submap)
1747 {
1748 	vm_map_entry_t entry;
1749 	int result = KERN_INVALID_ARGUMENT;
1750 
1751 	vm_map_lock(map);
1752 
1753 	VM_MAP_RANGE_CHECK(map, start, end);
1754 
1755 	if (vm_map_lookup_entry(map, start, &entry)) {
1756 		vm_map_clip_start(map, entry, start);
1757 	} else
1758 		entry = entry->next;
1759 
1760 	vm_map_clip_end(map, entry, end);
1761 
1762 	if ((entry->start == start) && (entry->end == end) &&
1763 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1764 	    (entry->object.vm_object == NULL)) {
1765 		entry->object.sub_map = submap;
1766 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1767 		result = KERN_SUCCESS;
1768 	}
1769 	vm_map_unlock(map);
1770 
1771 	return (result);
1772 }
1773 
1774 /*
1775  * The maximum number of pages to map
1776  */
1777 #define	MAX_INIT_PT	96
1778 
1779 /*
1780  *	vm_map_pmap_enter:
1781  *
1782  *	Preload read-only mappings for the specified object's resident pages
1783  *	into the target map.  If "flags" is MAP_PREFAULT_PARTIAL, then only
1784  *	the resident pages within the address range [addr, addr + ulmin(size,
1785  *	ptoa(MAX_INIT_PT))) are mapped.  Otherwise, all resident pages within
1786  *	the specified address range are mapped.  This eliminates many soft
1787  *	faults on process startup and immediately after an mmap(2).  Because
1788  *	these are speculative mappings, cached pages are not reactivated and
1789  *	mapped.
1790  */
1791 void
1792 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1793     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1794 {
1795 	vm_offset_t start;
1796 	vm_page_t p, p_start;
1797 	vm_pindex_t psize, tmpidx;
1798 
1799 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1800 		return;
1801 	VM_OBJECT_RLOCK(object);
1802 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1803 		VM_OBJECT_RUNLOCK(object);
1804 		VM_OBJECT_WLOCK(object);
1805 		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1806 			pmap_object_init_pt(map->pmap, addr, object, pindex,
1807 			    size);
1808 			VM_OBJECT_WUNLOCK(object);
1809 			return;
1810 		}
1811 		VM_OBJECT_LOCK_DOWNGRADE(object);
1812 	}
1813 
1814 	psize = atop(size);
1815 	if (psize > MAX_INIT_PT && (flags & MAP_PREFAULT_PARTIAL) != 0)
1816 		psize = MAX_INIT_PT;
1817 	if (psize + pindex > object->size) {
1818 		if (object->size < pindex) {
1819 			VM_OBJECT_RUNLOCK(object);
1820 			return;
1821 		}
1822 		psize = object->size - pindex;
1823 	}
1824 
1825 	start = 0;
1826 	p_start = NULL;
1827 
1828 	p = vm_page_find_least(object, pindex);
1829 	/*
1830 	 * Assert: the variable p is either (1) the page with the
1831 	 * least pindex greater than or equal to the parameter pindex
1832 	 * or (2) NULL.
1833 	 */
1834 	for (;
1835 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1836 	     p = TAILQ_NEXT(p, listq)) {
1837 		/*
1838 		 * Don't allow a prefault triggered by madvise to dig
1839 		 * into the free page reserve by allocating pv entries.
1840 		 */
1841 		if ((flags & MAP_PREFAULT_MADVISE) &&
1842 		    cnt.v_free_count < cnt.v_free_reserved) {
1843 			psize = tmpidx;
1844 			break;
1845 		}
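		/*
		 * Batch runs of fully valid resident pages and enter
		 * each run with a single pmap_enter_object() call.
		 */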
1846 		if (p->valid == VM_PAGE_BITS_ALL) {
1847 			if (p_start == NULL) {
1848 				start = addr + ptoa(tmpidx);
1849 				p_start = p;
1850 			}
1851 		} else if (p_start != NULL) {
1852 			pmap_enter_object(map->pmap, start, addr +
1853 			    ptoa(tmpidx), p_start, prot);
1854 			p_start = NULL;
1855 		}
1856 	}
1857 	if (p_start != NULL)
1858 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1859 		    p_start, prot);
1860 	VM_OBJECT_RUNLOCK(object);
1861 }
1862 
1863 /*
1864  *	vm_map_protect:
1865  *
1866  *	Sets the protection of the specified address
1867  *	region in the target map.  If "set_max" is
1868  *	specified, the maximum protection is to be set;
1869  *	otherwise, only the current protection is affected.
1870  */
1871 int
1872 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1873 	       vm_prot_t new_prot, boolean_t set_max)
1874 {
1875 	vm_map_entry_t current, entry;
1876 	vm_object_t obj;
1877 	struct ucred *cred;
1878 	vm_prot_t old_prot;
1879 
1880 	vm_map_lock(map);
1881 
1882 	VM_MAP_RANGE_CHECK(map, start, end);
1883 
1884 	if (vm_map_lookup_entry(map, start, &entry)) {
1885 		vm_map_clip_start(map, entry, start);
1886 	} else {
1887 		entry = entry->next;
1888 	}
1889 
1890 	/*
1891 	 * Make a first pass to check for protection violations.
1892 	 */
1893 	current = entry;
1894 	while ((current != &map->header) && (current->start < end)) {
1895 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1896 			vm_map_unlock(map);
1897 			return (KERN_INVALID_ARGUMENT);
1898 		}
1899 		if ((new_prot & current->max_protection) != new_prot) {
1900 			vm_map_unlock(map);
1901 			return (KERN_PROTECTION_FAILURE);
1902 		}
1903 		current = current->next;
1904 	}
1905 
1906 
1907 	/*
1908 	 * Do an accounting pass for private read-only mappings that
1909 	 * will now do copy-on-write due to the newly allowed write access
1910 	 * (e.g., a debugger setting a breakpoint in the text segment).
1911 	 */
1912 	for (current = entry; (current != &map->header) &&
1913 	     (current->start < end); current = current->next) {
1914 
1915 		vm_map_clip_end(map, current, end);
1916 
1917 		if (set_max ||
1918 		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1919 		    ENTRY_CHARGED(current)) {
1920 			continue;
1921 		}
1922 
1923 		cred = curthread->td_ucred;
1924 		obj = current->object.vm_object;
1925 
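		/*
		 * If the entry has no backing object, or will receive a
		 * private copy on the first write, charge the swap
		 * reservation to the entry itself.
		 */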
1926 		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1927 			if (!swap_reserve(current->end - current->start)) {
1928 				vm_map_unlock(map);
1929 				return (KERN_RESOURCE_SHORTAGE);
1930 			}
1931 			crhold(cred);
1932 			current->cred = cred;
1933 			continue;
1934 		}
1935 
1936 		VM_OBJECT_WLOCK(obj);
1937 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1938 			VM_OBJECT_WUNLOCK(obj);
1939 			continue;
1940 		}
1941 
1942 		/*
1943 		 * Charge for the whole object allocation now, since
1944 		 * we cannot distinguish between non-charged and
1945 		 * charged clipped mapping of the same object later.
1946 		 */
1947 		KASSERT(obj->charge == 0,
1948 		    ("vm_map_protect: object %p overcharged\n", obj));
1949 		if (!swap_reserve(ptoa(obj->size))) {
1950 			VM_OBJECT_WUNLOCK(obj);
1951 			vm_map_unlock(map);
1952 			return (KERN_RESOURCE_SHORTAGE);
1953 		}
1954 
1955 		crhold(cred);
1956 		obj->cred = cred;
1957 		obj->charge = ptoa(obj->size);
1958 		VM_OBJECT_WUNLOCK(obj);
1959 	}
1960 
1961 	/*
1962 	 * Go back and fix up protections. [Note that clipping is not
1963 	 * necessary the second time.]
1964 	 */
1965 	current = entry;
1966 	while ((current != &map->header) && (current->start < end)) {
1967 		old_prot = current->protection;
1968 
1969 		if (set_max)
1970 			current->protection =
1971 			    (current->max_protection = new_prot) &
1972 			    old_prot;
1973 		else
1974 			current->protection = new_prot;
1975 
1976 		if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED))
1977 		     == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) &&
1978 		    (current->protection & VM_PROT_WRITE) != 0 &&
1979 		    (old_prot & VM_PROT_WRITE) == 0) {
1980 			vm_fault_copy_entry(map, map, current, current, NULL);
1981 		}
1982 
1983 		/*
1984 		 * When restricting access, update the physical map.  Worry
1985 		 * about copy-on-write here.
1986 		 */
1987 		if ((old_prot & ~current->protection) != 0) {
1988 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1989 							VM_PROT_ALL)
1990 			pmap_protect(map->pmap, current->start,
1991 			    current->end,
1992 			    current->protection & MASK(current));
1993 #undef	MASK
1994 		}
1995 		vm_map_simplify_entry(map, current);
1996 		current = current->next;
1997 	}
1998 	vm_map_unlock(map);
1999 	return (KERN_SUCCESS);
2000 }
2001 
2002 /*
2003  *	vm_map_madvise:
2004  *
2005  *	This routine traverses a process's map handling the madvise
2006  *	system call.  Advisories are classified as either those affecting
2007  *	the vm_map_entry structure or those affecting the underlying
2008  *	objects.
2009  */
2010 int
2011 vm_map_madvise(
2012 	vm_map_t map,
2013 	vm_offset_t start,
2014 	vm_offset_t end,
2015 	int behav)
2016 {
2017 	vm_map_entry_t current, entry;
2018 	int modify_map = 0;
2019 
2020 	/*
2021 	 * Some madvise calls directly modify the vm_map_entry, in which case
2022 	 * we need to use an exclusive lock on the map and we need to perform
2023 	 * various clipping operations.  Otherwise we only need a read-lock
2024 	 * on the map.
2025 	 */
2026 	switch(behav) {
2027 	case MADV_NORMAL:
2028 	case MADV_SEQUENTIAL:
2029 	case MADV_RANDOM:
2030 	case MADV_NOSYNC:
2031 	case MADV_AUTOSYNC:
2032 	case MADV_NOCORE:
2033 	case MADV_CORE:
2034 		modify_map = 1;
2035 		vm_map_lock(map);
2036 		break;
2037 	case MADV_WILLNEED:
2038 	case MADV_DONTNEED:
2039 	case MADV_FREE:
2040 		vm_map_lock_read(map);
2041 		break;
2042 	default:
2043 		return (KERN_INVALID_ARGUMENT);
2044 	}
2045 
2046 	/*
2047 	 * Locate starting entry and clip if necessary.
2048 	 */
2049 	VM_MAP_RANGE_CHECK(map, start, end);
2050 
2051 	if (vm_map_lookup_entry(map, start, &entry)) {
2052 		if (modify_map)
2053 			vm_map_clip_start(map, entry, start);
2054 	} else {
2055 		entry = entry->next;
2056 	}
2057 
2058 	if (modify_map) {
2059 		/*
2060 		 * madvise behaviors that are implemented in the vm_map_entry.
2061 		 *
2062 		 * We clip the vm_map_entry so that behavioral changes are
2063 		 * limited to the specified address range.
2064 		 */
2065 		for (current = entry;
2066 		     (current != &map->header) && (current->start < end);
2067 		     current = current->next
2068 		) {
2069 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2070 				continue;
2071 
2072 			vm_map_clip_end(map, current, end);
2073 
2074 			switch (behav) {
2075 			case MADV_NORMAL:
2076 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2077 				break;
2078 			case MADV_SEQUENTIAL:
2079 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2080 				break;
2081 			case MADV_RANDOM:
2082 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2083 				break;
2084 			case MADV_NOSYNC:
2085 				current->eflags |= MAP_ENTRY_NOSYNC;
2086 				break;
2087 			case MADV_AUTOSYNC:
2088 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2089 				break;
2090 			case MADV_NOCORE:
2091 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2092 				break;
2093 			case MADV_CORE:
2094 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2095 				break;
2096 			default:
2097 				break;
2098 			}
2099 			vm_map_simplify_entry(map, current);
2100 		}
2101 		vm_map_unlock(map);
2102 	} else {
2103 		vm_pindex_t pstart, pend;
2104 
2105 		/*
2106 		 * madvise behaviors that are implemented in the underlying
2107 		 * vm_object.
2108 		 *
2109 		 * Since we don't clip the vm_map_entry, we have to clip
2110 		 * the vm_object pindex and count.
2111 		 */
2112 		for (current = entry;
2113 		     (current != &map->header) && (current->start < end);
2114 		     current = current->next
2115 		) {
2116 			vm_offset_t useStart;
2117 
2118 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2119 				continue;
2120 
2121 			pstart = OFF_TO_IDX(current->offset);
2122 			pend = pstart + atop(current->end - current->start);
2123 			useStart = current->start;
2124 
2125 			if (current->start < start) {
2126 				pstart += atop(start - current->start);
2127 				useStart = start;
2128 			}
2129 			if (current->end > end)
2130 				pend -= atop(current->end - end);
2131 
2132 			if (pstart >= pend)
2133 				continue;
2134 
2135 			vm_object_madvise(current->object.vm_object, pstart,
2136 			    pend, behav);
2137 			if (behav == MADV_WILLNEED) {
2138 				vm_map_pmap_enter(map,
2139 				    useStart,
2140 				    current->protection,
2141 				    current->object.vm_object,
2142 				    pstart,
2143 				    ptoa(pend - pstart),
2144 				    MAP_PREFAULT_MADVISE
2145 				);
2146 			}
2147 		}
2148 		vm_map_unlock_read(map);
2149 	}
2150 	return (0);
2151 }
2152 
2153 
2154 /*
2155  *	vm_map_inherit:
2156  *
2157  *	Sets the inheritance of the specified address
2158  *	range in the target map.  Inheritance
2159  *	affects how the map will be shared with
2160  *	child maps at the time of vmspace_fork.
2161  */
2162 int
2163 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2164 	       vm_inherit_t new_inheritance)
2165 {
2166 	vm_map_entry_t entry;
2167 	vm_map_entry_t temp_entry;
2168 
2169 	switch (new_inheritance) {
2170 	case VM_INHERIT_NONE:
2171 	case VM_INHERIT_COPY:
2172 	case VM_INHERIT_SHARE:
2173 		break;
2174 	default:
2175 		return (KERN_INVALID_ARGUMENT);
2176 	}
2177 	vm_map_lock(map);
2178 	VM_MAP_RANGE_CHECK(map, start, end);
2179 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2180 		entry = temp_entry;
2181 		vm_map_clip_start(map, entry, start);
2182 	} else
2183 		entry = temp_entry->next;
2184 	while ((entry != &map->header) && (entry->start < end)) {
2185 		vm_map_clip_end(map, entry, end);
2186 		entry->inheritance = new_inheritance;
2187 		vm_map_simplify_entry(map, entry);
2188 		entry = entry->next;
2189 	}
2190 	vm_map_unlock(map);
2191 	return (KERN_SUCCESS);
2192 }
2193 
2194 /*
2195  *	vm_map_unwire:
2196  *
2197  *	Implements both kernel and user unwiring.
2198  */
2199 int
2200 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2201     int flags)
2202 {
2203 	vm_map_entry_t entry, first_entry, tmp_entry;
2204 	vm_offset_t saved_start;
2205 	unsigned int last_timestamp;
2206 	int rv;
2207 	boolean_t need_wakeup, result, user_unwire;
2208 
2209 	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2210 	vm_map_lock(map);
2211 	VM_MAP_RANGE_CHECK(map, start, end);
2212 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2213 		if (flags & VM_MAP_WIRE_HOLESOK)
2214 			first_entry = first_entry->next;
2215 		else {
2216 			vm_map_unlock(map);
2217 			return (KERN_INVALID_ADDRESS);
2218 		}
2219 	}
2220 	last_timestamp = map->timestamp;
2221 	entry = first_entry;
2222 	while (entry != &map->header && entry->start < end) {
2223 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2224 			/*
2225 			 * We have not yet clipped the entry.
2226 			 */
2227 			saved_start = (start >= entry->start) ? start :
2228 			    entry->start;
2229 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2230 			if (vm_map_unlock_and_wait(map, 0)) {
2231 				/*
2232 				 * Allow interruption of user unwiring?
2233 				 */
2234 			}
2235 			vm_map_lock(map);
2236 			if (last_timestamp+1 != map->timestamp) {
2237 				/*
2238 				 * Look again for the entry because the map was
2239 				 * modified while it was unlocked.
2240 				 * Specifically, the entry may have been
2241 				 * clipped, merged, or deleted.
2242 				 */
2243 				if (!vm_map_lookup_entry(map, saved_start,
2244 				    &tmp_entry)) {
2245 					if (flags & VM_MAP_WIRE_HOLESOK)
2246 						tmp_entry = tmp_entry->next;
2247 					else {
2248 						if (saved_start == start) {
2249 							/*
2250 							 * First_entry has been deleted.
2251 							 */
2252 							vm_map_unlock(map);
2253 							return (KERN_INVALID_ADDRESS);
2254 						}
2255 						end = saved_start;
2256 						rv = KERN_INVALID_ADDRESS;
2257 						goto done;
2258 					}
2259 				}
2260 				if (entry == first_entry)
2261 					first_entry = tmp_entry;
2262 				else
2263 					first_entry = NULL;
2264 				entry = tmp_entry;
2265 			}
2266 			last_timestamp = map->timestamp;
2267 			continue;
2268 		}
2269 		vm_map_clip_start(map, entry, start);
2270 		vm_map_clip_end(map, entry, end);
2271 		/*
2272 		 * Mark the entry in case the map lock is released.  (See
2273 		 * above.)
2274 		 */
2275 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2276 		entry->wiring_thread = curthread;
2277 		/*
2278 		 * Check the map for holes in the specified region.
2279 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2280 		 */
2281 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2282 		    (entry->end < end && (entry->next == &map->header ||
2283 		    entry->next->start > entry->end))) {
2284 			end = entry->end;
2285 			rv = KERN_INVALID_ADDRESS;
2286 			goto done;
2287 		}
2288 		/*
2289 		 * If system unwiring, require that the entry is system wired.
2290 		 */
2291 		if (!user_unwire &&
2292 		    vm_map_entry_system_wired_count(entry) == 0) {
2293 			end = entry->end;
2294 			rv = KERN_INVALID_ARGUMENT;
2295 			goto done;
2296 		}
2297 		entry = entry->next;
2298 	}
2299 	rv = KERN_SUCCESS;
2300 done:
2301 	need_wakeup = FALSE;
2302 	if (first_entry == NULL) {
2303 		result = vm_map_lookup_entry(map, start, &first_entry);
2304 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2305 			first_entry = first_entry->next;
2306 		else
2307 			KASSERT(result, ("vm_map_unwire: lookup failed"));
2308 	}
2309 	for (entry = first_entry; entry != &map->header && entry->start < end;
2310 	    entry = entry->next) {
2311 		/*
2312 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2313 		 * space in the unwired region could have been mapped
2314 		 * while the map lock was dropped for draining
2315 		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2316 		 * could be simultaneously wiring this new mapping
2317 		 * entry.  Detect these cases and skip any entries
2318 		 * marked as in transition by us.
2319 		 */
2320 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2321 		    entry->wiring_thread != curthread) {
2322 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2323 			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2324 			continue;
2325 		}
2326 
2327 		if (rv == KERN_SUCCESS && (!user_unwire ||
2328 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2329 			if (user_unwire)
2330 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2331 			entry->wired_count--;
2332 			if (entry->wired_count == 0) {
2333 				/*
2334 				 * Retain the map lock.
2335 				 */
2336 				vm_fault_unwire(map, entry->start, entry->end,
2337 				    entry->object.vm_object != NULL &&
2338 				    (entry->object.vm_object->flags &
2339 				    OBJ_FICTITIOUS) != 0);
2340 			}
2341 		}
2342 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2343 		    ("vm_map_unwire: in-transition flag missing"));
2344 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2345 		entry->wiring_thread = NULL;
2346 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2347 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2348 			need_wakeup = TRUE;
2349 		}
2350 		vm_map_simplify_entry(map, entry);
2351 	}
2352 	vm_map_unlock(map);
2353 	if (need_wakeup)
2354 		vm_map_wakeup(map);
2355 	return (rv);
2356 }
2357 
2358 /*
2359  *	vm_map_wire:
2360  *
2361  *	Implements both kernel and user wiring.
2362  */
2363 int
2364 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2365     int flags)
2366 {
2367 	vm_map_entry_t entry, first_entry, tmp_entry;
2368 	vm_offset_t saved_end, saved_start;
2369 	unsigned int last_timestamp;
2370 	int rv;
2371 	boolean_t fictitious, need_wakeup, result, user_wire;
2372 	vm_prot_t prot;
2373 
2374 	prot = 0;
2375 	if (flags & VM_MAP_WIRE_WRITE)
2376 		prot |= VM_PROT_WRITE;
2377 	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2378 	vm_map_lock(map);
2379 	VM_MAP_RANGE_CHECK(map, start, end);
2380 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2381 		if (flags & VM_MAP_WIRE_HOLESOK)
2382 			first_entry = first_entry->next;
2383 		else {
2384 			vm_map_unlock(map);
2385 			return (KERN_INVALID_ADDRESS);
2386 		}
2387 	}
2388 	last_timestamp = map->timestamp;
2389 	entry = first_entry;
2390 	while (entry != &map->header && entry->start < end) {
2391 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2392 			/*
2393 			 * We have not yet clipped the entry.
2394 			 */
2395 			saved_start = (start >= entry->start) ? start :
2396 			    entry->start;
2397 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2398 			if (vm_map_unlock_and_wait(map, 0)) {
2399 				/*
2400 				 * Allow interruption of user wiring?
2401 				 */
2402 			}
2403 			vm_map_lock(map);
2404 			if (last_timestamp + 1 != map->timestamp) {
2405 				/*
2406 				 * Look again for the entry because the map was
2407 				 * modified while it was unlocked.
2408 				 * Specifically, the entry may have been
2409 				 * clipped, merged, or deleted.
2410 				 */
2411 				if (!vm_map_lookup_entry(map, saved_start,
2412 				    &tmp_entry)) {
2413 					if (flags & VM_MAP_WIRE_HOLESOK)
2414 						tmp_entry = tmp_entry->next;
2415 					else {
2416 						if (saved_start == start) {
2417 							/*
2418 							 * first_entry has been deleted.
2419 							 */
2420 							vm_map_unlock(map);
2421 							return (KERN_INVALID_ADDRESS);
2422 						}
2423 						end = saved_start;
2424 						rv = KERN_INVALID_ADDRESS;
2425 						goto done;
2426 					}
2427 				}
2428 				if (entry == first_entry)
2429 					first_entry = tmp_entry;
2430 				else
2431 					first_entry = NULL;
2432 				entry = tmp_entry;
2433 			}
2434 			last_timestamp = map->timestamp;
2435 			continue;
2436 		}
2437 		vm_map_clip_start(map, entry, start);
2438 		vm_map_clip_end(map, entry, end);
2439 		/*
2440 		 * Mark the entry in case the map lock is released.  (See
2441 		 * above.)
2442 		 */
2443 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2444 		entry->wiring_thread = curthread;
2445 		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2446 		    || (entry->protection & prot) != prot) {
2447 			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2448 			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2449 				end = entry->end;
2450 				rv = KERN_INVALID_ADDRESS;
2451 				goto done;
2452 			}
2453 			goto next_entry;
2454 		}
2455 		if (entry->wired_count == 0) {
2456 			entry->wired_count++;
2457 			saved_start = entry->start;
2458 			saved_end = entry->end;
2459 			fictitious = entry->object.vm_object != NULL &&
2460 			    (entry->object.vm_object->flags &
2461 			    OBJ_FICTITIOUS) != 0;
2462 			/*
2463 			 * Release the map lock, relying on the in-transition
2464 			 * mark.  Mark the map busy for fork.
2465 			 */
2466 			vm_map_busy(map);
2467 			vm_map_unlock(map);
2468 			rv = vm_fault_wire(map, saved_start, saved_end,
2469 			    fictitious);
2470 			vm_map_lock(map);
2471 			vm_map_unbusy(map);
2472 			if (last_timestamp + 1 != map->timestamp) {
2473 				/*
2474 				 * Look again for the entry because the map was
2475 				 * modified while it was unlocked.  The entry
2476 				 * may have been clipped, but NOT merged or
2477 				 * deleted.
2478 				 */
2479 				result = vm_map_lookup_entry(map, saved_start,
2480 				    &tmp_entry);
2481 				KASSERT(result, ("vm_map_wire: lookup failed"));
2482 				if (entry == first_entry)
2483 					first_entry = tmp_entry;
2484 				else
2485 					first_entry = NULL;
2486 				entry = tmp_entry;
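				/*
				 * Advance past any entries clipped out of
				 * the original range, marking each with the
				 * failure sentinel if the wiring failed.
				 */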
2487 				while (entry->end < saved_end) {
2488 					if (rv != KERN_SUCCESS) {
2489 						KASSERT(entry->wired_count == 1,
2490 						    ("vm_map_wire: bad count"));
2491 						entry->wired_count = -1;
2492 					}
2493 					entry = entry->next;
2494 				}
2495 			}
2496 			last_timestamp = map->timestamp;
2497 			if (rv != KERN_SUCCESS) {
2498 				KASSERT(entry->wired_count == 1,
2499 				    ("vm_map_wire: bad count"));
2500 				/*
2501 				 * Assign an out-of-range value to represent
2502 				 * the failure to wire this entry.
2503 				 */
2504 				entry->wired_count = -1;
2505 				end = entry->end;
2506 				goto done;
2507 			}
2508 		} else if (!user_wire ||
2509 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
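			/*
			 * The entry is already wired; record an additional
			 * system wiring, or the first user wiring of this
			 * entry.
			 */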
2510 			entry->wired_count++;
2511 		}
2512 		/*
2513 		 * Check the map for holes in the specified region.
2514 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2515 		 */
2516 	next_entry:
2517 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2518 		    (entry->end < end && (entry->next == &map->header ||
2519 		    entry->next->start > entry->end))) {
2520 			end = entry->end;
2521 			rv = KERN_INVALID_ADDRESS;
2522 			goto done;
2523 		}
2524 		entry = entry->next;
2525 	}
2526 	rv = KERN_SUCCESS;
2527 done:
2528 	need_wakeup = FALSE;
2529 	if (first_entry == NULL) {
2530 		result = vm_map_lookup_entry(map, start, &first_entry);
2531 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2532 			first_entry = first_entry->next;
2533 		else
2534 			KASSERT(result, ("vm_map_wire: lookup failed"));
2535 	}
2536 	for (entry = first_entry; entry != &map->header && entry->start < end;
2537 	    entry = entry->next) {
2538 		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2539 			goto next_entry_done;
2540 
2541 		/*
2542 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2543 		 * space in the unwired region could have been mapped
2544 		 * while the map lock was dropped for faulting in the
2545 		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2546 		 * Moreover, another thread could be simultaneously
2547 		 * wiring this new mapping entry.  Detect these cases
2548 		 * and skip any entries marked as in transition by us.
2549 		 */
2550 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2551 		    entry->wiring_thread != curthread) {
2552 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2553 			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2554 			continue;
2555 		}
2556 
2557 		if (rv == KERN_SUCCESS) {
2558 			if (user_wire)
2559 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2560 		} else if (entry->wired_count == -1) {
2561 			/*
2562 			 * Wiring failed on this entry.  Thus, unwiring is
2563 			 * unnecessary.
2564 			 */
2565 			entry->wired_count = 0;
2566 		} else {
2567 			if (!user_wire ||
2568 			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2569 				entry->wired_count--;
2570 			if (entry->wired_count == 0) {
2571 				/*
2572 				 * Retain the map lock.
2573 				 */
2574 				vm_fault_unwire(map, entry->start, entry->end,
2575 				    entry->object.vm_object != NULL &&
2576 				    (entry->object.vm_object->flags &
2577 				    OBJ_FICTITIOUS) != 0);
2578 			}
2579 		}
2580 	next_entry_done:
2581 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2582 		    ("vm_map_wire: in-transition flag missing %p", entry));
2583 		KASSERT(entry->wiring_thread == curthread,
2584 		    ("vm_map_wire: alien wire %p", entry));
2585 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2586 		    MAP_ENTRY_WIRE_SKIPPED);
2587 		entry->wiring_thread = NULL;
2588 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2589 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2590 			need_wakeup = TRUE;
2591 		}
2592 		vm_map_simplify_entry(map, entry);
2593 	}
2594 	vm_map_unlock(map);
2595 	if (need_wakeup)
2596 		vm_map_wakeup(map);
2597 	return (rv);
2598 }
2599 
2600 /*
2601  * vm_map_sync
2602  *
2603  * Push any dirty cached pages in the address range to their pager.
2604  * If syncio is TRUE, dirty pages are written synchronously.
2605  * If invalidate is TRUE, any cached pages are freed as well.
2606  *
2607  * If the size of the region from start to end is zero, we are
2608  * supposed to flush all modified pages within the region containing
2609  * start.  Unfortunately, a region can be split or coalesced with
2610  * neighboring regions, making it difficult to determine what the
2611  * original region was.  Therefore, we approximate this requirement by
2612  * flushing the current region containing start.
2613  *
2614  * Returns an error if any part of the specified range is not mapped.
2615  */
2616 int
2617 vm_map_sync(
2618 	vm_map_t map,
2619 	vm_offset_t start,
2620 	vm_offset_t end,
2621 	boolean_t syncio,
2622 	boolean_t invalidate)
2623 {
2624 	vm_map_entry_t current;
2625 	vm_map_entry_t entry;
2626 	vm_size_t size;
2627 	vm_object_t object;
2628 	vm_ooffset_t offset;
2629 	unsigned int last_timestamp;
2630 	boolean_t failed;
2631 
2632 	vm_map_lock_read(map);
2633 	VM_MAP_RANGE_CHECK(map, start, end);
2634 	if (!vm_map_lookup_entry(map, start, &entry)) {
2635 		vm_map_unlock_read(map);
2636 		return (KERN_INVALID_ADDRESS);
2637 	} else if (start == end) {
2638 		start = entry->start;
2639 		end = entry->end;
2640 	}
2641 	/*
2642 	 * Make a first pass to check for user-wired memory and holes.
2643 	 */
2644 	for (current = entry; current != &map->header && current->start < end;
2645 	    current = current->next) {
2646 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2647 			vm_map_unlock_read(map);
2648 			return (KERN_INVALID_ARGUMENT);
2649 		}
2650 		if (end > current->end &&
2651 		    (current->next == &map->header ||
2652 			current->end != current->next->start)) {
2653 			vm_map_unlock_read(map);
2654 			return (KERN_INVALID_ADDRESS);
2655 		}
2656 	}
2657 
2658 	if (invalidate)
2659 		pmap_remove(map->pmap, start, end);
2660 	failed = FALSE;
2661 
2662 	/*
2663 	 * Make a second pass, cleaning/uncaching pages from the indicated
2664 	 * objects as we go.
2665 	 */
2666 	for (current = entry; current != &map->header && current->start < end;) {
2667 		offset = current->offset + (start - current->start);
2668 		size = (end <= current->end ? end : current->end) - start;
2669 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2670 			vm_map_t smap;
2671 			vm_map_entry_t tentry;
2672 			vm_size_t tsize;
2673 
2674 			smap = current->object.sub_map;
2675 			vm_map_lock_read(smap);
2676 			(void) vm_map_lookup_entry(smap, offset, &tentry);
2677 			tsize = tentry->end - offset;
2678 			if (tsize < size)
2679 				size = tsize;
2680 			object = tentry->object.vm_object;
2681 			offset = tentry->offset + (offset - tentry->start);
2682 			vm_map_unlock_read(smap);
2683 		} else {
2684 			object = current->object.vm_object;
2685 		}
2686 		vm_object_reference(object);
2687 		last_timestamp = map->timestamp;
2688 		vm_map_unlock_read(map);
2689 		if (!vm_object_sync(object, offset, size, syncio, invalidate))
2690 			failed = TRUE;
2691 		start += size;
2692 		vm_object_deallocate(object);
2693 		vm_map_lock_read(map);
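		/*
		 * If the map was not modified while unlocked, or if the
		 * re-lookup of start falls into a hole, advance to the
		 * next entry; otherwise the lookup has repositioned
		 * "current" at the entry that now contains start.
		 */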
2694 		if (last_timestamp == map->timestamp ||
2695 		    !vm_map_lookup_entry(map, start, &current))
2696 			current = current->next;
2697 	}
2698 
2699 	vm_map_unlock_read(map);
2700 	return (failed ? KERN_FAILURE : KERN_SUCCESS);
2701 }
2702 
2703 /*
2704  *	vm_map_entry_unwire:	[ internal use only ]
2705  *
2706  *	Make the region specified by this entry pageable.
2707  *
2708  *	The map in question should be locked.
2709  *	[This is the reason for this routine's existence.]
2710  */
2711 static void
2712 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2713 {
2714 	vm_fault_unwire(map, entry->start, entry->end,
2715 	    entry->object.vm_object != NULL &&
2716 	    (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
2717 	entry->wired_count = 0;
2718 }
2719 
2720 static void
2721 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2722 {
2723 
2724 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2725 		vm_object_deallocate(entry->object.vm_object);
2726 	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2727 }
2728 
2729 /*
2730  *	vm_map_entry_delete:	[ internal use only ]
2731  *
2732  *	Deallocate the given entry from the target map.
2733  */
2734 static void
2735 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2736 {
2737 	vm_object_t object;
2738 	vm_pindex_t offidxstart, offidxend, count, size1;
2739 	vm_ooffset_t size;
2740 
2741 	vm_map_entry_unlink(map, entry);
2742 	object = entry->object.vm_object;
2743 	size = entry->end - entry->start;
2744 	map->size -= size;
2745 
2746 	if (entry->cred != NULL) {
2747 		swap_release_by_cred(size, entry->cred);
2748 		crfree(entry->cred);
2749 	}
2750 
2751 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2752 	    (object != NULL)) {
2753 		KASSERT(entry->cred == NULL || object->cred == NULL ||
2754 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2755 		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2756 		count = OFF_TO_IDX(size);
2757 		offidxstart = OFF_TO_IDX(entry->offset);
2758 		offidxend = offidxstart + count;
2759 		VM_OBJECT_WLOCK(object);
2760 		if (object->ref_count != 1 &&
2761 		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2762 		    object == kernel_object || object == kmem_object)) {
2763 			vm_object_collapse(object);
2764 
2765 			/*
2766 			 * The option OBJPR_NOTMAPPED can be passed here
2767 			 * because vm_map_delete() already performed
2768 			 * pmap_remove() on the only mapping to this range
2769 			 * of pages.
2770 			 */
2771 			vm_object_page_remove(object, offidxstart, offidxend,
2772 			    OBJPR_NOTMAPPED);
2773 			if (object->type == OBJT_SWAP)
2774 				swap_pager_freespace(object, offidxstart, count);
2775 			if (offidxend >= object->size &&
2776 			    offidxstart < object->size) {
2777 				size1 = object->size;
2778 				object->size = offidxstart;
2779 				if (object->cred != NULL) {
2780 					size1 -= object->size;
2781 					KASSERT(object->charge >= ptoa(size1),
2782 					    ("vm_map_entry_delete: object->charge < 0"));
2783 					swap_release_by_cred(ptoa(size1), object->cred);
2784 					object->charge -= ptoa(size1);
2785 				}
2786 			}
2787 		}
2788 		VM_OBJECT_WUNLOCK(object);
2789 	} else
2790 		entry->object.vm_object = NULL;
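	/*
	 * System map entries are freed immediately; user map entries are
	 * queued on the thread's deferred list and freed by
	 * vm_map_process_deferred() once the map lock has been dropped.
	 */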
2791 	if (map->system_map)
2792 		vm_map_entry_deallocate(entry, TRUE);
2793 	else {
2794 		entry->next = curthread->td_map_def_user;
2795 		curthread->td_map_def_user = entry;
2796 	}
2797 }
2798 
2799 /*
2800  *	vm_map_delete:	[ internal use only ]
2801  *
2802  *	Deallocates the given address range from the target
2803  *	map.
2804  */
2805 int
2806 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2807 {
2808 	vm_map_entry_t entry;
2809 	vm_map_entry_t first_entry;
2810 
2811 	VM_MAP_ASSERT_LOCKED(map);
2812 
2813 	/*
2814 	 * Find the start of the region, and clip it
2815 	 */
2816 	if (!vm_map_lookup_entry(map, start, &first_entry))
2817 		entry = first_entry->next;
2818 	else {
2819 		entry = first_entry;
2820 		vm_map_clip_start(map, entry, start);
2821 	}
2822 
2823 	/*
2824 	 * Step through all entries in this region
2825 	 */
2826 	while ((entry != &map->header) && (entry->start < end)) {
2827 		vm_map_entry_t next;
2828 
2829 		/*
2830 		 * Wait for wiring or unwiring of an entry to complete.
2831 		 * Also wait for any system wirings to disappear on
2832 		 * user maps.
2833 		 */
2834 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2835 		    (vm_map_pmap(map) != kernel_pmap &&
2836 		    vm_map_entry_system_wired_count(entry) != 0)) {
2837 			unsigned int last_timestamp;
2838 			vm_offset_t saved_start;
2839 			vm_map_entry_t tmp_entry;
2840 
2841 			saved_start = entry->start;
2842 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2843 			last_timestamp = map->timestamp;
2844 			(void) vm_map_unlock_and_wait(map, 0);
2845 			vm_map_lock(map);
2846 			if (last_timestamp + 1 != map->timestamp) {
2847 				/*
2848 				 * Look again for the entry because the map was
2849 				 * modified while it was unlocked.
2850 				 * Specifically, the entry may have been
2851 				 * clipped, merged, or deleted.
2852 				 */
2853 				if (!vm_map_lookup_entry(map, saved_start,
2854 							 &tmp_entry))
2855 					entry = tmp_entry->next;
2856 				else {
2857 					entry = tmp_entry;
2858 					vm_map_clip_start(map, entry,
2859 							  saved_start);
2860 				}
2861 			}
2862 			continue;
2863 		}
2864 		vm_map_clip_end(map, entry, end);
2865 
2866 		next = entry->next;
2867 
2868 		/*
2869 		 * Unwire before removing addresses from the pmap; otherwise,
2870 		 * unwiring will put the entries back in the pmap.
2871 		 */
2872 		if (entry->wired_count != 0) {
2873 			vm_map_entry_unwire(map, entry);
2874 		}
2875 
2876 		pmap_remove(map->pmap, entry->start, entry->end);
2877 
2878 		/*
2879 		 * Delete the entry only after removing all pmap
2880 		 * entries pointing to its pages.  (Otherwise, its
2881 		 * page frames may be reallocated, and any modify bits
2882 		 * will be set in the wrong object!)
2883 		 */
2884 		vm_map_entry_delete(map, entry);
2885 		entry = next;
2886 	}
2887 	return (KERN_SUCCESS);
2888 }
2889 
2890 /*
2891  *	vm_map_remove:
2892  *
2893  *	Remove the given address range from the target map.
2894  *	This is the exported form of vm_map_delete.
2895  */
2896 int
2897 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2898 {
2899 	int result;
2900 
2901 	vm_map_lock(map);
2902 	VM_MAP_RANGE_CHECK(map, start, end);
2903 	result = vm_map_delete(map, start, end);
2904 	vm_map_unlock(map);
2905 	return (result);
2906 }
2907 
2908 /*
2909  *	vm_map_check_protection:
2910  *
2911  *	Assert that the target map allows the specified privilege on the
2912  *	entire address region given.  The entire region must be allocated.
2913  *
2914  *	WARNING!  This code does not and should not check whether the
2915  *	contents of the region are accessible.  For example, a smaller file
2916  *	might be mapped into a larger address space.
2917  *
2918  *	NOTE!  This code is also called by munmap().
2919  *
2920  *	The map must be locked.  A read lock is sufficient.
2921  */
2922 boolean_t
2923 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2924 			vm_prot_t protection)
2925 {
2926 	vm_map_entry_t entry;
2927 	vm_map_entry_t tmp_entry;
2928 
2929 	if (!vm_map_lookup_entry(map, start, &tmp_entry))
2930 		return (FALSE);
2931 	entry = tmp_entry;
2932 
2933 	while (start < end) {
2934 		if (entry == &map->header)
2935 			return (FALSE);
2936 		/*
2937 		 * No holes allowed!
2938 		 */
2939 		if (start < entry->start)
2940 			return (FALSE);
2941 		/*
2942 		 * Check protection associated with entry.
2943 		 */
2944 		if ((entry->protection & protection) != protection)
2945 			return (FALSE);
2946 		/* go to next entry */
2947 		start = entry->end;
2948 		entry = entry->next;
2949 	}
2950 	return (TRUE);
2951 }
2952 
2953 /*
2954  *	vm_map_copy_entry:
2955  *
2956  *	Copies the contents of the source entry to the destination
2957  *	entry.  The entries *must* be aligned properly.
2958  */
2959 static void
2960 vm_map_copy_entry(
2961 	vm_map_t src_map,
2962 	vm_map_t dst_map,
2963 	vm_map_entry_t src_entry,
2964 	vm_map_entry_t dst_entry,
2965 	vm_ooffset_t *fork_charge)
2966 {
2967 	vm_object_t src_object;
2968 	vm_map_entry_t fake_entry;
2969 	vm_offset_t size;
2970 	struct ucred *cred;
2971 	int charged;
2972 
2973 	VM_MAP_ASSERT_LOCKED(dst_map);
2974 
2975 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2976 		return;
2977 
2978 	if (src_entry->wired_count == 0) {
2979 
2980 		/*
2981 		 * If the source entry is marked needs_copy, it is already
2982 		 * write-protected.
2983 		 */
2984 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2985 			pmap_protect(src_map->pmap,
2986 			    src_entry->start,
2987 			    src_entry->end,
2988 			    src_entry->protection & ~VM_PROT_WRITE);
2989 		}
2990 
2991 		/*
2992 		 * Make a copy of the object.
2993 		 */
2994 		size = src_entry->end - src_entry->start;
2995 		if ((src_object = src_entry->object.vm_object) != NULL) {
2996 			VM_OBJECT_WLOCK(src_object);
2997 			charged = ENTRY_CHARGED(src_entry);
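			/*
			 * For anonymous objects, collapse the shadow
			 * chain; if the object has only this one mapping
			 * and may be split, split off the range covered
			 * by this entry so the copy does not reference
			 * unrelated pages of the object.
			 */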
2998 			if ((src_object->handle == NULL) &&
2999 				(src_object->type == OBJT_DEFAULT ||
3000 				 src_object->type == OBJT_SWAP)) {
3001 				vm_object_collapse(src_object);
3002 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3003 					vm_object_split(src_entry);
3004 					src_object = src_entry->object.vm_object;
3005 				}
3006 			}
3007 			vm_object_reference_locked(src_object);
3008 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3009 			if (src_entry->cred != NULL &&
3010 			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3011 				KASSERT(src_object->cred == NULL,
3012 				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3013 				     src_object));
3014 				src_object->cred = src_entry->cred;
3015 				src_object->charge = size;
3016 			}
3017 			VM_OBJECT_WUNLOCK(src_object);
3018 			dst_entry->object.vm_object = src_object;
3019 			if (charged) {
3020 				cred = curthread->td_ucred;
3021 				crhold(cred);
3022 				dst_entry->cred = cred;
3023 				*fork_charge += size;
3024 				if (!(src_entry->eflags &
3025 				      MAP_ENTRY_NEEDS_COPY)) {
3026 					crhold(cred);
3027 					src_entry->cred = cred;
3028 					*fork_charge += size;
3029 				}
3030 			}
3031 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3032 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3033 			dst_entry->offset = src_entry->offset;
3034 			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3035 				/*
3036 				 * MAP_ENTRY_VN_WRITECNT cannot
3037 				 * indicate write reference from
3038 				 * src_entry, since the entry is
3039 				 * marked as needs copy.  Allocate a
3040 				 * fake entry that is used to
3041 				 * decrement object->un_pager.vnp.writecount
3042 				 * at the appropriate time.  Attach
3043 				 * fake_entry to the deferred list.
3044 				 */
3045 				fake_entry = vm_map_entry_create(dst_map);
3046 				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3047 				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3048 				vm_object_reference(src_object);
3049 				fake_entry->object.vm_object = src_object;
3050 				fake_entry->start = src_entry->start;
3051 				fake_entry->end = src_entry->end;
3052 				fake_entry->next = curthread->td_map_def_user;
3053 				curthread->td_map_def_user = fake_entry;
3054 			}
3055 		} else {
3056 			dst_entry->object.vm_object = NULL;
3057 			dst_entry->offset = 0;
3058 			if (src_entry->cred != NULL) {
3059 				dst_entry->cred = curthread->td_ucred;
3060 				crhold(dst_entry->cred);
3061 				*fork_charge += size;
3062 			}
3063 		}
3064 
3065 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3066 		    dst_entry->end - dst_entry->start, src_entry->start);
3067 	} else {
3068 		/*
3069 		 * Of course, wired down pages can't be set copy-on-write.
3070 		 * Cause wired pages to be copied into the new map by
3071 		 * simulating faults (the new pages are pageable)
3072 		 */
3073 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3074 		    fork_charge);
3075 	}
3076 }
3077 
3078 /*
3079  * vmspace_map_entry_forked:
3080  * Update the newly-forked vmspace each time a map entry is inherited
3081  * or copied.  The values for vm_dsize and vm_tsize are approximate
3082  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3083  */
3084 static void
3085 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3086     vm_map_entry_t entry)
3087 {
3088 	vm_size_t entrysize;
3089 	vm_offset_t newend;
3090 
3091 	entrysize = entry->end - entry->start;
3092 	vm2->vm_map.size += entrysize;
3093 	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3094 		vm2->vm_ssize += btoc(entrysize);
3095 	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3096 	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3097 		newend = MIN(entry->end,
3098 		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3099 		vm2->vm_dsize += btoc(newend - entry->start);
3100 	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3101 	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3102 		newend = MIN(entry->end,
3103 		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3104 		vm2->vm_tsize += btoc(newend - entry->start);
3105 	}
3106 }
3107 
3108 /*
3109  * vmspace_fork:
3110  * Create a new process vmspace structure and vm_map
3111  * based on those of an existing process.  The new map
3112  * is based on the old map, according to the inheritance
3113  * values on the regions in that map.
3114  *
3115  * XXX It might be worth coalescing the entries added to the new vmspace.
3116  *
3117  * The source map must not be locked.
3118  */
3119 struct vmspace *
3120 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3121 {
3122 	struct vmspace *vm2;
3123 	vm_map_t new_map, old_map;
3124 	vm_map_entry_t new_entry, old_entry;
3125 	vm_object_t object;
3126 	int locked;
3127 
3128 	old_map = &vm1->vm_map;
3129 	/* Copy immutable fields of vm1 to vm2. */
3130 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3131 	if (vm2 == NULL)
3132 		return (NULL);
3133 	vm2->vm_taddr = vm1->vm_taddr;
3134 	vm2->vm_daddr = vm1->vm_daddr;
3135 	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3136 	vm_map_lock(old_map);
3137 	if (old_map->busy)
3138 		vm_map_wait_busy(old_map);
3139 	new_map = &vm2->vm_map;
3140 	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3141 	KASSERT(locked, ("vmspace_fork: lock failed"));
3142 
3143 	old_entry = old_map->header.next;
3144 
3145 	while (old_entry != &old_map->header) {
3146 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3147 			panic("vm_map_fork: encountered a submap");
3148 
3149 		switch (old_entry->inheritance) {
3150 		case VM_INHERIT_NONE:
3151 			break;
3152 
3153 		case VM_INHERIT_SHARE:
3154 			/*
3155 			 * Clone the entry, creating the shared object if necessary.
3156 			 */
3157 			object = old_entry->object.vm_object;
3158 			if (object == NULL) {
3159 				object = vm_object_allocate(OBJT_DEFAULT,
3160 					atop(old_entry->end - old_entry->start));
3161 				old_entry->object.vm_object = object;
3162 				old_entry->offset = 0;
3163 				if (old_entry->cred != NULL) {
3164 					object->cred = old_entry->cred;
3165 					object->charge = old_entry->end -
3166 					    old_entry->start;
3167 					old_entry->cred = NULL;
3168 				}
3169 			}
3170 
3171 			/*
3172 			 * Add the reference before calling vm_object_shadow
3173 			 * to ensure that a shadow object is created.
3174 			 */
3175 			vm_object_reference(object);
3176 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3177 				vm_object_shadow(&old_entry->object.vm_object,
3178 				    &old_entry->offset,
3179 				    old_entry->end - old_entry->start);
3180 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3181 				/* Transfer the second reference too. */
3182 				vm_object_reference(
3183 				    old_entry->object.vm_object);
3184 
3185 				/*
3186 				 * As in vm_map_simplify_entry(), the
3187 				 * vnode lock will not be acquired in
3188 				 * this call to vm_object_deallocate().
3189 				 */
3190 				vm_object_deallocate(object);
3191 				object = old_entry->object.vm_object;
3192 			}
3193 			VM_OBJECT_WLOCK(object);
3194 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3195 			if (old_entry->cred != NULL) {
3196 				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3197 				object->cred = old_entry->cred;
3198 				object->charge = old_entry->end - old_entry->start;
3199 				old_entry->cred = NULL;
3200 			}
3201 
3202 			/*
3203 			 * Assert the correct state of the vnode
3204 			 * v_writecount while the object is locked, so
3205 			 * that it does not need to be relocked later
3206 			 * just for these assertions.
3207 			 */
3208 			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3209 			    object->type == OBJT_VNODE) {
3210 				KASSERT(((struct vnode *)object->handle)->
3211 				    v_writecount > 0,
3212 				    ("vmspace_fork: v_writecount %p", object));
3213 				KASSERT(object->un_pager.vnp.writemappings > 0,
3214 				    ("vmspace_fork: vnp.writecount %p",
3215 				    object));
3216 			}
3217 			VM_OBJECT_WUNLOCK(object);
3218 
3219 			/*
3220 			 * Clone the entry, referencing the shared object.
3221 			 */
3222 			new_entry = vm_map_entry_create(new_map);
3223 			*new_entry = *old_entry;
3224 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3225 			    MAP_ENTRY_IN_TRANSITION);
3226 			new_entry->wiring_thread = NULL;
3227 			new_entry->wired_count = 0;
3228 			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3229 				vnode_pager_update_writecount(object,
3230 				    new_entry->start, new_entry->end);
3231 			}
3232 
3233 			/*
3234 			 * Insert the entry into the new map -- we know we're
3235 			 * inserting at the end of the new map.
3236 			 */
3237 			vm_map_entry_link(new_map, new_map->header.prev,
3238 			    new_entry);
3239 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3240 
3241 			/*
3242 			 * Update the physical map
3243 			 */
3244 			pmap_copy(new_map->pmap, old_map->pmap,
3245 			    new_entry->start,
3246 			    (old_entry->end - old_entry->start),
3247 			    old_entry->start);
3248 			break;
3249 
3250 		case VM_INHERIT_COPY:
3251 			/*
3252 			 * Clone the entry and link into the map.
3253 			 */
3254 			new_entry = vm_map_entry_create(new_map);
3255 			*new_entry = *old_entry;
3256 			/*
3257 			 * Copied entry is COW over the old object.
3258 			 */
3259 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3260 			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3261 			new_entry->wiring_thread = NULL;
3262 			new_entry->wired_count = 0;
3263 			new_entry->object.vm_object = NULL;
3264 			new_entry->cred = NULL;
3265 			vm_map_entry_link(new_map, new_map->header.prev,
3266 			    new_entry);
3267 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3268 			vm_map_copy_entry(old_map, new_map, old_entry,
3269 			    new_entry, fork_charge);
3270 			break;
3271 		}
3272 		old_entry = old_entry->next;
3273 	}
3274 	/*
3275 	 * Use inlined vm_map_unlock() to postpone handling the deferred
3276 	 * map entries, which cannot be done until both old_map and
3277 	 * new_map locks are released.
3278 	 */
3279 	sx_xunlock(&old_map->lock);
3280 	sx_xunlock(&new_map->lock);
3281 	vm_map_process_deferred();
3282 
3283 	return (vm2);
3284 }
3285 
3286 int
3287 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3288     vm_prot_t prot, vm_prot_t max, int cow)
3289 {
3290 	vm_map_entry_t new_entry, prev_entry;
3291 	vm_offset_t bot, top;
3292 	vm_size_t growsize, init_ssize;
3293 	int orient, rv;
3294 	rlim_t lmemlim, vmemlim;
3295 
3296 	/*
3297 	 * The stack orientation is piggybacked with the cow argument.
3298 	 * Extract it into orient and mask the cow argument so that we
3299 	 * don't pass it around further.
3300 	 * NOTE: We explicitly allow bi-directional stacks.
3301 	 */
3302 	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3303 	cow &= ~orient;
3304 	KASSERT(orient != 0, ("No stack grow direction"));
3305 
3306 	if (addrbos < vm_map_min(map) ||
3307 	    addrbos > vm_map_max(map) ||
3308 	    addrbos + max_ssize < addrbos)
3309 		return (KERN_NO_SPACE);
3310 
3311 	growsize = sgrowsiz;
3312 	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3313 
3314 	PROC_LOCK(curproc);
3315 	lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3316 	vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3317 	PROC_UNLOCK(curproc);
3318 
3319 	vm_map_lock(map);
3320 
3321 	/* If addr is already mapped, no go */
3322 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3323 		vm_map_unlock(map);
3324 		return (KERN_NO_SPACE);
3325 	}
3326 
3327 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3328 		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3329 			vm_map_unlock(map);
3330 			return (KERN_NO_SPACE);
3331 		}
3332 	}
3333 
3334 	/* If we would blow our VMEM resource limit, no go */
3335 	if (map->size + init_ssize > vmemlim) {
3336 		vm_map_unlock(map);
3337 		return (KERN_NO_SPACE);
3338 	}
3339 
3340 	/*
3341 	 * If we can't accommodate max_ssize in the current mapping, no go.
3342 	 * However, we need to be aware that subsequent user mappings might
3343 	 * map into the space we have reserved for stack, and currently this
3344 	 * space is not protected.
3345 	 *
3346 	 * Hopefully we will at least detect this condition when we try to
3347 	 * grow the stack.
3348 	 */
3349 	if ((prev_entry->next != &map->header) &&
3350 	    (prev_entry->next->start < addrbos + max_ssize)) {
3351 		vm_map_unlock(map);
3352 		return (KERN_NO_SPACE);
3353 	}
3354 
3355 	/*
3356 	 * We initially map a stack of only init_ssize.  We will grow as
3357 	 * needed later.  Depending on the orientation of the stack (i.e.
3358 	 * the grow direction) we either map at the top of the range, the
3359 	 * bottom of the range or in the middle.
3360 	 *
3361 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3362 	 * and cow to be 0.  Possibly we should eliminate these as input
3363 	 * parameters, and just pass these values here in the insert call.
3364 	 */
3365 	if (orient == MAP_STACK_GROWS_DOWN)
3366 		bot = addrbos + max_ssize - init_ssize;
3367 	else if (orient == MAP_STACK_GROWS_UP)
3368 		bot = addrbos;
3369 	else
3370 		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3371 	top = bot + init_ssize;
3372 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3373 
3374 	/* Now set the avail_ssize amount. */
3375 	if (rv == KERN_SUCCESS) {
3376 		if (prev_entry != &map->header)
3377 			vm_map_clip_end(map, prev_entry, bot);
3378 		new_entry = prev_entry->next;
3379 		if (new_entry->end != top || new_entry->start != bot)
3380 			panic("Bad entry start/end for new stack entry");
3381 
3382 		new_entry->avail_ssize = max_ssize - init_ssize;
3383 		if (orient & MAP_STACK_GROWS_DOWN)
3384 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3385 		if (orient & MAP_STACK_GROWS_UP)
3386 			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3387 	}
3388 
3389 	vm_map_unlock(map);
3390 	return (rv);
3391 }
3392 
3393 static int stack_guard_page = 0;
3394 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3395 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3396     &stack_guard_page, 0,
3397     "Insert stack guard page ahead of the growable segments.");
3398 
3399 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3400  * desired address is already mapped, or if we successfully grow
3401  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3402  * stack range (this is strange, but preserves compatibility with
3403  * the grow function in vm_machdep.c).
3404  */
3405 int
3406 vm_map_growstack(struct proc *p, vm_offset_t addr)
3407 {
3408 	vm_map_entry_t next_entry, prev_entry;
3409 	vm_map_entry_t new_entry, stack_entry;
3410 	struct vmspace *vm = p->p_vmspace;
3411 	vm_map_t map = &vm->vm_map;
3412 	vm_offset_t end;
3413 	vm_size_t growsize;
3414 	size_t grow_amount, max_grow;
3415 	rlim_t lmemlim, stacklim, vmemlim;
3416 	int is_procstack, rv;
3417 	struct ucred *cred;
3418 #ifdef notyet
3419 	uint64_t limit;
3420 #endif
3421 #ifdef RACCT
3422 	int error;
3423 #endif
3424 
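	/*
	 * A failed lock upgrade below drops the map lock, so the resource
	 * limits are re-read and the read lock re-acquired before retrying.
	 */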
3425 Retry:
3426 	PROC_LOCK(p);
3427 	lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3428 	stacklim = lim_cur(p, RLIMIT_STACK);
3429 	vmemlim = lim_cur(p, RLIMIT_VMEM);
3430 	PROC_UNLOCK(p);
3431 
3432 	vm_map_lock_read(map);
3433 
3434 	/* If addr is already in the entry range, no need to grow.*/
3435 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3436 		vm_map_unlock_read(map);
3437 		return (KERN_SUCCESS);
3438 	}
3439 
3440 	next_entry = prev_entry->next;
3441 	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3442 		/*
3443 		 * This entry does not grow upwards. Since the address lies
3444 		 * beyond this entry, the next entry (if one exists) has to
3445 		 * be a downward growable entry. The entry list header is
3446 		 * never a growable entry, so it suffices to check the flags.
3447 		 */
3448 		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3449 			vm_map_unlock_read(map);
3450 			return (KERN_SUCCESS);
3451 		}
3452 		stack_entry = next_entry;
3453 	} else {
3454 		/*
3455 		 * This entry grows upward. If the next entry does not at
3456 		 * least grow downwards, this is the entry we need to grow.
3457 		 * Otherwise we have two possible choices and we have to
3458 		 * select one.
3459 		 */
3460 		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3461 			/*
3462 			 * We have two choices; grow the entry closest to
3463 			 * the address to minimize the amount of growth.
3464 			 */
3465 			if (addr - prev_entry->end <= next_entry->start - addr)
3466 				stack_entry = prev_entry;
3467 			else
3468 				stack_entry = next_entry;
3469 		} else
3470 			stack_entry = prev_entry;
3471 	}
3472 
3473 	if (stack_entry == next_entry) {
3474 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3475 		KASSERT(addr < stack_entry->start, ("foo"));
3476 		end = (prev_entry != &map->header) ? prev_entry->end :
3477 		    stack_entry->start - stack_entry->avail_ssize;
3478 		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3479 		max_grow = stack_entry->start - end;
3480 	} else {
3481 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3482 		KASSERT(addr >= stack_entry->end, ("foo"));
3483 		end = (next_entry != &map->header) ? next_entry->start :
3484 		    stack_entry->end + stack_entry->avail_ssize;
3485 		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3486 		max_grow = end - stack_entry->end;
3487 	}
3488 
3489 	if (grow_amount > stack_entry->avail_ssize) {
3490 		vm_map_unlock_read(map);
3491 		return (KERN_NO_SPACE);
3492 	}
3493 
3494 	/*
3495 	 * If there is no longer enough space between the entries, fail and
3496 	 * adjust the available space.  Note: this should only happen if the
3497 	 * user has mapped into the stack area after the stack was created,
3498 	 * and is probably an error.
3499 	 *
3500 	 * This also effectively destroys any guard page the user might have
3501 	 * intended by limiting the stack size.
3502 	 */
3503 	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3504 		if (vm_map_lock_upgrade(map))
3505 			goto Retry;
3506 
3507 		stack_entry->avail_ssize = max_grow;
3508 
3509 		vm_map_unlock(map);
3510 		return (KERN_NO_SPACE);
3511 	}
3512 
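	/*
	 * Faults at or above vm_maxsaddr are treated as growth of the main
	 * process stack and are charged against RLIMIT_STACK below.
	 */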
3513 	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3514 
3515 	/*
3516 	 * If this is the main process stack, see if we're over the stack
3517 	 * limit.
3518 	 */
3519 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3520 		vm_map_unlock_read(map);
3521 		return (KERN_NO_SPACE);
3522 	}
3523 #ifdef RACCT
3524 	PROC_LOCK(p);
3525 	if (is_procstack &&
3526 	    racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
3527 		PROC_UNLOCK(p);
3528 		vm_map_unlock_read(map);
3529 		return (KERN_NO_SPACE);
3530 	}
3531 	PROC_UNLOCK(p);
3532 #endif
3533 
3534 	/* Round up the grow amount modulo sgrowsiz */
3535 	growsize = sgrowsiz;
3536 	grow_amount = roundup(grow_amount, growsize);
3537 	if (grow_amount > stack_entry->avail_ssize)
3538 		grow_amount = stack_entry->avail_ssize;
3539 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3540 		grow_amount = trunc_page((vm_size_t)stacklim) -
3541 		    ctob(vm->vm_ssize);
3542 	}
3543 #ifdef notyet
3544 	PROC_LOCK(p);
3545 	limit = racct_get_available(p, RACCT_STACK);
3546 	PROC_UNLOCK(p);
3547 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3548 		grow_amount = limit - ctob(vm->vm_ssize);
3549 #endif
3550 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3551 		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3552 			vm_map_unlock_read(map);
3553 			rv = KERN_NO_SPACE;
3554 			goto out;
3555 		}
3556 #ifdef RACCT
3557 		PROC_LOCK(p);
3558 		if (racct_set(p, RACCT_MEMLOCK,
3559 		    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3560 			PROC_UNLOCK(p);
3561 			vm_map_unlock_read(map);
3562 			rv = KERN_NO_SPACE;
3563 			goto out;
3564 		}
3565 		PROC_UNLOCK(p);
3566 #endif
3567 	}
3568 	/* If we would blow our VMEM resource limit, no go */
3569 	if (map->size + grow_amount > vmemlim) {
3570 		vm_map_unlock_read(map);
3571 		rv = KERN_NO_SPACE;
3572 		goto out;
3573 	}
3574 #ifdef RACCT
3575 	PROC_LOCK(p);
3576 	if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3577 		PROC_UNLOCK(p);
3578 		vm_map_unlock_read(map);
3579 		rv = KERN_NO_SPACE;
3580 		goto out;
3581 	}
3582 	PROC_UNLOCK(p);
3583 #endif
3584 
3585 	if (vm_map_lock_upgrade(map))
3586 		goto Retry;
3587 
3588 	if (stack_entry == next_entry) {
3589 		/*
3590 		 * Growing downward.
3591 		 */
3592 		/* Get the preliminary new entry start value */
3593 		addr = stack_entry->start - grow_amount;
3594 
3595 		/*
3596 		 * If this puts us into the previous entry, cut back our
3597 		 * growth to the available space. Also, see the note above.
3598 		 */
3599 		if (addr < end) {
3600 			stack_entry->avail_ssize = max_grow;
3601 			addr = end;
3602 			if (stack_guard_page)
3603 				addr += PAGE_SIZE;
3604 		}
3605 
3606 		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3607 		    next_entry->protection, next_entry->max_protection, 0);
3608 
3609 		/* Adjust the available stack space by the amount we grew. */
3610 		if (rv == KERN_SUCCESS) {
3611 			if (prev_entry != &map->header)
3612 				vm_map_clip_end(map, prev_entry, addr);
3613 			new_entry = prev_entry->next;
3614 			KASSERT(new_entry == stack_entry->prev, ("new_entry != stack_entry->prev"));
3615 			KASSERT(new_entry->end == stack_entry->start, ("new_entry->end != stack_entry->start"));
3616 			KASSERT(new_entry->start == addr, ("new_entry->start != addr"));
3617 			grow_amount = new_entry->end - new_entry->start;
3618 			new_entry->avail_ssize = stack_entry->avail_ssize -
3619 			    grow_amount;
3620 			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3621 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3622 		}
3623 	} else {
3624 		/*
3625 		 * Growing upward.
3626 		 */
3627 		addr = stack_entry->end + grow_amount;
3628 
3629 		/*
3630 		 * If this puts us into the next entry, cut back our growth
3631 		 * to the available space. Also, see the note above.
3632 		 */
3633 		if (addr > end) {
3634 			stack_entry->avail_ssize = end - stack_entry->end;
3635 			addr = end;
3636 			if (stack_guard_page)
3637 				addr -= PAGE_SIZE;
3638 		}
3639 
3640 		grow_amount = addr - stack_entry->end;
3641 		cred = stack_entry->cred;
3642 		if (cred == NULL && stack_entry->object.vm_object != NULL)
3643 			cred = stack_entry->object.vm_object->cred;
3644 		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3645 			rv = KERN_NO_SPACE;
3646 		/* Grow the underlying object if applicable. */
3647 		else if (stack_entry->object.vm_object == NULL ||
3648 			 vm_object_coalesce(stack_entry->object.vm_object,
3649 			 stack_entry->offset,
3650 			 (vm_size_t)(stack_entry->end - stack_entry->start),
3651 			 (vm_size_t)grow_amount, cred != NULL)) {
3652 			map->size += (addr - stack_entry->end);
3653 			/* Update the current entry. */
3654 			stack_entry->end = addr;
3655 			stack_entry->avail_ssize -= grow_amount;
3656 			vm_map_entry_resize_free(map, stack_entry);
3657 			rv = KERN_SUCCESS;
3658 
3659 			if (next_entry != &map->header)
3660 				vm_map_clip_start(map, next_entry, addr);
3661 		} else
3662 			rv = KERN_FAILURE;
3663 	}
3664 
3665 	if (rv == KERN_SUCCESS && is_procstack)
3666 		vm->vm_ssize += btoc(grow_amount);
3667 
3668 	vm_map_unlock(map);
3669 
3670 	/*
3671 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3672 	 */
3673 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3674 		vm_map_wire(map,
3675 		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3676 		    (stack_entry == next_entry) ? stack_entry->start : addr,
3677 		    (p->p_flag & P_SYSTEM)
3678 		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3679 		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3680 	}
3681 
3682 out:
3683 #ifdef RACCT
3684 	if (rv != KERN_SUCCESS) {
3685 		PROC_LOCK(p);
3686 		error = racct_set(p, RACCT_VMEM, map->size);
3687 		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3688 		if (!old_mlock) {
3689 			error = racct_set(p, RACCT_MEMLOCK,
3690 			    ptoa(pmap_wired_count(map->pmap)));
3691 			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3692 		}
3693 		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3694 		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3695 		PROC_UNLOCK(p);
3696 	}
3697 #endif
3698 
3699 	return (rv);
3700 }
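/*
 * Usage sketch (illustrative; assumes the enclosing routine above is the
 * stock vm_map_growstack(struct proc *, vm_offset_t)): machine-dependent
 * fault handlers typically try to grow the stack before treating a
 * user-mode fault as fatal:
 *
 *	if (vm_map_growstack(p, va) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 */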
3701 
3702 /*
3703  * Unshare the specified VM space for exec.  If other processes share
3704  * it, create a new, empty one for this process to use.
3705  */
3706 int
3707 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3708 {
3709 	struct vmspace *oldvmspace = p->p_vmspace;
3710 	struct vmspace *newvmspace;
3711 
3712 	newvmspace = vmspace_alloc(minuser, maxuser);
3713 	if (newvmspace == NULL)
3714 		return (ENOMEM);
3715 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3716 	/*
3717 	 * This code is written this way for prototype purposes.  The
3718 	 * goal is to avoid running down the vmspace here, but to let the
3719 	 * other processes that are still using the vmspace finally
3720 	 * run it down.  Even though there is little or no chance of blocking
3721 	 * here, it is a good idea to keep this form for future mods.
3722 	 */
3723 	PROC_VMSPACE_LOCK(p);
3724 	p->p_vmspace = newvmspace;
3725 	PROC_VMSPACE_UNLOCK(p);
3726 	if (p == curthread->td_proc)
3727 		pmap_activate(curthread);
3728 	vmspace_free(oldvmspace);
3729 	return (0);
3730 }
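/*
 * Usage sketch (illustrative; exec_new_vmspace() in kern_exec.c is the
 * assumed caller): the exec path swaps in a fresh address space sized by
 * the image's sysentvec:
 *
 *	error = vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
 *	if (error)
 *		return (error);
 */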
3731 
3732 /*
3733  * Unshare the specified VM space for forcing COW.  This
3734  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3735  */
3736 int
3737 vmspace_unshare(struct proc *p)
3738 {
3739 	struct vmspace *oldvmspace = p->p_vmspace;
3740 	struct vmspace *newvmspace;
3741 	vm_ooffset_t fork_charge;
3742 
3743 	if (oldvmspace->vm_refcnt == 1)
3744 		return (0);
3745 	fork_charge = 0;
3746 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3747 	if (newvmspace == NULL)
3748 		return (ENOMEM);
3749 	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3750 		vmspace_free(newvmspace);
3751 		return (ENOMEM);
3752 	}
3753 	PROC_VMSPACE_LOCK(p);
3754 	p->p_vmspace = newvmspace;
3755 	PROC_VMSPACE_UNLOCK(p);
3756 	if (p == curthread->td_proc)
3757 		pmap_activate(curthread);
3758 	vmspace_free(oldvmspace);
3759 	return (0);
3760 }
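/*
 * Usage sketch (illustrative; fork1() in kern_fork.c is the assumed
 * caller): an rfork(2) request with neither RFPROC nor RFMEM set creates
 * no child and simply unshares the caller's address space:
 *
 *	if ((flags & (RFMEM | RFPROC)) == 0) {
 *		error = vmspace_unshare(p1);
 *		if (error)
 *			goto fail;
 *	}
 */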
3761 
3762 /*
3763  *	vm_map_lookup:
3764  *
3765  *	Finds the VM object, offset, and
3766  *	protection for a given virtual address in the
3767  *	specified map, assuming a page fault of the
3768  *	type specified.
3769  *
3770  *	Leaves the map in question locked for read; return
3771  *	values are guaranteed until a vm_map_lookup_done
3772  *	call is performed.  Note that the map argument
3773  *	is in/out; the returned map must be used in
3774  *	the call to vm_map_lookup_done.
3775  *
3776  *	A handle (out_entry) is returned for use in
3777  *	vm_map_lookup_done, to make that fast.
3778  *
3779  *	If a lookup is requested with "write protection"
3780  *	specified, the map may be changed to perform virtual
3781  *	copying operations, although the data referenced will
3782  *	remain the same.
3783  */
3784 int
3785 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3786 	      vm_offset_t vaddr,
3787 	      vm_prot_t fault_typea,
3788 	      vm_map_entry_t *out_entry,	/* OUT */
3789 	      vm_object_t *object,		/* OUT */
3790 	      vm_pindex_t *pindex,		/* OUT */
3791 	      vm_prot_t *out_prot,		/* OUT */
3792 	      boolean_t *wired)			/* OUT */
3793 {
3794 	vm_map_entry_t entry;
3795 	vm_map_t map = *var_map;
3796 	vm_prot_t prot;
3797 	vm_prot_t fault_type = fault_typea;
3798 	vm_object_t eobject;
3799 	vm_size_t size;
3800 	struct ucred *cred;
3801 
3802 RetryLookup:;
3803 
3804 	vm_map_lock_read(map);
3805 
3806 	/*
3807 	 * Lookup the faulting address.
3808 	 */
3809 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3810 		vm_map_unlock_read(map);
3811 		return (KERN_INVALID_ADDRESS);
3812 	}
3813 
3814 	entry = *out_entry;
3815 
3816 	/*
3817 	 * Handle submaps.
3818 	 */
3819 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3820 		vm_map_t old_map = map;
3821 
3822 		*var_map = map = entry->object.sub_map;
3823 		vm_map_unlock_read(old_map);
3824 		goto RetryLookup;
3825 	}
3826 
3827 	/*
3828 	 * Check whether this task is allowed to have this page.
3829 	 */
3830 	prot = entry->protection;
3831 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3832 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3833 		vm_map_unlock_read(map);
3834 		return (KERN_PROTECTION_FAILURE);
3835 	}
3836 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3837 	    (entry->eflags & MAP_ENTRY_COW) &&
3838 	    (fault_type & VM_PROT_WRITE)) {
3839 		vm_map_unlock_read(map);
3840 		return (KERN_PROTECTION_FAILURE);
3841 	}
3842 	if ((fault_typea & VM_PROT_COPY) != 0 &&
3843 	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
3844 	    (entry->eflags & MAP_ENTRY_COW) == 0) {
3845 		vm_map_unlock_read(map);
3846 		return (KERN_PROTECTION_FAILURE);
3847 	}
3848 
3849 	/*
3850 	 * If this page is not pageable, we have to get it for all possible
3851 	 * accesses.
3852 	 */
3853 	*wired = (entry->wired_count != 0);
3854 	if (*wired)
3855 		fault_type = entry->protection;
3856 	size = entry->end - entry->start;
3857 	/*
3858 	 * If the entry was copy-on-write, resolve the copy now or demote access.
3859 	 */
3860 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3861 		/*
3862 		 * If we want to write the page, we may as well handle that
3863 		 * now since we've got the map locked.
3864 		 *
3865 		 * If we don't need to write the page, we just demote the
3866 		 * permissions allowed.
3867 		 */
3868 		if ((fault_type & VM_PROT_WRITE) != 0 ||
3869 		    (fault_typea & VM_PROT_COPY) != 0) {
3870 			/*
3871 			 * Make a new object, and place it in the object
3872 			 * chain.  Note that no new references have appeared
3873 			 * -- one just moved from the map to the new
3874 			 * object.
3875 			 */
3876 			if (vm_map_lock_upgrade(map))
3877 				goto RetryLookup;
3878 
3879 			if (entry->cred == NULL) {
3880 				/*
3881 				 * Charge the current thread, e.g. a
3882 				 * debugger forcing a copy, for the memory.
3883 				 */
3884 				cred = curthread->td_ucred;
3885 				crhold(cred);
3886 				if (!swap_reserve_by_cred(size, cred)) {
3887 					crfree(cred);
3888 					vm_map_unlock(map);
3889 					return (KERN_RESOURCE_SHORTAGE);
3890 				}
3891 				entry->cred = cred;
3892 			}
3893 			vm_object_shadow(&entry->object.vm_object,
3894 			    &entry->offset, size);
3895 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3896 			eobject = entry->object.vm_object;
3897 			if (eobject->cred != NULL) {
3898 				/*
3899 				 * The object was not shadowed.
3900 				 */
3901 				swap_release_by_cred(size, entry->cred);
3902 				crfree(entry->cred);
3903 				entry->cred = NULL;
3904 			} else if (entry->cred != NULL) {
3905 				VM_OBJECT_WLOCK(eobject);
3906 				eobject->cred = entry->cred;
3907 				eobject->charge = size;
3908 				VM_OBJECT_WUNLOCK(eobject);
3909 				entry->cred = NULL;
3910 			}
3911 
3912 			vm_map_lock_downgrade(map);
3913 		} else {
3914 			/*
3915 			 * We're attempting to read a copy-on-write page --
3916 			 * don't allow writes.
3917 			 */
3918 			prot &= ~VM_PROT_WRITE;
3919 		}
3920 	}
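	/*
	 * Illustrative object-chain transition (assumed simple case) for a
	 * write fault on a NEEDS_COPY entry: vm_object_shadow() interposes
	 * a new object in front of the old backing object, and the entry's
	 * offset is rebased into the shadow:
	 *
	 *	before:	entry ------------------> backing object
	 *	after:	entry -> shadow object -> backing object
	 */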
3921 
3922 	/*
3923 	 * Create an object if necessary.
3924 	 */
3925 	if (entry->object.vm_object == NULL &&
3926 	    !map->system_map) {
3927 		if (vm_map_lock_upgrade(map))
3928 			goto RetryLookup;
3929 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3930 		    atop(size));
3931 		entry->offset = 0;
3932 		if (entry->cred != NULL) {
3933 			VM_OBJECT_WLOCK(entry->object.vm_object);
3934 			entry->object.vm_object->cred = entry->cred;
3935 			entry->object.vm_object->charge = size;
3936 			VM_OBJECT_WUNLOCK(entry->object.vm_object);
3937 			entry->cred = NULL;
3938 		}
3939 		vm_map_lock_downgrade(map);
3940 	}
3941 
3942 	/*
3943 	 * Return the object/offset from this entry.  If the entry was
3944 	 * copy-on-write or empty, it has been fixed up.
3945 	 */
3946 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3947 	*object = entry->object.vm_object;
3948 
3949 	*out_prot = prot;
3950 	return (KERN_SUCCESS);
3951 }
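/*
 * Usage sketch (illustrative): every successful lookup must be paired
 * with vm_map_lookup_done() on the map that the lookup returned, since
 * the lookup may have descended into a submap:
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... fault in the page at (object, pindex) ...
 *	vm_map_lookup_done(map, entry);
 */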
3952 
3953 /*
3954  *	vm_map_lookup_locked:
3955  *
3956  *	Lookup the faulting address.  A version of vm_map_lookup that returns
3957  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
3958  *	KERN_FAILURE instead of blocking on map lock or memory allocation.
3959 int
3960 vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
3961 		     vm_offset_t vaddr,
3962 		     vm_prot_t fault_typea,
3963 		     vm_map_entry_t *out_entry,	/* OUT */
3964 		     vm_object_t *object,	/* OUT */
3965 		     vm_pindex_t *pindex,	/* OUT */
3966 		     vm_prot_t *out_prot,	/* OUT */
3967 		     boolean_t *wired)		/* OUT */
3968 {
3969 	vm_map_entry_t entry;
3970 	vm_map_t map = *var_map;
3971 	vm_prot_t prot;
3972 	vm_prot_t fault_type = fault_typea;
3973 
3974 	/*
3975 	 * Lookup the faulting address.
3976 	 */
3977 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
3978 		return (KERN_INVALID_ADDRESS);
3979 
3980 	entry = *out_entry;
3981 
3982 	/*
3983 	 * Fail if the entry refers to a submap.
3984 	 */
3985 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3986 		return (KERN_FAILURE);
3987 
3988 	/*
3989 	 * Check whether this task is allowed to have this page.
3990 	 */
3991 	prot = entry->protection;
3992 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
3993 	if ((fault_type & prot) != fault_type)
3994 		return (KERN_PROTECTION_FAILURE);
3995 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3996 	    (entry->eflags & MAP_ENTRY_COW) &&
3997 	    (fault_type & VM_PROT_WRITE))
3998 		return (KERN_PROTECTION_FAILURE);
3999 
4000 	/*
4001 	 * If this page is not pageable, we have to get it for all possible
4002 	 * accesses.
4003 	 */
4004 	*wired = (entry->wired_count != 0);
4005 	if (*wired)
4006 		fault_type = entry->protection;
4007 
4008 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4009 		/*
4010 		 * Fail if the entry was copy-on-write for a write fault.
4011 		 */
4012 		if (fault_type & VM_PROT_WRITE)
4013 			return (KERN_FAILURE);
4014 		/*
4015 		 * We're attempting to read a copy-on-write page --
4016 		 * don't allow writes.
4017 		 */
4018 		prot &= ~VM_PROT_WRITE;
4019 	}
4020 
4021 	/*
4022 	 * Fail if an object should be created.
4023 	 */
4024 	if (entry->object.vm_object == NULL && !map->system_map)
4025 		return (KERN_FAILURE);
4026 
4027 	/*
4028 	 * Return the object/offset from this entry.  If the entry was
4029 	 * copy-on-write or empty, it has been fixed up.
4030 	 */
4031 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4032 	*object = entry->object.vm_object;
4033 
4034 	*out_prot = prot;
4035 	return (KERN_SUCCESS);
4036 }
4037 
4038 /*
4039  *	vm_map_lookup_done:
4040  *
4041  *	Releases locks acquired by a vm_map_lookup
4042  *	(according to the handle returned by that lookup).
4043  */
4044 void
4045 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4046 {
4047 	/*
4048 	 * Unlock the main-level map
4049 	 */
4050 	vm_map_unlock_read(map);
4051 }
4052 
4053 #include "opt_ddb.h"
4054 #ifdef DDB
4055 #include <sys/kernel.h>
4056 
4057 #include <ddb/ddb.h>
4058 
4059 static void
4060 vm_map_print(vm_map_t map)
4061 {
4062 	vm_map_entry_t entry;
4063 
4064 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4065 	    (void *)map,
4066 	    (void *)map->pmap, map->nentries, map->timestamp);
4067 
4068 	db_indent += 2;
4069 	for (entry = map->header.next; entry != &map->header;
4070 	    entry = entry->next) {
4071 		db_iprintf("map entry %p: start=%p, end=%p\n",
4072 		    (void *)entry, (void *)entry->start, (void *)entry->end);
4073 		{
4074 			static char *inheritance_name[4] =
4075 			{"share", "copy", "none", "donate_copy"};
4076 
4077 			db_iprintf(" prot=%x/%x/%s",
4078 			    entry->protection,
4079 			    entry->max_protection,
4080 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4081 			if (entry->wired_count != 0)
4082 				db_printf(", wired");
4083 		}
4084 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4085 			db_printf(", share=%p, offset=0x%jx\n",
4086 			    (void *)entry->object.sub_map,
4087 			    (uintmax_t)entry->offset);
4088 			if ((entry->prev == &map->header) ||
4089 			    (entry->prev->object.sub_map !=
4090 				entry->object.sub_map)) {
4091 				db_indent += 2;
4092 				vm_map_print((vm_map_t)entry->object.sub_map);
4093 				db_indent -= 2;
4094 			}
4095 		} else {
4096 			if (entry->cred != NULL)
4097 				db_printf(", ruid %d", entry->cred->cr_ruid);
4098 			db_printf(", object=%p, offset=0x%jx",
4099 			    (void *)entry->object.vm_object,
4100 			    (uintmax_t)entry->offset);
4101 			if (entry->object.vm_object && entry->object.vm_object->cred)
4102 				db_printf(", obj ruid %d charge %jx",
4103 				    entry->object.vm_object->cred->cr_ruid,
4104 				    (uintmax_t)entry->object.vm_object->charge);
4105 			if (entry->eflags & MAP_ENTRY_COW)
4106 				db_printf(", copy (%s)",
4107 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4108 			db_printf("\n");
4109 
4110 			if ((entry->prev == &map->header) ||
4111 			    (entry->prev->object.vm_object !=
4112 				entry->object.vm_object)) {
4113 				db_indent += 2;
4114 				vm_object_print((db_expr_t)(intptr_t)
4115 						entry->object.vm_object,
4116 						1, 0, (char *)0);
4117 				db_indent -= 2;
4118 			}
4119 		}
4120 	}
4121 	db_indent -= 2;
4122 }
4123 
4124 DB_SHOW_COMMAND(map, map)
4125 {
4126 
4127 	if (!have_addr) {
4128 		db_printf("usage: show map <addr>\n");
4129 		return;
4130 	}
4131 	vm_map_print((vm_map_t)addr);
4132 }
4133 
4134 DB_SHOW_COMMAND(procvm, procvm)
4135 {
4136 	struct proc *p;
4137 
4138 	if (have_addr) {
4139 		p = (struct proc *) addr;
4140 	} else {
4141 		p = curproc;
4142 	}
4143 
4144 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4145 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4146 	    (void *)vmspace_pmap(p->p_vmspace));
4147 
4148 	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4149 }
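/*
 * Example ddb session (illustrative; addresses and output elided):
 *
 *	db> show procvm
 *	p = 0x..., vmspace = 0x..., map = 0x..., pmap = 0x...
 *	db> show map 0xfffff80002a4b000
 *	Task map 0xfffff80002a4b000: pmap=0x..., nentries=..., version=...
 */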
4150 
4151 #endif /* DDB */
4152