xref: /freebsd/sys/vm/vm_map.c (revision 08c4a937a6685f05667996228898521fc453f8f3)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62 
63 /*
64  *	Virtual memory mapping module.
65  */
66 
67 #include <sys/cdefs.h>
68 __FBSDID("$FreeBSD$");
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/ktr.h>
74 #include <sys/lock.h>
75 #include <sys/mutex.h>
76 #include <sys/proc.h>
77 #include <sys/vmmeter.h>
78 #include <sys/mman.h>
79 #include <sys/vnode.h>
80 #include <sys/racct.h>
81 #include <sys/resourcevar.h>
82 #include <sys/rwlock.h>
83 #include <sys/file.h>
84 #include <sys/sysctl.h>
85 #include <sys/sysent.h>
86 #include <sys/shm.h>
87 
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vnode_pager.h>
98 #include <vm/swap_pager.h>
99 #include <vm/uma.h>
100 
101 /*
102  *	Virtual memory maps provide for the mapping, protection,
103  *	and sharing of virtual memory objects.  In addition,
104  *	this module provides for an efficient virtual copy of
105  *	memory from one map to another.
106  *
107  *	Synchronization is required prior to most operations.
108  *
109  *	Maps consist of an ordered doubly-linked list of simple
110  *	entries; a self-adjusting binary search tree of these
111  *	entries is used to speed up lookups.
112  *
113  *	Since portions of maps are specified by start/end addresses,
114  *	which may not align with existing map entries, all
115  *	routines merely "clip" entries to these start/end values.
116  *	[That is, an entry is split into two, bordering at a
117  *	start or end value.]  Note that these clippings may not
118  *	always be necessary (as the two resulting entries are then
119  *	not changed); however, the clipping is done for convenience.
120  *
121  *	As mentioned above, virtual copy operations are performed
122  *	by copying VM object references from one map to
123  *	another, and then marking both regions as copy-on-write.
124  */
125 
126 static struct mtx map_sleep_mtx;
127 static uma_zone_t mapentzone;
128 static uma_zone_t kmapentzone;
129 static uma_zone_t mapzone;
130 static uma_zone_t vmspace_zone;
131 static int vmspace_zinit(void *mem, int size, int flags);
132 static int vm_map_zinit(void *mem, int ize, int flags);
133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
134     vm_offset_t max);
135 static int vm_map_alignspace(vm_map_t map, vm_object_t object,
136     vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
137     vm_offset_t max_addr, vm_offset_t alignment);
138 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
139 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
140 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
141 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
142     vm_map_entry_t gap_entry);
143 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
144     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
145 #ifdef INVARIANTS
146 static void vm_map_zdtor(void *mem, int size, void *arg);
147 static void vmspace_zdtor(void *mem, int size, void *arg);
148 #endif
149 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
150     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
151     int cow);
152 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
153     vm_offset_t failed_addr);
154 
155 #define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
156     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
157      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
158 
159 /*
160  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
161  * stable.
162  */
163 #define PROC_VMSPACE_LOCK(p) do { } while (0)
164 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
165 
166 /*
167  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
168  *
169  *	Asserts that the starting and ending region
170  *	addresses fall within the valid range of the map.
171  */
172 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
173 		{					\
174 		if (start < vm_map_min(map))		\
175 			start = vm_map_min(map);	\
176 		if (end > vm_map_max(map))		\
177 			end = vm_map_max(map);		\
178 		if (start > end)			\
179 			start = end;			\
180 		}
181 
182 /*
183  *	vm_map_startup:
184  *
185  *	Initialize the vm_map module.  Must be called before
186  *	any other vm_map routines.
187  *
188  *	Map and entry structures are allocated from the general
189  *	purpose memory pool with some exceptions:
190  *
191  *	- The kernel map and kmem submap are allocated statically.
192  *	- Kernel map entries are allocated out of a static pool.
193  *
194  *	These restrictions are necessary since malloc() uses the
195  *	maps and requires map entries.
196  */
197 
198 void
199 vm_map_startup(void)
200 {
201 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
202 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
203 #ifdef INVARIANTS
204 	    vm_map_zdtor,
205 #else
206 	    NULL,
207 #endif
208 	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
209 	uma_prealloc(mapzone, MAX_KMAP);
210 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
211 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
212 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
213 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
214 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
215 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
216 #ifdef INVARIANTS
217 	    vmspace_zdtor,
218 #else
219 	    NULL,
220 #endif
221 	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
222 }
223 
224 static int
225 vmspace_zinit(void *mem, int size, int flags)
226 {
227 	struct vmspace *vm;
228 
229 	vm = (struct vmspace *)mem;
230 
231 	vm->vm_map.pmap = NULL;
232 	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
233 	PMAP_LOCK_INIT(vmspace_pmap(vm));
234 	return (0);
235 }
236 
237 static int
238 vm_map_zinit(void *mem, int size, int flags)
239 {
240 	vm_map_t map;
241 
242 	map = (vm_map_t)mem;
243 	memset(map, 0, sizeof(*map));
244 	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
245 	sx_init(&map->lock, "vm map (user)");
246 	return (0);
247 }
248 
249 #ifdef INVARIANTS
250 static void
251 vmspace_zdtor(void *mem, int size, void *arg)
252 {
253 	struct vmspace *vm;
254 
255 	vm = (struct vmspace *)mem;
256 
257 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
258 }
259 static void
260 vm_map_zdtor(void *mem, int size, void *arg)
261 {
262 	vm_map_t map;
263 
264 	map = (vm_map_t)mem;
265 	KASSERT(map->nentries == 0,
266 	    ("map %p nentries == %d on free.",
267 	    map, map->nentries));
268 	KASSERT(map->size == 0,
269 	    ("map %p size == %lu on free.",
270 	    map, (unsigned long)map->size));
271 }
272 #endif	/* INVARIANTS */
273 
274 /*
275  * Allocate a vmspace structure, including a vm_map and pmap,
276  * and initialize those structures.  The refcnt is set to 1.
277  *
278  * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
279  */
280 struct vmspace *
281 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
282 {
283 	struct vmspace *vm;
284 
285 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
286 	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
287 	if (!pinit(vmspace_pmap(vm))) {
288 		uma_zfree(vmspace_zone, vm);
289 		return (NULL);
290 	}
291 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
292 	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
293 	vm->vm_refcnt = 1;
294 	vm->vm_shm = NULL;
295 	vm->vm_swrss = 0;
296 	vm->vm_tsize = 0;
297 	vm->vm_dsize = 0;
298 	vm->vm_ssize = 0;
299 	vm->vm_taddr = 0;
300 	vm->vm_daddr = 0;
301 	vm->vm_maxsaddr = 0;
302 	return (vm);
303 }
304 
305 #ifdef RACCT
306 static void
307 vmspace_container_reset(struct proc *p)
308 {
309 
310 	PROC_LOCK(p);
311 	racct_set(p, RACCT_DATA, 0);
312 	racct_set(p, RACCT_STACK, 0);
313 	racct_set(p, RACCT_RSS, 0);
314 	racct_set(p, RACCT_MEMLOCK, 0);
315 	racct_set(p, RACCT_VMEM, 0);
316 	PROC_UNLOCK(p);
317 }
318 #endif
319 
320 static inline void
321 vmspace_dofree(struct vmspace *vm)
322 {
323 
324 	CTR1(KTR_VM, "vmspace_free: %p", vm);
325 
326 	/*
327 	 * Make sure any SysV shm is freed, it might not have been in
328 	 * exit1().
329 	 */
330 	shmexit(vm);
331 
332 	/*
333 	 * Lock the map, to wait out all other references to it.
334 	 * Delete all of the mappings and pages they hold, then call
335 	 * the pmap module to reclaim anything left.
336 	 */
337 	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
338 	    vm_map_max(&vm->vm_map));
339 
340 	pmap_release(vmspace_pmap(vm));
341 	vm->vm_map.pmap = NULL;
342 	uma_zfree(vmspace_zone, vm);
343 }
344 
345 void
346 vmspace_free(struct vmspace *vm)
347 {
348 
349 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
350 	    "vmspace_free() called");
351 
352 	if (vm->vm_refcnt == 0)
353 		panic("vmspace_free: attempt to free already freed vmspace");
354 
355 	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
356 		vmspace_dofree(vm);
357 }
358 
359 void
360 vmspace_exitfree(struct proc *p)
361 {
362 	struct vmspace *vm;
363 
364 	PROC_VMSPACE_LOCK(p);
365 	vm = p->p_vmspace;
366 	p->p_vmspace = NULL;
367 	PROC_VMSPACE_UNLOCK(p);
368 	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
369 	vmspace_free(vm);
370 }
371 
372 void
373 vmspace_exit(struct thread *td)
374 {
375 	int refcnt;
376 	struct vmspace *vm;
377 	struct proc *p;
378 
379 	/*
380 	 * Release user portion of address space.
381 	 * This releases references to vnodes,
382 	 * which could cause I/O if the file has been unlinked.
383 	 * Need to do this early enough that we can still sleep.
384 	 *
385 	 * The last exiting process to reach this point releases as
386 	 * much of the environment as it can. vmspace_dofree() is the
387 	 * slower fallback in case another process had a temporary
388 	 * reference to the vmspace.
389 	 */
390 
391 	p = td->td_proc;
392 	vm = p->p_vmspace;
393 	atomic_add_int(&vmspace0.vm_refcnt, 1);
394 	do {
395 		refcnt = vm->vm_refcnt;
396 		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
397 			/* Switch now since other proc might free vmspace */
398 			PROC_VMSPACE_LOCK(p);
399 			p->p_vmspace = &vmspace0;
400 			PROC_VMSPACE_UNLOCK(p);
401 			pmap_activate(td);
402 		}
403 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
404 	if (refcnt == 1) {
405 		if (p->p_vmspace != vm) {
406 			/* vmspace not yet freed, switch back */
407 			PROC_VMSPACE_LOCK(p);
408 			p->p_vmspace = vm;
409 			PROC_VMSPACE_UNLOCK(p);
410 			pmap_activate(td);
411 		}
412 		pmap_remove_pages(vmspace_pmap(vm));
413 		/* Switch now since this proc will free vmspace */
414 		PROC_VMSPACE_LOCK(p);
415 		p->p_vmspace = &vmspace0;
416 		PROC_VMSPACE_UNLOCK(p);
417 		pmap_activate(td);
418 		vmspace_dofree(vm);
419 	}
420 #ifdef RACCT
421 	if (racct_enable)
422 		vmspace_container_reset(p);
423 #endif
424 }
425 
426 /* Acquire reference to vmspace owned by another process. */
427 
428 struct vmspace *
429 vmspace_acquire_ref(struct proc *p)
430 {
431 	struct vmspace *vm;
432 	int refcnt;
433 
434 	PROC_VMSPACE_LOCK(p);
435 	vm = p->p_vmspace;
436 	if (vm == NULL) {
437 		PROC_VMSPACE_UNLOCK(p);
438 		return (NULL);
439 	}
440 	do {
441 		refcnt = vm->vm_refcnt;
442 		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
443 			PROC_VMSPACE_UNLOCK(p);
444 			return (NULL);
445 		}
446 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
447 	if (vm != p->p_vmspace) {
448 		PROC_VMSPACE_UNLOCK(p);
449 		vmspace_free(vm);
450 		return (NULL);
451 	}
452 	PROC_VMSPACE_UNLOCK(p);
453 	return (vm);
454 }
455 
456 /*
457  * Switch between vmspaces in an AIO kernel process.
458  *
459  * The AIO kernel processes switch to and from a user process's
460  * vmspace while performing an I/O operation on behalf of a user
461  * process.  The new vmspace is either the vmspace of a user process
462  * obtained from an active AIO request or the initial vmspace of the
463  * AIO kernel process (when it is idling).  Because user processes
464  * will block to drain any active AIO requests before proceeding in
465  * exit() or execve(), the vmspace reference count for these vmspaces
466  * can never be 0.  This allows for a much simpler implementation than
467  * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
468  * processes hold an extra reference on their initial vmspace for the
469  * life of the process so that this guarantee is true for any vmspace
470  * passed as 'newvm'.
471  */
472 void
473 vmspace_switch_aio(struct vmspace *newvm)
474 {
475 	struct vmspace *oldvm;
476 
477 	/* XXX: Need some way to assert that this is an aio daemon. */
478 
479 	KASSERT(newvm->vm_refcnt > 0,
480 	    ("vmspace_switch_aio: newvm unreferenced"));
481 
482 	oldvm = curproc->p_vmspace;
483 	if (oldvm == newvm)
484 		return;
485 
486 	/*
487 	 * Point to the new address space and refer to it.
488 	 */
489 	curproc->p_vmspace = newvm;
490 	atomic_add_int(&newvm->vm_refcnt, 1);
491 
492 	/* Activate the new mapping. */
493 	pmap_activate(curthread);
494 
495 	/* Remove the daemon's reference to the old address space. */
496 	KASSERT(oldvm->vm_refcnt > 1,
497 	    ("vmspace_switch_aio: oldvm dropping last reference"));
498 	vmspace_free(oldvm);
499 }
500 
501 void
502 _vm_map_lock(vm_map_t map, const char *file, int line)
503 {
504 
505 	if (map->system_map)
506 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
507 	else
508 		sx_xlock_(&map->lock, file, line);
509 	map->timestamp++;
510 }
511 
512 static void
513 vm_map_process_deferred(void)
514 {
515 	struct thread *td;
516 	vm_map_entry_t entry, next;
517 	vm_object_t object;
518 
519 	td = curthread;
520 	entry = td->td_map_def_user;
521 	td->td_map_def_user = NULL;
522 	while (entry != NULL) {
523 		next = entry->next;
524 		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
525 			/*
526 			 * Decrement the object's writemappings and
527 			 * possibly the vnode's v_writecount.
528 			 */
529 			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
530 			    ("Submap with writecount"));
531 			object = entry->object.vm_object;
532 			KASSERT(object != NULL, ("No object for writecount"));
533 			vnode_pager_release_writecount(object, entry->start,
534 			    entry->end);
535 		}
536 		vm_map_entry_deallocate(entry, FALSE);
537 		entry = next;
538 	}
539 }
540 
541 void
542 _vm_map_unlock(vm_map_t map, const char *file, int line)
543 {
544 
545 	if (map->system_map)
546 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
547 	else {
548 		sx_xunlock_(&map->lock, file, line);
549 		vm_map_process_deferred();
550 	}
551 }
552 
553 void
554 _vm_map_lock_read(vm_map_t map, const char *file, int line)
555 {
556 
557 	if (map->system_map)
558 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
559 	else
560 		sx_slock_(&map->lock, file, line);
561 }
562 
563 void
564 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
565 {
566 
567 	if (map->system_map)
568 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
569 	else {
570 		sx_sunlock_(&map->lock, file, line);
571 		vm_map_process_deferred();
572 	}
573 }
574 
575 int
576 _vm_map_trylock(vm_map_t map, const char *file, int line)
577 {
578 	int error;
579 
580 	error = map->system_map ?
581 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
582 	    !sx_try_xlock_(&map->lock, file, line);
583 	if (error == 0)
584 		map->timestamp++;
585 	return (error == 0);
586 }
587 
588 int
589 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
590 {
591 	int error;
592 
593 	error = map->system_map ?
594 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
595 	    !sx_try_slock_(&map->lock, file, line);
596 	return (error == 0);
597 }
598 
599 /*
600  *	_vm_map_lock_upgrade:	[ internal use only ]
601  *
602  *	Tries to upgrade a read (shared) lock on the specified map to a write
603  *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
604  *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
605  *	returned without a read or write lock held.
606  *
607  *	Requires that the map be read locked.
608  */
609 int
610 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
611 {
612 	unsigned int last_timestamp;
613 
614 	if (map->system_map) {
615 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
616 	} else {
617 		if (!sx_try_upgrade_(&map->lock, file, line)) {
618 			last_timestamp = map->timestamp;
619 			sx_sunlock_(&map->lock, file, line);
620 			vm_map_process_deferred();
621 			/*
622 			 * If the map's timestamp does not change while the
623 			 * map is unlocked, then the upgrade succeeds.
624 			 */
625 			sx_xlock_(&map->lock, file, line);
626 			if (last_timestamp != map->timestamp) {
627 				sx_xunlock_(&map->lock, file, line);
628 				return (1);
629 			}
630 		}
631 	}
632 	map->timestamp++;
633 	return (0);
634 }
635 
636 void
637 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
638 {
639 
640 	if (map->system_map) {
641 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
642 	} else
643 		sx_downgrade_(&map->lock, file, line);
644 }
645 
646 /*
647  *	vm_map_locked:
648  *
649  *	Returns a non-zero value if the caller holds a write (exclusive) lock
650  *	on the specified map and the value "0" otherwise.
651  */
652 int
653 vm_map_locked(vm_map_t map)
654 {
655 
656 	if (map->system_map)
657 		return (mtx_owned(&map->system_mtx));
658 	else
659 		return (sx_xlocked(&map->lock));
660 }
661 
662 #ifdef INVARIANTS
663 static void
664 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
665 {
666 
667 	if (map->system_map)
668 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
669 	else
670 		sx_assert_(&map->lock, SA_XLOCKED, file, line);
671 }
672 
673 #define	VM_MAP_ASSERT_LOCKED(map) \
674     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
675 #else
676 #define	VM_MAP_ASSERT_LOCKED(map)
677 #endif
678 
679 /*
680  *	_vm_map_unlock_and_wait:
681  *
682  *	Atomically releases the lock on the specified map and puts the calling
683  *	thread to sleep.  The calling thread will remain asleep until either
684  *	vm_map_wakeup() is performed on the map or the specified timeout is
685  *	exceeded.
686  *
687  *	WARNING!  This function does not perform deferred deallocations of
688  *	objects and map	entries.  Therefore, the calling thread is expected to
689  *	reacquire the map lock after reawakening and later perform an ordinary
690  *	unlock operation, such as vm_map_unlock(), before completing its
691  *	operation on the map.
692  */
693 int
694 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
695 {
696 
697 	mtx_lock(&map_sleep_mtx);
698 	if (map->system_map)
699 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
700 	else
701 		sx_xunlock_(&map->lock, file, line);
702 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
703 	    timo));
704 }
705 
706 /*
707  *	vm_map_wakeup:
708  *
709  *	Awaken any threads that have slept on the map using
710  *	vm_map_unlock_and_wait().
711  */
712 void
713 vm_map_wakeup(vm_map_t map)
714 {
715 
716 	/*
717 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
718 	 * from being performed (and lost) between the map unlock
719 	 * and the msleep() in _vm_map_unlock_and_wait().
720 	 */
721 	mtx_lock(&map_sleep_mtx);
722 	mtx_unlock(&map_sleep_mtx);
723 	wakeup(&map->root);
724 }
725 
726 void
727 vm_map_busy(vm_map_t map)
728 {
729 
730 	VM_MAP_ASSERT_LOCKED(map);
731 	map->busy++;
732 }
733 
734 void
735 vm_map_unbusy(vm_map_t map)
736 {
737 
738 	VM_MAP_ASSERT_LOCKED(map);
739 	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
740 	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
741 		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
742 		wakeup(&map->busy);
743 	}
744 }
745 
746 void
747 vm_map_wait_busy(vm_map_t map)
748 {
749 
750 	VM_MAP_ASSERT_LOCKED(map);
751 	while (map->busy) {
752 		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
753 		if (map->system_map)
754 			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
755 		else
756 			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
757 	}
758 	map->timestamp++;
759 }
760 
761 long
762 vmspace_resident_count(struct vmspace *vmspace)
763 {
764 	return pmap_resident_count(vmspace_pmap(vmspace));
765 }
766 
767 /*
768  *	vm_map_create:
769  *
770  *	Creates and returns a new empty VM map with
771  *	the given physical map structure, and having
772  *	the given lower and upper address bounds.
773  */
774 vm_map_t
775 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
776 {
777 	vm_map_t result;
778 
779 	result = uma_zalloc(mapzone, M_WAITOK);
780 	CTR1(KTR_VM, "vm_map_create: %p", result);
781 	_vm_map_init(result, pmap, min, max);
782 	return (result);
783 }
784 
785 /*
786  * Initialize an existing vm_map structure
787  * such as that in the vmspace structure.
788  */
789 static void
790 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
791 {
792 
793 	map->header.next = map->header.prev = &map->header;
794 	map->header.eflags = MAP_ENTRY_HEADER;
795 	map->needs_wakeup = FALSE;
796 	map->system_map = 0;
797 	map->pmap = pmap;
798 	map->header.end = min;
799 	map->header.start = max;
800 	map->flags = 0;
801 	map->root = NULL;
802 	map->timestamp = 0;
803 	map->busy = 0;
804 }
805 
806 void
807 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
808 {
809 
810 	_vm_map_init(map, pmap, min, max);
811 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
812 	sx_init(&map->lock, "user map");
813 }
814 
815 /*
816  *	vm_map_entry_dispose:	[ internal use only ]
817  *
818  *	Inverse of vm_map_entry_create.
819  */
820 static void
821 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
822 {
823 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
824 }
825 
826 /*
827  *	vm_map_entry_create:	[ internal use only ]
828  *
829  *	Allocates a VM map entry for insertion.
830  *	No entry fields are filled in.
831  */
832 static vm_map_entry_t
833 vm_map_entry_create(vm_map_t map)
834 {
835 	vm_map_entry_t new_entry;
836 
837 	if (map->system_map)
838 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
839 	else
840 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
841 	if (new_entry == NULL)
842 		panic("vm_map_entry_create: kernel resources exhausted");
843 	return (new_entry);
844 }
845 
846 /*
847  *	vm_map_entry_set_behavior:
848  *
849  *	Set the expected access behavior, either normal, random, or
850  *	sequential.
851  */
852 static inline void
853 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
854 {
855 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
856 	    (behavior & MAP_ENTRY_BEHAV_MASK);
857 }
858 
859 /*
860  *	vm_map_entry_set_max_free:
861  *
862  *	Set the max_free field in a vm_map_entry.
863  */
864 static inline void
865 vm_map_entry_set_max_free(vm_map_entry_t entry)
866 {
867 
868 	entry->max_free = entry->adj_free;
869 	if (entry->left != NULL && entry->left->max_free > entry->max_free)
870 		entry->max_free = entry->left->max_free;
871 	if (entry->right != NULL && entry->right->max_free > entry->max_free)
872 		entry->max_free = entry->right->max_free;
873 }
874 
875 /*
876  *	vm_map_entry_splay:
877  *
878  *	The Sleator and Tarjan top-down splay algorithm with the
879  *	following variation.  Max_free must be computed bottom-up, so
880  *	on the downward pass, maintain the left and right spines in
881  *	reverse order.  Then, make a second pass up each side to fix
882  *	the pointers and compute max_free.  The time bound is O(log n)
883  *	amortized.
884  *
885  *	The new root is the vm_map_entry containing "addr", or else an
886  *	adjacent entry (lower or higher) if addr is not in the tree.
887  *
888  *	The map must be locked, and leaves it so.
889  *
890  *	Returns: the new root.
891  */
892 static vm_map_entry_t
893 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
894 {
895 	vm_map_entry_t llist, rlist;
896 	vm_map_entry_t ltree, rtree;
897 	vm_map_entry_t y;
898 
899 	/* Special case of empty tree. */
900 	if (root == NULL)
901 		return (root);
902 
903 	/*
904 	 * Pass One: Splay down the tree until we find addr or a NULL
905 	 * pointer where addr would go.  llist and rlist are the two
906 	 * sides in reverse order (bottom-up), with llist linked by
907 	 * the right pointer and rlist linked by the left pointer in
908 	 * the vm_map_entry.  Wait until Pass Two to set max_free on
909 	 * the two spines.
910 	 */
911 	llist = NULL;
912 	rlist = NULL;
913 	for (;;) {
914 		/* root is never NULL in here. */
915 		if (addr < root->start) {
916 			y = root->left;
917 			if (y == NULL)
918 				break;
919 			if (addr < y->start && y->left != NULL) {
920 				/* Rotate right and put y on rlist. */
921 				root->left = y->right;
922 				y->right = root;
923 				vm_map_entry_set_max_free(root);
924 				root = y->left;
925 				y->left = rlist;
926 				rlist = y;
927 			} else {
928 				/* Put root on rlist. */
929 				root->left = rlist;
930 				rlist = root;
931 				root = y;
932 			}
933 		} else if (addr >= root->end) {
934 			y = root->right;
935 			if (y == NULL)
936 				break;
937 			if (addr >= y->end && y->right != NULL) {
938 				/* Rotate left and put y on llist. */
939 				root->right = y->left;
940 				y->left = root;
941 				vm_map_entry_set_max_free(root);
942 				root = y->right;
943 				y->right = llist;
944 				llist = y;
945 			} else {
946 				/* Put root on llist. */
947 				root->right = llist;
948 				llist = root;
949 				root = y;
950 			}
951 		} else
952 			break;
953 	}
954 
955 	/*
956 	 * Pass Two: Walk back up the two spines, flip the pointers
957 	 * and set max_free.  The subtrees of the root go at the
958 	 * bottom of llist and rlist.
959 	 */
960 	ltree = root->left;
961 	while (llist != NULL) {
962 		y = llist->right;
963 		llist->right = ltree;
964 		vm_map_entry_set_max_free(llist);
965 		ltree = llist;
966 		llist = y;
967 	}
968 	rtree = root->right;
969 	while (rlist != NULL) {
970 		y = rlist->left;
971 		rlist->left = rtree;
972 		vm_map_entry_set_max_free(rlist);
973 		rtree = rlist;
974 		rlist = y;
975 	}
976 
977 	/*
978 	 * Final assembly: add ltree and rtree as subtrees of root.
979 	 */
980 	root->left = ltree;
981 	root->right = rtree;
982 	vm_map_entry_set_max_free(root);
983 
984 	return (root);
985 }
986 
987 /*
988  *	vm_map_entry_{un,}link:
989  *
990  *	Insert/remove entries from maps.
991  */
992 static void
993 vm_map_entry_link(vm_map_t map,
994 		  vm_map_entry_t after_where,
995 		  vm_map_entry_t entry)
996 {
997 
998 	CTR4(KTR_VM,
999 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
1000 	    map->nentries, entry, after_where);
1001 	VM_MAP_ASSERT_LOCKED(map);
1002 	KASSERT(after_where->end <= entry->start,
1003 	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
1004 	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
1005 	KASSERT(entry->end <= after_where->next->start,
1006 	    ("vm_map_entry_link: new end %jx next start %jx overlap",
1007 	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
1008 
1009 	map->nentries++;
1010 	entry->prev = after_where;
1011 	entry->next = after_where->next;
1012 	entry->next->prev = entry;
1013 	after_where->next = entry;
1014 
1015 	if (after_where != &map->header) {
1016 		if (after_where != map->root)
1017 			vm_map_entry_splay(after_where->start, map->root);
1018 		entry->right = after_where->right;
1019 		entry->left = after_where;
1020 		after_where->right = NULL;
1021 		after_where->adj_free = entry->start - after_where->end;
1022 		vm_map_entry_set_max_free(after_where);
1023 	} else {
1024 		entry->right = map->root;
1025 		entry->left = NULL;
1026 	}
1027 	entry->adj_free = entry->next->start - entry->end;
1028 	vm_map_entry_set_max_free(entry);
1029 	map->root = entry;
1030 }
1031 
1032 static void
1033 vm_map_entry_unlink(vm_map_t map,
1034 		    vm_map_entry_t entry)
1035 {
1036 	vm_map_entry_t next, prev, root;
1037 
1038 	VM_MAP_ASSERT_LOCKED(map);
1039 	if (entry != map->root)
1040 		vm_map_entry_splay(entry->start, map->root);
1041 	if (entry->left == NULL)
1042 		root = entry->right;
1043 	else {
1044 		root = vm_map_entry_splay(entry->start, entry->left);
1045 		root->right = entry->right;
1046 		root->adj_free = entry->next->start - root->end;
1047 		vm_map_entry_set_max_free(root);
1048 	}
1049 	map->root = root;
1050 
1051 	prev = entry->prev;
1052 	next = entry->next;
1053 	next->prev = prev;
1054 	prev->next = next;
1055 	map->nentries--;
1056 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1057 	    map->nentries, entry);
1058 }
1059 
1060 /*
1061  *	vm_map_entry_resize_free:
1062  *
1063  *	Recompute the amount of free space following a vm_map_entry
1064  *	and propagate that value up the tree.  Call this function after
1065  *	resizing a map entry in-place, that is, without a call to
1066  *	vm_map_entry_link() or _unlink().
1067  *
1068  *	The map must be locked, and leaves it so.
1069  */
1070 static void
1071 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1072 {
1073 
1074 	/*
1075 	 * Using splay trees without parent pointers, propagating
1076 	 * max_free up the tree is done by moving the entry to the
1077 	 * root and making the change there.
1078 	 */
1079 	if (entry != map->root)
1080 		map->root = vm_map_entry_splay(entry->start, map->root);
1081 
1082 	entry->adj_free = entry->next->start - entry->end;
1083 	vm_map_entry_set_max_free(entry);
1084 }
1085 
1086 /*
1087  *	vm_map_lookup_entry:	[ internal use only ]
1088  *
1089  *	Finds the map entry containing (or
1090  *	immediately preceding) the specified address
1091  *	in the given map; the entry is returned
1092  *	in the "entry" parameter.  The boolean
1093  *	result indicates whether the address is
1094  *	actually contained in the map.
1095  */
1096 boolean_t
1097 vm_map_lookup_entry(
1098 	vm_map_t map,
1099 	vm_offset_t address,
1100 	vm_map_entry_t *entry)	/* OUT */
1101 {
1102 	vm_map_entry_t cur;
1103 	boolean_t locked;
1104 
1105 	/*
1106 	 * If the map is empty, then the map entry immediately preceding
1107 	 * "address" is the map's header.
1108 	 */
1109 	cur = map->root;
1110 	if (cur == NULL)
1111 		*entry = &map->header;
1112 	else if (address >= cur->start && cur->end > address) {
1113 		*entry = cur;
1114 		return (TRUE);
1115 	} else if ((locked = vm_map_locked(map)) ||
1116 	    sx_try_upgrade(&map->lock)) {
1117 		/*
1118 		 * Splay requires a write lock on the map.  However, it only
1119 		 * restructures the binary search tree; it does not otherwise
1120 		 * change the map.  Thus, the map's timestamp need not change
1121 		 * on a temporary upgrade.
1122 		 */
1123 		map->root = cur = vm_map_entry_splay(address, cur);
1124 		if (!locked)
1125 			sx_downgrade(&map->lock);
1126 
1127 		/*
1128 		 * If "address" is contained within a map entry, the new root
1129 		 * is that map entry.  Otherwise, the new root is a map entry
1130 		 * immediately before or after "address".
1131 		 */
1132 		if (address >= cur->start) {
1133 			*entry = cur;
1134 			if (cur->end > address)
1135 				return (TRUE);
1136 		} else
1137 			*entry = cur->prev;
1138 	} else
1139 		/*
1140 		 * Since the map is only locked for read access, perform a
1141 		 * standard binary search tree lookup for "address".
1142 		 */
1143 		for (;;) {
1144 			if (address < cur->start) {
1145 				if (cur->left == NULL) {
1146 					*entry = cur->prev;
1147 					break;
1148 				}
1149 				cur = cur->left;
1150 			} else if (cur->end > address) {
1151 				*entry = cur;
1152 				return (TRUE);
1153 			} else {
1154 				if (cur->right == NULL) {
1155 					*entry = cur;
1156 					break;
1157 				}
1158 				cur = cur->right;
1159 			}
1160 		}
1161 	return (FALSE);
1162 }
1163 
1164 /*
1165  *	vm_map_insert:
1166  *
1167  *	Inserts the given whole VM object into the target
1168  *	map at the specified address range.  The object's
1169  *	size should match that of the address range.
1170  *
1171  *	Requires that the map be locked, and leaves it so.
1172  *
1173  *	If object is non-NULL, ref count must be bumped by caller
1174  *	prior to making call to account for the new entry.
1175  */
1176 int
1177 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1178     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
1179 {
1180 	vm_map_entry_t new_entry, prev_entry, temp_entry;
1181 	struct ucred *cred;
1182 	vm_eflags_t protoeflags;
1183 	vm_inherit_t inheritance;
1184 
1185 	VM_MAP_ASSERT_LOCKED(map);
1186 	KASSERT(object != kernel_object ||
1187 	    (cow & MAP_COPY_ON_WRITE) == 0,
1188 	    ("vm_map_insert: kernel object and COW"));
1189 	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
1190 	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
1191 	KASSERT((prot & ~max) == 0,
1192 	    ("prot %#x is not subset of max_prot %#x", prot, max));
1193 
1194 	/*
1195 	 * Check that the start and end points are not bogus.
1196 	 */
1197 	if (start < vm_map_min(map) || end > vm_map_max(map) ||
1198 	    start >= end)
1199 		return (KERN_INVALID_ADDRESS);
1200 
1201 	/*
1202 	 * Find the entry prior to the proposed starting address; if it's part
1203 	 * of an existing entry, this range is bogus.
1204 	 */
1205 	if (vm_map_lookup_entry(map, start, &temp_entry))
1206 		return (KERN_NO_SPACE);
1207 
1208 	prev_entry = temp_entry;
1209 
1210 	/*
1211 	 * Assert that the next entry doesn't overlap the end point.
1212 	 */
1213 	if (prev_entry->next->start < end)
1214 		return (KERN_NO_SPACE);
1215 
1216 	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
1217 	    max != VM_PROT_NONE))
1218 		return (KERN_INVALID_ARGUMENT);
1219 
1220 	protoeflags = 0;
1221 	if (cow & MAP_COPY_ON_WRITE)
1222 		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
1223 	if (cow & MAP_NOFAULT)
1224 		protoeflags |= MAP_ENTRY_NOFAULT;
1225 	if (cow & MAP_DISABLE_SYNCER)
1226 		protoeflags |= MAP_ENTRY_NOSYNC;
1227 	if (cow & MAP_DISABLE_COREDUMP)
1228 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1229 	if (cow & MAP_STACK_GROWS_DOWN)
1230 		protoeflags |= MAP_ENTRY_GROWS_DOWN;
1231 	if (cow & MAP_STACK_GROWS_UP)
1232 		protoeflags |= MAP_ENTRY_GROWS_UP;
1233 	if (cow & MAP_VN_WRITECOUNT)
1234 		protoeflags |= MAP_ENTRY_VN_WRITECNT;
1235 	if ((cow & MAP_CREATE_GUARD) != 0)
1236 		protoeflags |= MAP_ENTRY_GUARD;
1237 	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
1238 		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
1239 	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
1240 		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
1241 	if (cow & MAP_INHERIT_SHARE)
1242 		inheritance = VM_INHERIT_SHARE;
1243 	else
1244 		inheritance = VM_INHERIT_DEFAULT;
1245 
1246 	cred = NULL;
1247 	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
1248 		goto charged;
1249 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1250 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1251 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1252 			return (KERN_RESOURCE_SHORTAGE);
1253 		KASSERT(object == NULL ||
1254 		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
1255 		    object->cred == NULL,
1256 		    ("overcommit: vm_map_insert o %p", object));
1257 		cred = curthread->td_ucred;
1258 	}
1259 
1260 charged:
1261 	/* Expand the kernel pmap, if necessary. */
1262 	if (map == kernel_map && end > kernel_vm_end)
1263 		pmap_growkernel(end);
1264 	if (object != NULL) {
1265 		/*
1266 		 * OBJ_ONEMAPPING must be cleared unless this mapping
1267 		 * is trivially proven to be the only mapping for any
1268 		 * of the object's pages.  (Object granularity
1269 		 * reference counting is insufficient to recognize
1270 		 * aliases with precision.)
1271 		 */
1272 		VM_OBJECT_WLOCK(object);
1273 		if (object->ref_count > 1 || object->shadow_count != 0)
1274 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1275 		VM_OBJECT_WUNLOCK(object);
1276 	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1277 	    protoeflags &&
1278 	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
1279 	    prev_entry->end == start && (prev_entry->cred == cred ||
1280 	    (prev_entry->object.vm_object != NULL &&
1281 	    prev_entry->object.vm_object->cred == cred)) &&
1282 	    vm_object_coalesce(prev_entry->object.vm_object,
1283 	    prev_entry->offset,
1284 	    (vm_size_t)(prev_entry->end - prev_entry->start),
1285 	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
1286 	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
1287 		/*
1288 		 * We were able to extend the object.  Determine if we
1289 		 * can extend the previous map entry to include the
1290 		 * new range as well.
1291 		 */
1292 		if (prev_entry->inheritance == inheritance &&
1293 		    prev_entry->protection == prot &&
1294 		    prev_entry->max_protection == max &&
1295 		    prev_entry->wired_count == 0) {
1296 			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1297 			    0, ("prev_entry %p has incoherent wiring",
1298 			    prev_entry));
1299 			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1300 				map->size += end - prev_entry->end;
1301 			prev_entry->end = end;
1302 			vm_map_entry_resize_free(map, prev_entry);
1303 			vm_map_simplify_entry(map, prev_entry);
1304 			return (KERN_SUCCESS);
1305 		}
1306 
1307 		/*
1308 		 * If we can extend the object but cannot extend the
1309 		 * map entry, we have to create a new map entry.  We
1310 		 * must bump the ref count on the extended object to
1311 		 * account for it.  object may be NULL.
1312 		 */
1313 		object = prev_entry->object.vm_object;
1314 		offset = prev_entry->offset +
1315 		    (prev_entry->end - prev_entry->start);
1316 		vm_object_reference(object);
1317 		if (cred != NULL && object != NULL && object->cred != NULL &&
1318 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1319 			/* Object already accounts for this uid. */
1320 			cred = NULL;
1321 		}
1322 	}
1323 	if (cred != NULL)
1324 		crhold(cred);
1325 
1326 	/*
1327 	 * Create a new entry
1328 	 */
1329 	new_entry = vm_map_entry_create(map);
1330 	new_entry->start = start;
1331 	new_entry->end = end;
1332 	new_entry->cred = NULL;
1333 
1334 	new_entry->eflags = protoeflags;
1335 	new_entry->object.vm_object = object;
1336 	new_entry->offset = offset;
1337 
1338 	new_entry->inheritance = inheritance;
1339 	new_entry->protection = prot;
1340 	new_entry->max_protection = max;
1341 	new_entry->wired_count = 0;
1342 	new_entry->wiring_thread = NULL;
1343 	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1344 	new_entry->next_read = start;
1345 
1346 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1347 	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
1348 	new_entry->cred = cred;
1349 
1350 	/*
1351 	 * Insert the new entry into the list
1352 	 */
1353 	vm_map_entry_link(map, prev_entry, new_entry);
1354 	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1355 		map->size += new_entry->end - new_entry->start;
1356 
1357 	/*
1358 	 * Try to coalesce the new entry with both the previous and next
1359 	 * entries in the list.  Previously, we only attempted to coalesce
1360 	 * with the previous entry when object is NULL.  Here, we handle the
1361 	 * other cases, which are less common.
1362 	 */
1363 	vm_map_simplify_entry(map, new_entry);
1364 
1365 	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
1366 		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1367 		    end - start, cow & MAP_PREFAULT_PARTIAL);
1368 	}
1369 
1370 	return (KERN_SUCCESS);
1371 }
1372 
1373 /*
1374  *	vm_map_findspace:
1375  *
1376  *	Find the first fit (lowest VM address) for "length" free bytes
1377  *	beginning at address >= start in the given map.
1378  *
1379  *	In a vm_map_entry, "adj_free" is the amount of free space
1380  *	adjacent (higher address) to this entry, and "max_free" is the
1381  *	maximum amount of contiguous free space in its subtree.  This
1382  *	allows finding a free region in one path down the tree, so
1383  *	O(log n) amortized with splay trees.
1384  *
1385  *	The map must be locked, and leaves it so.
1386  *
1387  *	Returns: 0 on success, and starting address in *addr,
1388  *		 1 if insufficient space.
1389  */
1390 int
1391 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1392     vm_offset_t *addr)	/* OUT */
1393 {
1394 	vm_map_entry_t entry;
1395 	vm_offset_t st;
1396 
1397 	/*
1398 	 * Request must fit within min/max VM address and must avoid
1399 	 * address wrap.
1400 	 */
1401 	start = MAX(start, vm_map_min(map));
1402 	if (start + length > vm_map_max(map) || start + length < start)
1403 		return (1);
1404 
1405 	/* Empty tree means wide open address space. */
1406 	if (map->root == NULL) {
1407 		*addr = start;
1408 		return (0);
1409 	}
1410 
1411 	/*
1412 	 * After splay, if start comes before root node, then there
1413 	 * must be a gap from start to the root.
1414 	 */
1415 	map->root = vm_map_entry_splay(start, map->root);
1416 	if (start + length <= map->root->start) {
1417 		*addr = start;
1418 		return (0);
1419 	}
1420 
1421 	/*
1422 	 * Root is the last node that might begin its gap before
1423 	 * start, and this is the last comparison where address
1424 	 * wrap might be a problem.
1425 	 */
1426 	st = (start > map->root->end) ? start : map->root->end;
1427 	if (length <= map->root->end + map->root->adj_free - st) {
1428 		*addr = st;
1429 		return (0);
1430 	}
1431 
1432 	/* With max_free, can immediately tell if no solution. */
1433 	entry = map->root->right;
1434 	if (entry == NULL || length > entry->max_free)
1435 		return (1);
1436 
1437 	/*
1438 	 * Search the right subtree in the order: left subtree, root,
1439 	 * right subtree (first fit).  The previous splay implies that
1440 	 * all regions in the right subtree have addresses > start.
1441 	 */
1442 	while (entry != NULL) {
1443 		if (entry->left != NULL && entry->left->max_free >= length)
1444 			entry = entry->left;
1445 		else if (entry->adj_free >= length) {
1446 			*addr = entry->end;
1447 			return (0);
1448 		} else
1449 			entry = entry->right;
1450 	}
1451 
1452 	/* Can't get here, so panic if we do. */
1453 	panic("vm_map_findspace: max_free corrupt");
1454 }
1455 
1456 int
1457 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1458     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1459     vm_prot_t max, int cow)
1460 {
1461 	vm_offset_t end;
1462 	int result;
1463 
1464 	end = start + length;
1465 	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1466 	    object == NULL,
1467 	    ("vm_map_fixed: non-NULL backing object for stack"));
1468 	vm_map_lock(map);
1469 	VM_MAP_RANGE_CHECK(map, start, end);
1470 	if ((cow & MAP_CHECK_EXCL) == 0)
1471 		vm_map_delete(map, start, end);
1472 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1473 		result = vm_map_stack_locked(map, start, length, sgrowsiz,
1474 		    prot, max, cow);
1475 	} else {
1476 		result = vm_map_insert(map, object, offset, start, end,
1477 		    prot, max, cow);
1478 	}
1479 	vm_map_unlock(map);
1480 	return (result);
1481 }
1482 
1483 /*
1484  * Searches for the specified amount of free space in the given map with the
1485  * specified alignment.  Performs an address-ordered, first-fit search from
1486  * the given address "*addr", with an optional upper bound "max_addr".  If the
1487  * parameter "alignment" is zero, then the alignment is computed from the
1488  * given (object, offset) pair so as to enable the greatest possible use of
1489  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
1490  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
1491  *
1492  * The map must be locked.  Initially, there must be at least "length" bytes
1493  * of free space at the given address.
1494  */
1495 static int
1496 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1497     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
1498     vm_offset_t alignment)
1499 {
1500 	vm_offset_t aligned_addr, free_addr;
1501 
1502 	VM_MAP_ASSERT_LOCKED(map);
1503 	free_addr = *addr;
1504 	KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
1505 	    free_addr == *addr, ("caller provided insufficient free space"));
1506 	for (;;) {
1507 		/*
1508 		 * At the start of every iteration, the free space at address
1509 		 * "*addr" is at least "length" bytes.
1510 		 */
1511 		if (alignment == 0)
1512 			pmap_align_superpage(object, offset, addr, length);
1513 		else if ((*addr & (alignment - 1)) != 0) {
1514 			*addr &= ~(alignment - 1);
1515 			*addr += alignment;
1516 		}
1517 		aligned_addr = *addr;
1518 		if (aligned_addr == free_addr) {
1519 			/*
1520 			 * Alignment did not change "*addr", so "*addr" must
1521 			 * still provide sufficient free space.
1522 			 */
1523 			return (KERN_SUCCESS);
1524 		}
1525 
1526 		/*
1527 		 * Test for address wrap on "*addr".  A wrapped "*addr" could
1528 		 * be a valid address, in which case vm_map_findspace() cannot
1529 		 * be relied upon to fail.
1530 		 */
1531 		if (aligned_addr < free_addr ||
1532 		    vm_map_findspace(map, aligned_addr, length, addr) ||
1533 		    (max_addr != 0 && *addr + length > max_addr))
1534 			return (KERN_NO_SPACE);
1535 		free_addr = *addr;
1536 		if (free_addr == aligned_addr) {
1537 			/*
1538 			 * If a successful call to vm_map_findspace() did not
1539 			 * change "*addr", then "*addr" must still be aligned
1540 			 * and provide sufficient free space.
1541 			 */
1542 			return (KERN_SUCCESS);
1543 		}
1544 	}
1545 }
1546 
1547 /*
1548  *	vm_map_find finds an unallocated region in the target address
1549  *	map with the given length.  The search is defined to be
1550  *	first-fit from the specified address; the region found is
1551  *	returned in the same parameter.
1552  *
1553  *	If object is non-NULL, ref count must be bumped by caller
1554  *	prior to making call to account for the new entry.
1555  */
1556 int
1557 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1558 	    vm_offset_t *addr,	/* IN/OUT */
1559 	    vm_size_t length, vm_offset_t max_addr, int find_space,
1560 	    vm_prot_t prot, vm_prot_t max, int cow)
1561 {
1562 	vm_offset_t alignment, min_addr;
1563 	int rv;
1564 
1565 	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1566 	    object == NULL,
1567 	    ("vm_map_find: non-NULL backing object for stack"));
1568 	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1569 	    (object->flags & OBJ_COLORED) == 0))
1570 		find_space = VMFS_ANY_SPACE;
1571 	if (find_space >> 8 != 0) {
1572 		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1573 		alignment = (vm_offset_t)1 << (find_space >> 8);
1574 	} else
1575 		alignment = 0;
1576 	vm_map_lock(map);
1577 	if (find_space != VMFS_NO_SPACE) {
1578 		KASSERT(find_space == VMFS_ANY_SPACE ||
1579 		    find_space == VMFS_OPTIMAL_SPACE ||
1580 		    find_space == VMFS_SUPER_SPACE ||
1581 		    alignment != 0, ("unexpected VMFS flag"));
1582 		min_addr = *addr;
1583 again:
1584 		if (vm_map_findspace(map, min_addr, length, addr) ||
1585 		    (max_addr != 0 && *addr + length > max_addr)) {
1586 			rv = KERN_NO_SPACE;
1587 			goto done;
1588 		}
1589 		if (find_space != VMFS_ANY_SPACE &&
1590 		    (rv = vm_map_alignspace(map, object, offset, addr, length,
1591 		    max_addr, alignment)) != KERN_SUCCESS) {
1592 			if (find_space == VMFS_OPTIMAL_SPACE) {
1593 				find_space = VMFS_ANY_SPACE;
1594 				goto again;
1595 			}
1596 			goto done;
1597 		}
1598 	}
1599 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1600 		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
1601 		    max, cow);
1602 	} else {
1603 		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
1604 		    prot, max, cow);
1605 	}
1606 done:
1607 	vm_map_unlock(map);
1608 	return (rv);
1609 }
1610 
1611 /*
1612  *	vm_map_find_min() is a variant of vm_map_find() that takes an
1613  *	additional parameter (min_addr) and treats the given address
1614  *	(*addr) differently.  Specifically, it treats *addr as a hint
1615  *	and not as the minimum address where the mapping is created.
1616  *
1617  *	This function works in two phases.  First, it tries to
1618  *	allocate above the hint.  If that fails and the hint is
1619  *	greater than min_addr, it performs a second pass, replacing
1620  *	the hint with min_addr as the minimum address for the
1621  *	allocation.
1622  */
1623 int
1624 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1625     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
1626     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
1627     int cow)
1628 {
1629 	vm_offset_t hint;
1630 	int rv;
1631 
1632 	hint = *addr;
1633 	for (;;) {
1634 		rv = vm_map_find(map, object, offset, addr, length, max_addr,
1635 		    find_space, prot, max, cow);
1636 		if (rv == KERN_SUCCESS || min_addr >= hint)
1637 			return (rv);
1638 		*addr = hint = min_addr;
1639 	}
1640 }
1641 
1642 /*
1643  * A map entry with any of the following flags set must not be merged with
1644  * another entry.
1645  */
1646 #define	MAP_ENTRY_NOMERGE_MASK	(MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
1647 	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)
1648 
1649 static bool
1650 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
1651 {
1652 
1653 	KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
1654 	    (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
1655 	    ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
1656 	    prev, entry));
1657 	return (prev->end == entry->start &&
1658 	    prev->object.vm_object == entry->object.vm_object &&
1659 	    (prev->object.vm_object == NULL ||
1660 	    prev->offset + (prev->end - prev->start) == entry->offset) &&
1661 	    prev->eflags == entry->eflags &&
1662 	    prev->protection == entry->protection &&
1663 	    prev->max_protection == entry->max_protection &&
1664 	    prev->inheritance == entry->inheritance &&
1665 	    prev->wired_count == entry->wired_count &&
1666 	    prev->cred == entry->cred);
1667 }
1668 
1669 static void
1670 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
1671 {
1672 
1673 	/*
1674 	 * If the backing object is a vnode object, vm_object_deallocate()
1675 	 * calls vrele().  However, vrele() does not lock the vnode because
1676 	 * the vnode has additional references.  Thus, the map lock can be
1677 	 * kept without causing a lock-order reversal with the vnode lock.
1678 	 *
1679 	 * Since we count the number of virtual page mappings in
1680 	 * object->un_pager.vnp.writemappings, the writemappings value
1681 	 * should not be adjusted when the entry is disposed of.
1682 	 */
1683 	if (entry->object.vm_object != NULL)
1684 		vm_object_deallocate(entry->object.vm_object);
1685 	if (entry->cred != NULL)
1686 		crfree(entry->cred);
1687 	vm_map_entry_dispose(map, entry);
1688 }
1689 
1690 /*
1691  *	vm_map_simplify_entry:
1692  *
1693  *	Simplify the given map entry by merging with either neighbor.  This
1694  *	routine also has the ability to merge with both neighbors.
1695  *
1696  *	The map must be locked.
1697  *
1698  *	This routine guarantees that the passed entry remains valid (though
1699  *	possibly extended).  When merging, this routine may delete one or
1700  *	both neighbors.
1701  */
1702 void
1703 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1704 {
1705 	vm_map_entry_t next, prev;
1706 
1707 	if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0)
1708 		return;
1709 	prev = entry->prev;
1710 	if (vm_map_mergeable_neighbors(prev, entry)) {
1711 		vm_map_entry_unlink(map, prev);
1712 		entry->start = prev->start;
1713 		entry->offset = prev->offset;
1714 		if (entry->prev != &map->header)
1715 			vm_map_entry_resize_free(map, entry->prev);
1716 		vm_map_merged_neighbor_dispose(map, prev);
1717 	}
1718 	next = entry->next;
1719 	if (vm_map_mergeable_neighbors(entry, next)) {
1720 		vm_map_entry_unlink(map, next);
1721 		entry->end = next->end;
1722 		vm_map_entry_resize_free(map, entry);
1723 		vm_map_merged_neighbor_dispose(map, next);
1724 	}
1725 }
1726 
1727 /*
1728  *	vm_map_clip_start:	[ internal use only ]
1729  *
1730  *	Asserts that the given entry begins at or after
1731  *	the specified address; if necessary,
1732  *	it splits the entry into two.
1733  */
1734 #define vm_map_clip_start(map, entry, startaddr) \
1735 { \
1736 	if (startaddr > entry->start) \
1737 		_vm_map_clip_start(map, entry, startaddr); \
1738 }
1739 
1740 /*
1741  *	This routine is called only when it is known that
1742  *	the entry must be split.
1743  */
1744 static void
1745 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1746 {
1747 	vm_map_entry_t new_entry;
1748 
1749 	VM_MAP_ASSERT_LOCKED(map);
1750 	KASSERT(entry->end > start && entry->start < start,
1751 	    ("_vm_map_clip_start: invalid clip of entry %p", entry));
1752 
1753 	/*
1754 	 * Split off the front portion -- note that we must insert the new
1755 	 * entry BEFORE this one, so that this entry has the specified
1756 	 * starting address.
1757 	 */
1758 	vm_map_simplify_entry(map, entry);
1759 
1760 	/*
1761 	 * If there is no object backing this entry, we might as well create
1762 	 * one now.  If we defer it, an object can get created after the map
1763 	 * is clipped, and individual objects will be created for the split-up
1764 	 * map.  This is a bit of a hack, but is also about the best place to
1765 	 * put this improvement.
1766 	 */
1767 	if (entry->object.vm_object == NULL && !map->system_map &&
1768 	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
1769 		vm_object_t object;
1770 		object = vm_object_allocate(OBJT_DEFAULT,
1771 				atop(entry->end - entry->start));
1772 		entry->object.vm_object = object;
1773 		entry->offset = 0;
1774 		if (entry->cred != NULL) {
1775 			object->cred = entry->cred;
1776 			object->charge = entry->end - entry->start;
1777 			entry->cred = NULL;
1778 		}
1779 	} else if (entry->object.vm_object != NULL &&
1780 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1781 		   entry->cred != NULL) {
1782 		VM_OBJECT_WLOCK(entry->object.vm_object);
1783 		KASSERT(entry->object.vm_object->cred == NULL,
1784 		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1785 		entry->object.vm_object->cred = entry->cred;
1786 		entry->object.vm_object->charge = entry->end - entry->start;
1787 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1788 		entry->cred = NULL;
1789 	}
1790 
1791 	new_entry = vm_map_entry_create(map);
1792 	*new_entry = *entry;
1793 
1794 	new_entry->end = start;
1795 	entry->offset += (start - entry->start);
1796 	entry->start = start;
1797 	if (new_entry->cred != NULL)
1798 		crhold(entry->cred);
1799 
1800 	vm_map_entry_link(map, entry->prev, new_entry);
1801 
1802 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1803 		vm_object_reference(new_entry->object.vm_object);
1804 		/*
1805 		 * The object->un_pager.vnp.writemappings for the
1806 		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1807 		 * kept as is here.  The virtual pages are
1808 		 * re-distributed among the clipped entries, so the sum is
1809 		 * left the same.
1810 		 */
1811 	}
1812 }
1813 
1814 /*
1815  *	vm_map_clip_end:	[ internal use only ]
1816  *
1817  *	Asserts that the given entry ends at or before
1818  *	the specified address; if necessary,
1819  *	it splits the entry into two.
1820  */
1821 #define vm_map_clip_end(map, entry, endaddr) \
1822 { \
1823 	if ((endaddr) < (entry->end)) \
1824 		_vm_map_clip_end((map), (entry), (endaddr)); \
1825 }
1826 
1827 /*
1828  *	This routine is called only when it is known that
1829  *	the entry must be split.
1830  */
1831 static void
1832 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1833 {
1834 	vm_map_entry_t new_entry;
1835 
1836 	VM_MAP_ASSERT_LOCKED(map);
1837 	KASSERT(entry->start < end && entry->end > end,
1838 	    ("_vm_map_clip_end: invalid clip of entry %p", entry));
1839 
1840 	/*
1841 	 * If there is no object backing this entry, we might as well create
1842 	 * one now.  If we defer it, an object can get created after the map
1843 	 * is clipped, and individual objects will be created for the split-up
1844 	 * map.  This is a bit of a hack, but is also about the best place to
1845 	 * put this improvement.
1846 	 */
1847 	if (entry->object.vm_object == NULL && !map->system_map &&
1848 	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
1849 		vm_object_t object;
1850 		object = vm_object_allocate(OBJT_DEFAULT,
1851 				atop(entry->end - entry->start));
1852 		entry->object.vm_object = object;
1853 		entry->offset = 0;
1854 		if (entry->cred != NULL) {
1855 			object->cred = entry->cred;
1856 			object->charge = entry->end - entry->start;
1857 			entry->cred = NULL;
1858 		}
1859 	} else if (entry->object.vm_object != NULL &&
1860 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1861 		   entry->cred != NULL) {
1862 		VM_OBJECT_WLOCK(entry->object.vm_object);
1863 		KASSERT(entry->object.vm_object->cred == NULL,
1864 		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1865 		entry->object.vm_object->cred = entry->cred;
1866 		entry->object.vm_object->charge = entry->end - entry->start;
1867 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1868 		entry->cred = NULL;
1869 	}
1870 
1871 	/*
1872 	 * Create a new entry and insert it AFTER the specified entry
1873 	 */
1874 	new_entry = vm_map_entry_create(map);
1875 	*new_entry = *entry;
1876 
1877 	new_entry->start = entry->end = end;
1878 	new_entry->offset += (end - entry->start);
1879 	if (new_entry->cred != NULL)
1880 		crhold(entry->cred);
1881 
1882 	vm_map_entry_link(map, entry, new_entry);
1883 
1884 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1885 		vm_object_reference(new_entry->object.vm_object);
1886 	}
1887 }
1888 
1889 /*
1890  *	vm_map_submap:		[ kernel use only ]
1891  *
1892  *	Mark the given range as handled by a subordinate map.
1893  *
1894  *	This range must have been created with vm_map_find,
1895  *	and no other operations may have been performed on this
1896  *	range prior to calling vm_map_submap.
1897  *
1898  *	Only a limited number of operations can be performed
1899  *	within this rage after calling vm_map_submap:
1900  *		vm_fault
1901  *	[Don't try vm_map_copy!]
1902  *
1903  *	To remove a submapping, one must first remove the
1904  *	range from the superior map, and then destroy the
1905  *	submap (if desired).  [Better yet, don't try it.]
1906  */
1907 int
1908 vm_map_submap(
1909 	vm_map_t map,
1910 	vm_offset_t start,
1911 	vm_offset_t end,
1912 	vm_map_t submap)
1913 {
1914 	vm_map_entry_t entry;
1915 	int result = KERN_INVALID_ARGUMENT;
1916 
1917 	vm_map_lock(map);
1918 
1919 	VM_MAP_RANGE_CHECK(map, start, end);
1920 
1921 	if (vm_map_lookup_entry(map, start, &entry)) {
1922 		vm_map_clip_start(map, entry, start);
1923 	} else
1924 		entry = entry->next;
1925 
1926 	vm_map_clip_end(map, entry, end);
1927 
1928 	if ((entry->start == start) && (entry->end == end) &&
1929 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1930 	    (entry->object.vm_object == NULL)) {
1931 		entry->object.sub_map = submap;
1932 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1933 		result = KERN_SUCCESS;
1934 	}
1935 	vm_map_unlock(map);
1936 
1937 	return (result);
1938 }
1939 
1940 /*
1941  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
1942  */
1943 #define	MAX_INIT_PT	96
1944 
1945 /*
1946  *	vm_map_pmap_enter:
1947  *
1948  *	Preload the specified map's pmap with mappings to the specified
1949  *	object's memory-resident pages.  No further physical pages are
1950  *	allocated, and no further virtual pages are retrieved from secondary
1951  *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
1952  *	limited number of page mappings are created at the low-end of the
1953  *	specified address range.  (For this purpose, a superpage mapping
1954  *	counts as one page mapping.)  Otherwise, all resident pages within
1955  *	the specified address range are mapped.
1956  */
1957 static void
1958 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1959     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1960 {
1961 	vm_offset_t start;
1962 	vm_page_t p, p_start;
1963 	vm_pindex_t mask, psize, threshold, tmpidx;
1964 
1965 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1966 		return;
1967 	VM_OBJECT_RLOCK(object);
1968 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1969 		VM_OBJECT_RUNLOCK(object);
1970 		VM_OBJECT_WLOCK(object);
1971 		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1972 			pmap_object_init_pt(map->pmap, addr, object, pindex,
1973 			    size);
1974 			VM_OBJECT_WUNLOCK(object);
1975 			return;
1976 		}
1977 		VM_OBJECT_LOCK_DOWNGRADE(object);
1978 	}
1979 
1980 	psize = atop(size);
1981 	if (psize + pindex > object->size) {
1982 		if (object->size < pindex) {
1983 			VM_OBJECT_RUNLOCK(object);
1984 			return;
1985 		}
1986 		psize = object->size - pindex;
1987 	}
1988 
1989 	start = 0;
1990 	p_start = NULL;
1991 	threshold = MAX_INIT_PT;
1992 
1993 	p = vm_page_find_least(object, pindex);
1994 	/*
1995 	 * Assert: the variable p is either (1) the page with the
1996 	 * least pindex greater than or equal to the parameter pindex
1997 	 * or (2) NULL.
1998 	 */
1999 	for (;
2000 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
2001 	     p = TAILQ_NEXT(p, listq)) {
2002 		/*
2003 		 * don't allow an madvise to blow away our really
2004 		 * free pages allocating pv entries.
2005 		 */
2006 		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2007 		    vm_page_count_severe()) ||
2008 		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2009 		    tmpidx >= threshold)) {
2010 			psize = tmpidx;
2011 			break;
2012 		}
2013 		if (p->valid == VM_PAGE_BITS_ALL) {
2014 			if (p_start == NULL) {
2015 				start = addr + ptoa(tmpidx);
2016 				p_start = p;
2017 			}
2018 			/* Jump ahead if a superpage mapping is possible. */
2019 			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2020 			    (pagesizes[p->psind] - 1)) == 0) {
2021 				mask = atop(pagesizes[p->psind]) - 1;
2022 				if (tmpidx + mask < psize &&
2023 				    vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2024 					p += mask;
2025 					threshold += mask;
2026 				}
2027 			}
2028 		} else if (p_start != NULL) {
2029 			pmap_enter_object(map->pmap, start, addr +
2030 			    ptoa(tmpidx), p_start, prot);
2031 			p_start = NULL;
2032 		}
2033 	}
2034 	if (p_start != NULL)
2035 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2036 		    p_start, prot);
2037 	VM_OBJECT_RUNLOCK(object);
2038 }
2039 
2040 /*
2041  *	vm_map_protect:
2042  *
2043  *	Sets the protection of the specified address
2044  *	region in the target map.  If "set_max" is
2045  *	specified, the maximum protection is to be set;
2046  *	otherwise, only the current protection is affected.
2047  */
2048 int
2049 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2050 	       vm_prot_t new_prot, boolean_t set_max)
2051 {
2052 	vm_map_entry_t current, entry;
2053 	vm_object_t obj;
2054 	struct ucred *cred;
2055 	vm_prot_t old_prot;
2056 
2057 	if (start == end)
2058 		return (KERN_SUCCESS);
2059 
2060 	vm_map_lock(map);
2061 
2062 	/*
2063 	 * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2064 	 * need to fault pages into the map and will drop the map lock while
2065 	 * doing so, and the VM object may end up in an inconsistent state if we
2066 	 * update the protection on the map entry in between faults.
2067 	 */
2068 	vm_map_wait_busy(map);
2069 
2070 	VM_MAP_RANGE_CHECK(map, start, end);
2071 
2072 	if (vm_map_lookup_entry(map, start, &entry)) {
2073 		vm_map_clip_start(map, entry, start);
2074 	} else {
2075 		entry = entry->next;
2076 	}
2077 
2078 	/*
2079 	 * Make a first pass to check for protection violations.
2080 	 */
2081 	for (current = entry; current->start < end; current = current->next) {
2082 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2083 			continue;
2084 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2085 			vm_map_unlock(map);
2086 			return (KERN_INVALID_ARGUMENT);
2087 		}
2088 		if ((new_prot & current->max_protection) != new_prot) {
2089 			vm_map_unlock(map);
2090 			return (KERN_PROTECTION_FAILURE);
2091 		}
2092 	}
2093 
2094 	/*
2095 	 * Do an accounting pass for private read-only mappings that
2096 	 * now will do cow due to allowed write (e.g. debugger sets
2097 	 * breakpoint on text segment)
2098 	 */
2099 	for (current = entry; current->start < end; current = current->next) {
2100 
2101 		vm_map_clip_end(map, current, end);
2102 
2103 		if (set_max ||
2104 		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
2105 		    ENTRY_CHARGED(current) ||
2106 		    (current->eflags & MAP_ENTRY_GUARD) != 0) {
2107 			continue;
2108 		}
2109 
2110 		cred = curthread->td_ucred;
2111 		obj = current->object.vm_object;
2112 
2113 		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
2114 			if (!swap_reserve(current->end - current->start)) {
2115 				vm_map_unlock(map);
2116 				return (KERN_RESOURCE_SHORTAGE);
2117 			}
2118 			crhold(cred);
2119 			current->cred = cred;
2120 			continue;
2121 		}
2122 
2123 		VM_OBJECT_WLOCK(obj);
2124 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
2125 			VM_OBJECT_WUNLOCK(obj);
2126 			continue;
2127 		}
2128 
2129 		/*
2130 		 * Charge for the whole object allocation now, since
2131 		 * we cannot distinguish between non-charged and
2132 		 * charged clipped mapping of the same object later.
2133 		 */
2134 		KASSERT(obj->charge == 0,
2135 		    ("vm_map_protect: object %p overcharged (entry %p)",
2136 		    obj, current));
2137 		if (!swap_reserve(ptoa(obj->size))) {
2138 			VM_OBJECT_WUNLOCK(obj);
2139 			vm_map_unlock(map);
2140 			return (KERN_RESOURCE_SHORTAGE);
2141 		}
2142 
2143 		crhold(cred);
2144 		obj->cred = cred;
2145 		obj->charge = ptoa(obj->size);
2146 		VM_OBJECT_WUNLOCK(obj);
2147 	}
2148 
2149 	/*
2150 	 * Go back and fix up protections. [Note that clipping is not
2151 	 * necessary the second time.]
2152 	 */
2153 	for (current = entry; current->start < end; current = current->next) {
2154 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2155 			continue;
2156 
2157 		old_prot = current->protection;
2158 
2159 		if (set_max)
2160 			current->protection =
2161 			    (current->max_protection = new_prot) &
2162 			    old_prot;
2163 		else
2164 			current->protection = new_prot;
2165 
2166 		/*
2167 		 * For user wired map entries, the normal lazy evaluation of
2168 		 * write access upgrades through soft page faults is
2169 		 * undesirable.  Instead, immediately copy any pages that are
2170 		 * copy-on-write and enable write access in the physical map.
2171 		 */
2172 		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2173 		    (current->protection & VM_PROT_WRITE) != 0 &&
2174 		    (old_prot & VM_PROT_WRITE) == 0)
2175 			vm_fault_copy_entry(map, map, current, current, NULL);
2176 
2177 		/*
2178 		 * When restricting access, update the physical map.  Worry
2179 		 * about copy-on-write here.
2180 		 */
2181 		if ((old_prot & ~current->protection) != 0) {
2182 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2183 							VM_PROT_ALL)
2184 			pmap_protect(map->pmap, current->start,
2185 			    current->end,
2186 			    current->protection & MASK(current));
2187 #undef	MASK
2188 		}
2189 		vm_map_simplify_entry(map, current);
2190 	}
2191 	vm_map_unlock(map);
2192 	return (KERN_SUCCESS);
2193 }
2194 
2195 /*
2196  *	vm_map_madvise:
2197  *
2198  *	This routine traverses a processes map handling the madvise
2199  *	system call.  Advisories are classified as either those effecting
2200  *	the vm_map_entry structure, or those effecting the underlying
2201  *	objects.
2202  */
2203 int
2204 vm_map_madvise(
2205 	vm_map_t map,
2206 	vm_offset_t start,
2207 	vm_offset_t end,
2208 	int behav)
2209 {
2210 	vm_map_entry_t current, entry;
2211 	bool modify_map;
2212 
2213 	/*
2214 	 * Some madvise calls directly modify the vm_map_entry, in which case
2215 	 * we need to use an exclusive lock on the map and we need to perform
2216 	 * various clipping operations.  Otherwise we only need a read-lock
2217 	 * on the map.
2218 	 */
2219 	switch(behav) {
2220 	case MADV_NORMAL:
2221 	case MADV_SEQUENTIAL:
2222 	case MADV_RANDOM:
2223 	case MADV_NOSYNC:
2224 	case MADV_AUTOSYNC:
2225 	case MADV_NOCORE:
2226 	case MADV_CORE:
2227 		if (start == end)
2228 			return (0);
2229 		modify_map = true;
2230 		vm_map_lock(map);
2231 		break;
2232 	case MADV_WILLNEED:
2233 	case MADV_DONTNEED:
2234 	case MADV_FREE:
2235 		if (start == end)
2236 			return (0);
2237 		modify_map = false;
2238 		vm_map_lock_read(map);
2239 		break;
2240 	default:
2241 		return (EINVAL);
2242 	}
2243 
2244 	/*
2245 	 * Locate starting entry and clip if necessary.
2246 	 */
2247 	VM_MAP_RANGE_CHECK(map, start, end);
2248 
2249 	if (vm_map_lookup_entry(map, start, &entry)) {
2250 		if (modify_map)
2251 			vm_map_clip_start(map, entry, start);
2252 	} else {
2253 		entry = entry->next;
2254 	}
2255 
2256 	if (modify_map) {
2257 		/*
2258 		 * madvise behaviors that are implemented in the vm_map_entry.
2259 		 *
2260 		 * We clip the vm_map_entry so that behavioral changes are
2261 		 * limited to the specified address range.
2262 		 */
2263 		for (current = entry; current->start < end;
2264 		    current = current->next) {
2265 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2266 				continue;
2267 
2268 			vm_map_clip_end(map, current, end);
2269 
2270 			switch (behav) {
2271 			case MADV_NORMAL:
2272 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2273 				break;
2274 			case MADV_SEQUENTIAL:
2275 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2276 				break;
2277 			case MADV_RANDOM:
2278 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2279 				break;
2280 			case MADV_NOSYNC:
2281 				current->eflags |= MAP_ENTRY_NOSYNC;
2282 				break;
2283 			case MADV_AUTOSYNC:
2284 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2285 				break;
2286 			case MADV_NOCORE:
2287 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2288 				break;
2289 			case MADV_CORE:
2290 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2291 				break;
2292 			default:
2293 				break;
2294 			}
2295 			vm_map_simplify_entry(map, current);
2296 		}
2297 		vm_map_unlock(map);
2298 	} else {
2299 		vm_pindex_t pstart, pend;
2300 
2301 		/*
2302 		 * madvise behaviors that are implemented in the underlying
2303 		 * vm_object.
2304 		 *
2305 		 * Since we don't clip the vm_map_entry, we have to clip
2306 		 * the vm_object pindex and count.
2307 		 */
2308 		for (current = entry; current->start < end;
2309 		    current = current->next) {
2310 			vm_offset_t useEnd, useStart;
2311 
2312 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2313 				continue;
2314 
2315 			pstart = OFF_TO_IDX(current->offset);
2316 			pend = pstart + atop(current->end - current->start);
2317 			useStart = current->start;
2318 			useEnd = current->end;
2319 
2320 			if (current->start < start) {
2321 				pstart += atop(start - current->start);
2322 				useStart = start;
2323 			}
2324 			if (current->end > end) {
2325 				pend -= atop(current->end - end);
2326 				useEnd = end;
2327 			}
2328 
2329 			if (pstart >= pend)
2330 				continue;
2331 
2332 			/*
2333 			 * Perform the pmap_advise() before clearing
2334 			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2335 			 * concurrent pmap operation, such as pmap_remove(),
2336 			 * could clear a reference in the pmap and set
2337 			 * PGA_REFERENCED on the page before the pmap_advise()
2338 			 * had completed.  Consequently, the page would appear
2339 			 * referenced based upon an old reference that
2340 			 * occurred before this pmap_advise() ran.
2341 			 */
2342 			if (behav == MADV_DONTNEED || behav == MADV_FREE)
2343 				pmap_advise(map->pmap, useStart, useEnd,
2344 				    behav);
2345 
2346 			vm_object_madvise(current->object.vm_object, pstart,
2347 			    pend, behav);
2348 
2349 			/*
2350 			 * Pre-populate paging structures in the
2351 			 * WILLNEED case.  For wired entries, the
2352 			 * paging structures are already populated.
2353 			 */
2354 			if (behav == MADV_WILLNEED &&
2355 			    current->wired_count == 0) {
2356 				vm_map_pmap_enter(map,
2357 				    useStart,
2358 				    current->protection,
2359 				    current->object.vm_object,
2360 				    pstart,
2361 				    ptoa(pend - pstart),
2362 				    MAP_PREFAULT_MADVISE
2363 				);
2364 			}
2365 		}
2366 		vm_map_unlock_read(map);
2367 	}
2368 	return (0);
2369 }
2370 
2371 
2372 /*
2373  *	vm_map_inherit:
2374  *
2375  *	Sets the inheritance of the specified address
2376  *	range in the target map.  Inheritance
2377  *	affects how the map will be shared with
2378  *	child maps at the time of vmspace_fork.
2379  */
2380 int
2381 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2382 	       vm_inherit_t new_inheritance)
2383 {
2384 	vm_map_entry_t entry;
2385 	vm_map_entry_t temp_entry;
2386 
2387 	switch (new_inheritance) {
2388 	case VM_INHERIT_NONE:
2389 	case VM_INHERIT_COPY:
2390 	case VM_INHERIT_SHARE:
2391 	case VM_INHERIT_ZERO:
2392 		break;
2393 	default:
2394 		return (KERN_INVALID_ARGUMENT);
2395 	}
2396 	if (start == end)
2397 		return (KERN_SUCCESS);
2398 	vm_map_lock(map);
2399 	VM_MAP_RANGE_CHECK(map, start, end);
2400 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2401 		entry = temp_entry;
2402 		vm_map_clip_start(map, entry, start);
2403 	} else
2404 		entry = temp_entry->next;
2405 	while (entry->start < end) {
2406 		vm_map_clip_end(map, entry, end);
2407 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
2408 		    new_inheritance != VM_INHERIT_ZERO)
2409 			entry->inheritance = new_inheritance;
2410 		vm_map_simplify_entry(map, entry);
2411 		entry = entry->next;
2412 	}
2413 	vm_map_unlock(map);
2414 	return (KERN_SUCCESS);
2415 }
2416 
2417 /*
2418  *	vm_map_unwire:
2419  *
2420  *	Implements both kernel and user unwiring.
2421  */
2422 int
2423 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2424     int flags)
2425 {
2426 	vm_map_entry_t entry, first_entry, tmp_entry;
2427 	vm_offset_t saved_start;
2428 	unsigned int last_timestamp;
2429 	int rv;
2430 	boolean_t need_wakeup, result, user_unwire;
2431 
2432 	if (start == end)
2433 		return (KERN_SUCCESS);
2434 	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2435 	vm_map_lock(map);
2436 	VM_MAP_RANGE_CHECK(map, start, end);
2437 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2438 		if (flags & VM_MAP_WIRE_HOLESOK)
2439 			first_entry = first_entry->next;
2440 		else {
2441 			vm_map_unlock(map);
2442 			return (KERN_INVALID_ADDRESS);
2443 		}
2444 	}
2445 	last_timestamp = map->timestamp;
2446 	entry = first_entry;
2447 	while (entry->start < end) {
2448 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2449 			/*
2450 			 * We have not yet clipped the entry.
2451 			 */
2452 			saved_start = (start >= entry->start) ? start :
2453 			    entry->start;
2454 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2455 			if (vm_map_unlock_and_wait(map, 0)) {
2456 				/*
2457 				 * Allow interruption of user unwiring?
2458 				 */
2459 			}
2460 			vm_map_lock(map);
2461 			if (last_timestamp+1 != map->timestamp) {
2462 				/*
2463 				 * Look again for the entry because the map was
2464 				 * modified while it was unlocked.
2465 				 * Specifically, the entry may have been
2466 				 * clipped, merged, or deleted.
2467 				 */
2468 				if (!vm_map_lookup_entry(map, saved_start,
2469 				    &tmp_entry)) {
2470 					if (flags & VM_MAP_WIRE_HOLESOK)
2471 						tmp_entry = tmp_entry->next;
2472 					else {
2473 						if (saved_start == start) {
2474 							/*
2475 							 * First_entry has been deleted.
2476 							 */
2477 							vm_map_unlock(map);
2478 							return (KERN_INVALID_ADDRESS);
2479 						}
2480 						end = saved_start;
2481 						rv = KERN_INVALID_ADDRESS;
2482 						goto done;
2483 					}
2484 				}
2485 				if (entry == first_entry)
2486 					first_entry = tmp_entry;
2487 				else
2488 					first_entry = NULL;
2489 				entry = tmp_entry;
2490 			}
2491 			last_timestamp = map->timestamp;
2492 			continue;
2493 		}
2494 		vm_map_clip_start(map, entry, start);
2495 		vm_map_clip_end(map, entry, end);
2496 		/*
2497 		 * Mark the entry in case the map lock is released.  (See
2498 		 * above.)
2499 		 */
2500 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2501 		    entry->wiring_thread == NULL,
2502 		    ("owned map entry %p", entry));
2503 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2504 		entry->wiring_thread = curthread;
2505 		/*
2506 		 * Check the map for holes in the specified region.
2507 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2508 		 */
2509 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2510 		    (entry->end < end && entry->next->start > entry->end)) {
2511 			end = entry->end;
2512 			rv = KERN_INVALID_ADDRESS;
2513 			goto done;
2514 		}
2515 		/*
2516 		 * If system unwiring, require that the entry is system wired.
2517 		 */
2518 		if (!user_unwire &&
2519 		    vm_map_entry_system_wired_count(entry) == 0) {
2520 			end = entry->end;
2521 			rv = KERN_INVALID_ARGUMENT;
2522 			goto done;
2523 		}
2524 		entry = entry->next;
2525 	}
2526 	rv = KERN_SUCCESS;
2527 done:
2528 	need_wakeup = FALSE;
2529 	if (first_entry == NULL) {
2530 		result = vm_map_lookup_entry(map, start, &first_entry);
2531 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2532 			first_entry = first_entry->next;
2533 		else
2534 			KASSERT(result, ("vm_map_unwire: lookup failed"));
2535 	}
2536 	for (entry = first_entry; entry->start < end; entry = entry->next) {
2537 		/*
2538 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2539 		 * space in the unwired region could have been mapped
2540 		 * while the map lock was dropped for draining
2541 		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2542 		 * could be simultaneously wiring this new mapping
2543 		 * entry.  Detect these cases and skip any entries
2544 		 * marked as in transition by us.
2545 		 */
2546 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2547 		    entry->wiring_thread != curthread) {
2548 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2549 			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2550 			continue;
2551 		}
2552 
2553 		if (rv == KERN_SUCCESS && (!user_unwire ||
2554 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2555 			if (user_unwire)
2556 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2557 			if (entry->wired_count == 1)
2558 				vm_map_entry_unwire(map, entry);
2559 			else
2560 				entry->wired_count--;
2561 		}
2562 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2563 		    ("vm_map_unwire: in-transition flag missing %p", entry));
2564 		KASSERT(entry->wiring_thread == curthread,
2565 		    ("vm_map_unwire: alien wire %p", entry));
2566 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2567 		entry->wiring_thread = NULL;
2568 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2569 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2570 			need_wakeup = TRUE;
2571 		}
2572 		vm_map_simplify_entry(map, entry);
2573 	}
2574 	vm_map_unlock(map);
2575 	if (need_wakeup)
2576 		vm_map_wakeup(map);
2577 	return (rv);
2578 }
2579 
2580 /*
2581  *	vm_map_wire_entry_failure:
2582  *
2583  *	Handle a wiring failure on the given entry.
2584  *
2585  *	The map should be locked.
2586  */
2587 static void
2588 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2589     vm_offset_t failed_addr)
2590 {
2591 
2592 	VM_MAP_ASSERT_LOCKED(map);
2593 	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2594 	    entry->wired_count == 1,
2595 	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2596 	KASSERT(failed_addr < entry->end,
2597 	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2598 
2599 	/*
2600 	 * If any pages at the start of this entry were successfully wired,
2601 	 * then unwire them.
2602 	 */
2603 	if (failed_addr > entry->start) {
2604 		pmap_unwire(map->pmap, entry->start, failed_addr);
2605 		vm_object_unwire(entry->object.vm_object, entry->offset,
2606 		    failed_addr - entry->start, PQ_ACTIVE);
2607 	}
2608 
2609 	/*
2610 	 * Assign an out-of-range value to represent the failure to wire this
2611 	 * entry.
2612 	 */
2613 	entry->wired_count = -1;
2614 }
2615 
2616 /*
2617  *	vm_map_wire:
2618  *
2619  *	Implements both kernel and user wiring.
2620  */
2621 int
2622 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2623     int flags)
2624 {
2625 	vm_map_entry_t entry, first_entry, tmp_entry;
2626 	vm_offset_t faddr, saved_end, saved_start;
2627 	unsigned int last_timestamp;
2628 	int rv;
2629 	boolean_t need_wakeup, result, user_wire;
2630 	vm_prot_t prot;
2631 
2632 	if (start == end)
2633 		return (KERN_SUCCESS);
2634 	prot = 0;
2635 	if (flags & VM_MAP_WIRE_WRITE)
2636 		prot |= VM_PROT_WRITE;
2637 	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2638 	vm_map_lock(map);
2639 	VM_MAP_RANGE_CHECK(map, start, end);
2640 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2641 		if (flags & VM_MAP_WIRE_HOLESOK)
2642 			first_entry = first_entry->next;
2643 		else {
2644 			vm_map_unlock(map);
2645 			return (KERN_INVALID_ADDRESS);
2646 		}
2647 	}
2648 	last_timestamp = map->timestamp;
2649 	entry = first_entry;
2650 	while (entry->start < end) {
2651 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2652 			/*
2653 			 * We have not yet clipped the entry.
2654 			 */
2655 			saved_start = (start >= entry->start) ? start :
2656 			    entry->start;
2657 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2658 			if (vm_map_unlock_and_wait(map, 0)) {
2659 				/*
2660 				 * Allow interruption of user wiring?
2661 				 */
2662 			}
2663 			vm_map_lock(map);
2664 			if (last_timestamp + 1 != map->timestamp) {
2665 				/*
2666 				 * Look again for the entry because the map was
2667 				 * modified while it was unlocked.
2668 				 * Specifically, the entry may have been
2669 				 * clipped, merged, or deleted.
2670 				 */
2671 				if (!vm_map_lookup_entry(map, saved_start,
2672 				    &tmp_entry)) {
2673 					if (flags & VM_MAP_WIRE_HOLESOK)
2674 						tmp_entry = tmp_entry->next;
2675 					else {
2676 						if (saved_start == start) {
2677 							/*
2678 							 * first_entry has been deleted.
2679 							 */
2680 							vm_map_unlock(map);
2681 							return (KERN_INVALID_ADDRESS);
2682 						}
2683 						end = saved_start;
2684 						rv = KERN_INVALID_ADDRESS;
2685 						goto done;
2686 					}
2687 				}
2688 				if (entry == first_entry)
2689 					first_entry = tmp_entry;
2690 				else
2691 					first_entry = NULL;
2692 				entry = tmp_entry;
2693 			}
2694 			last_timestamp = map->timestamp;
2695 			continue;
2696 		}
2697 		vm_map_clip_start(map, entry, start);
2698 		vm_map_clip_end(map, entry, end);
2699 		/*
2700 		 * Mark the entry in case the map lock is released.  (See
2701 		 * above.)
2702 		 */
2703 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2704 		    entry->wiring_thread == NULL,
2705 		    ("owned map entry %p", entry));
2706 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2707 		entry->wiring_thread = curthread;
2708 		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2709 		    || (entry->protection & prot) != prot) {
2710 			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2711 			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2712 				end = entry->end;
2713 				rv = KERN_INVALID_ADDRESS;
2714 				goto done;
2715 			}
2716 			goto next_entry;
2717 		}
2718 		if (entry->wired_count == 0) {
2719 			entry->wired_count++;
2720 			saved_start = entry->start;
2721 			saved_end = entry->end;
2722 
2723 			/*
2724 			 * Release the map lock, relying on the in-transition
2725 			 * mark.  Mark the map busy for fork.
2726 			 */
2727 			vm_map_busy(map);
2728 			vm_map_unlock(map);
2729 
2730 			faddr = saved_start;
2731 			do {
2732 				/*
2733 				 * Simulate a fault to get the page and enter
2734 				 * it into the physical map.
2735 				 */
2736 				if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2737 				    VM_FAULT_WIRE)) != KERN_SUCCESS)
2738 					break;
2739 			} while ((faddr += PAGE_SIZE) < saved_end);
2740 			vm_map_lock(map);
2741 			vm_map_unbusy(map);
2742 			if (last_timestamp + 1 != map->timestamp) {
2743 				/*
2744 				 * Look again for the entry because the map was
2745 				 * modified while it was unlocked.  The entry
2746 				 * may have been clipped, but NOT merged or
2747 				 * deleted.
2748 				 */
2749 				result = vm_map_lookup_entry(map, saved_start,
2750 				    &tmp_entry);
2751 				KASSERT(result, ("vm_map_wire: lookup failed"));
2752 				if (entry == first_entry)
2753 					first_entry = tmp_entry;
2754 				else
2755 					first_entry = NULL;
2756 				entry = tmp_entry;
2757 				while (entry->end < saved_end) {
2758 					/*
2759 					 * In case of failure, handle entries
2760 					 * that were not fully wired here;
2761 					 * fully wired entries are handled
2762 					 * later.
2763 					 */
2764 					if (rv != KERN_SUCCESS &&
2765 					    faddr < entry->end)
2766 						vm_map_wire_entry_failure(map,
2767 						    entry, faddr);
2768 					entry = entry->next;
2769 				}
2770 			}
2771 			last_timestamp = map->timestamp;
2772 			if (rv != KERN_SUCCESS) {
2773 				vm_map_wire_entry_failure(map, entry, faddr);
2774 				end = entry->end;
2775 				goto done;
2776 			}
2777 		} else if (!user_wire ||
2778 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2779 			entry->wired_count++;
2780 		}
2781 		/*
2782 		 * Check the map for holes in the specified region.
2783 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2784 		 */
2785 	next_entry:
2786 		if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
2787 		    entry->end < end && entry->next->start > entry->end) {
2788 			end = entry->end;
2789 			rv = KERN_INVALID_ADDRESS;
2790 			goto done;
2791 		}
2792 		entry = entry->next;
2793 	}
2794 	rv = KERN_SUCCESS;
2795 done:
2796 	need_wakeup = FALSE;
2797 	if (first_entry == NULL) {
2798 		result = vm_map_lookup_entry(map, start, &first_entry);
2799 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2800 			first_entry = first_entry->next;
2801 		else
2802 			KASSERT(result, ("vm_map_wire: lookup failed"));
2803 	}
2804 	for (entry = first_entry; entry->start < end; entry = entry->next) {
2805 		/*
2806 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2807 		 * space in the unwired region could have been mapped
2808 		 * while the map lock was dropped for faulting in the
2809 		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2810 		 * Moreover, another thread could be simultaneously
2811 		 * wiring this new mapping entry.  Detect these cases
2812 		 * and skip any entries marked as in transition not by us.
2813 		 */
2814 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2815 		    entry->wiring_thread != curthread) {
2816 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2817 			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2818 			continue;
2819 		}
2820 
2821 		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2822 			goto next_entry_done;
2823 
2824 		if (rv == KERN_SUCCESS) {
2825 			if (user_wire)
2826 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2827 		} else if (entry->wired_count == -1) {
2828 			/*
2829 			 * Wiring failed on this entry.  Thus, unwiring is
2830 			 * unnecessary.
2831 			 */
2832 			entry->wired_count = 0;
2833 		} else if (!user_wire ||
2834 		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2835 			/*
2836 			 * Undo the wiring.  Wiring succeeded on this entry
2837 			 * but failed on a later entry.
2838 			 */
2839 			if (entry->wired_count == 1)
2840 				vm_map_entry_unwire(map, entry);
2841 			else
2842 				entry->wired_count--;
2843 		}
2844 	next_entry_done:
2845 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2846 		    ("vm_map_wire: in-transition flag missing %p", entry));
2847 		KASSERT(entry->wiring_thread == curthread,
2848 		    ("vm_map_wire: alien wire %p", entry));
2849 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2850 		    MAP_ENTRY_WIRE_SKIPPED);
2851 		entry->wiring_thread = NULL;
2852 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2853 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2854 			need_wakeup = TRUE;
2855 		}
2856 		vm_map_simplify_entry(map, entry);
2857 	}
2858 	vm_map_unlock(map);
2859 	if (need_wakeup)
2860 		vm_map_wakeup(map);
2861 	return (rv);
2862 }
2863 
2864 /*
2865  * vm_map_sync
2866  *
2867  * Push any dirty cached pages in the address range to their pager.
2868  * If syncio is TRUE, dirty pages are written synchronously.
2869  * If invalidate is TRUE, any cached pages are freed as well.
2870  *
2871  * If the size of the region from start to end is zero, we are
2872  * supposed to flush all modified pages within the region containing
2873  * start.  Unfortunately, a region can be split or coalesced with
2874  * neighboring regions, making it difficult to determine what the
2875  * original region was.  Therefore, we approximate this requirement by
2876  * flushing the current region containing start.
2877  *
2878  * Returns an error if any part of the specified range is not mapped.
2879  */
2880 int
2881 vm_map_sync(
2882 	vm_map_t map,
2883 	vm_offset_t start,
2884 	vm_offset_t end,
2885 	boolean_t syncio,
2886 	boolean_t invalidate)
2887 {
2888 	vm_map_entry_t current;
2889 	vm_map_entry_t entry;
2890 	vm_size_t size;
2891 	vm_object_t object;
2892 	vm_ooffset_t offset;
2893 	unsigned int last_timestamp;
2894 	boolean_t failed;
2895 
2896 	vm_map_lock_read(map);
2897 	VM_MAP_RANGE_CHECK(map, start, end);
2898 	if (!vm_map_lookup_entry(map, start, &entry)) {
2899 		vm_map_unlock_read(map);
2900 		return (KERN_INVALID_ADDRESS);
2901 	} else if (start == end) {
2902 		start = entry->start;
2903 		end = entry->end;
2904 	}
2905 	/*
2906 	 * Make a first pass to check for user-wired memory and holes.
2907 	 */
2908 	for (current = entry; current->start < end; current = current->next) {
2909 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2910 			vm_map_unlock_read(map);
2911 			return (KERN_INVALID_ARGUMENT);
2912 		}
2913 		if (end > current->end &&
2914 		    current->end != current->next->start) {
2915 			vm_map_unlock_read(map);
2916 			return (KERN_INVALID_ADDRESS);
2917 		}
2918 	}
2919 
2920 	if (invalidate)
2921 		pmap_remove(map->pmap, start, end);
2922 	failed = FALSE;
2923 
2924 	/*
2925 	 * Make a second pass, cleaning/uncaching pages from the indicated
2926 	 * objects as we go.
2927 	 */
2928 	for (current = entry; current->start < end;) {
2929 		offset = current->offset + (start - current->start);
2930 		size = (end <= current->end ? end : current->end) - start;
2931 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2932 			vm_map_t smap;
2933 			vm_map_entry_t tentry;
2934 			vm_size_t tsize;
2935 
2936 			smap = current->object.sub_map;
2937 			vm_map_lock_read(smap);
2938 			(void) vm_map_lookup_entry(smap, offset, &tentry);
2939 			tsize = tentry->end - offset;
2940 			if (tsize < size)
2941 				size = tsize;
2942 			object = tentry->object.vm_object;
2943 			offset = tentry->offset + (offset - tentry->start);
2944 			vm_map_unlock_read(smap);
2945 		} else {
2946 			object = current->object.vm_object;
2947 		}
2948 		vm_object_reference(object);
2949 		last_timestamp = map->timestamp;
2950 		vm_map_unlock_read(map);
2951 		if (!vm_object_sync(object, offset, size, syncio, invalidate))
2952 			failed = TRUE;
2953 		start += size;
2954 		vm_object_deallocate(object);
2955 		vm_map_lock_read(map);
2956 		if (last_timestamp == map->timestamp ||
2957 		    !vm_map_lookup_entry(map, start, &current))
2958 			current = current->next;
2959 	}
2960 
2961 	vm_map_unlock_read(map);
2962 	return (failed ? KERN_FAILURE : KERN_SUCCESS);
2963 }
2964 
2965 /*
2966  *	vm_map_entry_unwire:	[ internal use only ]
2967  *
2968  *	Make the region specified by this entry pageable.
2969  *
2970  *	The map in question should be locked.
2971  *	[This is the reason for this routine's existence.]
2972  */
2973 static void
2974 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2975 {
2976 
2977 	VM_MAP_ASSERT_LOCKED(map);
2978 	KASSERT(entry->wired_count > 0,
2979 	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
2980 	pmap_unwire(map->pmap, entry->start, entry->end);
2981 	vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
2982 	    entry->start, PQ_ACTIVE);
2983 	entry->wired_count = 0;
2984 }
2985 
2986 static void
2987 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2988 {
2989 
2990 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2991 		vm_object_deallocate(entry->object.vm_object);
2992 	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2993 }
2994 
2995 /*
2996  *	vm_map_entry_delete:	[ internal use only ]
2997  *
2998  *	Deallocate the given entry from the target map.
2999  */
3000 static void
3001 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3002 {
3003 	vm_object_t object;
3004 	vm_pindex_t offidxstart, offidxend, count, size1;
3005 	vm_size_t size;
3006 
3007 	vm_map_entry_unlink(map, entry);
3008 	object = entry->object.vm_object;
3009 
3010 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3011 		MPASS(entry->cred == NULL);
3012 		MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3013 		MPASS(object == NULL);
3014 		vm_map_entry_deallocate(entry, map->system_map);
3015 		return;
3016 	}
3017 
3018 	size = entry->end - entry->start;
3019 	map->size -= size;
3020 
3021 	if (entry->cred != NULL) {
3022 		swap_release_by_cred(size, entry->cred);
3023 		crfree(entry->cred);
3024 	}
3025 
3026 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
3027 	    (object != NULL)) {
3028 		KASSERT(entry->cred == NULL || object->cred == NULL ||
3029 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3030 		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3031 		count = atop(size);
3032 		offidxstart = OFF_TO_IDX(entry->offset);
3033 		offidxend = offidxstart + count;
3034 		VM_OBJECT_WLOCK(object);
3035 		if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
3036 		    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
3037 		    object == kernel_object)) {
3038 			vm_object_collapse(object);
3039 
3040 			/*
3041 			 * The option OBJPR_NOTMAPPED can be passed here
3042 			 * because vm_map_delete() already performed
3043 			 * pmap_remove() on the only mapping to this range
3044 			 * of pages.
3045 			 */
3046 			vm_object_page_remove(object, offidxstart, offidxend,
3047 			    OBJPR_NOTMAPPED);
3048 			if (object->type == OBJT_SWAP)
3049 				swap_pager_freespace(object, offidxstart,
3050 				    count);
3051 			if (offidxend >= object->size &&
3052 			    offidxstart < object->size) {
3053 				size1 = object->size;
3054 				object->size = offidxstart;
3055 				if (object->cred != NULL) {
3056 					size1 -= object->size;
3057 					KASSERT(object->charge >= ptoa(size1),
3058 					    ("object %p charge < 0", object));
3059 					swap_release_by_cred(ptoa(size1),
3060 					    object->cred);
3061 					object->charge -= ptoa(size1);
3062 				}
3063 			}
3064 		}
3065 		VM_OBJECT_WUNLOCK(object);
3066 	} else
3067 		entry->object.vm_object = NULL;
3068 	if (map->system_map)
3069 		vm_map_entry_deallocate(entry, TRUE);
3070 	else {
3071 		entry->next = curthread->td_map_def_user;
3072 		curthread->td_map_def_user = entry;
3073 	}
3074 }
3075 
3076 /*
3077  *	vm_map_delete:	[ internal use only ]
3078  *
3079  *	Deallocates the given address range from the target
3080  *	map.
3081  */
3082 int
3083 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3084 {
3085 	vm_map_entry_t entry;
3086 	vm_map_entry_t first_entry;
3087 
3088 	VM_MAP_ASSERT_LOCKED(map);
3089 	if (start == end)
3090 		return (KERN_SUCCESS);
3091 
3092 	/*
3093 	 * Find the start of the region, and clip it
3094 	 */
3095 	if (!vm_map_lookup_entry(map, start, &first_entry))
3096 		entry = first_entry->next;
3097 	else {
3098 		entry = first_entry;
3099 		vm_map_clip_start(map, entry, start);
3100 	}
3101 
3102 	/*
3103 	 * Step through all entries in this region
3104 	 */
3105 	while (entry->start < end) {
3106 		vm_map_entry_t next;
3107 
3108 		/*
3109 		 * Wait for wiring or unwiring of an entry to complete.
3110 		 * Also wait for any system wirings to disappear on
3111 		 * user maps.
3112 		 */
3113 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3114 		    (vm_map_pmap(map) != kernel_pmap &&
3115 		    vm_map_entry_system_wired_count(entry) != 0)) {
3116 			unsigned int last_timestamp;
3117 			vm_offset_t saved_start;
3118 			vm_map_entry_t tmp_entry;
3119 
3120 			saved_start = entry->start;
3121 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3122 			last_timestamp = map->timestamp;
3123 			(void) vm_map_unlock_and_wait(map, 0);
3124 			vm_map_lock(map);
3125 			if (last_timestamp + 1 != map->timestamp) {
3126 				/*
3127 				 * Look again for the entry because the map was
3128 				 * modified while it was unlocked.
3129 				 * Specifically, the entry may have been
3130 				 * clipped, merged, or deleted.
3131 				 */
3132 				if (!vm_map_lookup_entry(map, saved_start,
3133 							 &tmp_entry))
3134 					entry = tmp_entry->next;
3135 				else {
3136 					entry = tmp_entry;
3137 					vm_map_clip_start(map, entry,
3138 							  saved_start);
3139 				}
3140 			}
3141 			continue;
3142 		}
3143 		vm_map_clip_end(map, entry, end);
3144 
3145 		next = entry->next;
3146 
3147 		/*
3148 		 * Unwire before removing addresses from the pmap; otherwise,
3149 		 * unwiring will put the entries back in the pmap.
3150 		 */
3151 		if (entry->wired_count != 0)
3152 			vm_map_entry_unwire(map, entry);
3153 
3154 		/*
3155 		 * Remove mappings for the pages, but only if the
3156 		 * mappings could exist.  For instance, it does not
3157 		 * make sense to call pmap_remove() for guard entries.
3158 		 */
3159 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
3160 		    entry->object.vm_object != NULL)
3161 			pmap_remove(map->pmap, entry->start, entry->end);
3162 
3163 		/*
3164 		 * Delete the entry only after removing all pmap
3165 		 * entries pointing to its pages.  (Otherwise, its
3166 		 * page frames may be reallocated, and any modify bits
3167 		 * will be set in the wrong object!)
3168 		 */
3169 		vm_map_entry_delete(map, entry);
3170 		entry = next;
3171 	}
3172 	return (KERN_SUCCESS);
3173 }
3174 
3175 /*
3176  *	vm_map_remove:
3177  *
3178  *	Remove the given address range from the target map.
3179  *	This is the exported form of vm_map_delete.
3180  */
3181 int
3182 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3183 {
3184 	int result;
3185 
3186 	vm_map_lock(map);
3187 	VM_MAP_RANGE_CHECK(map, start, end);
3188 	result = vm_map_delete(map, start, end);
3189 	vm_map_unlock(map);
3190 	return (result);
3191 }
3192 
3193 /*
3194  *	vm_map_check_protection:
3195  *
3196  *	Assert that the target map allows the specified privilege on the
3197  *	entire address region given.  The entire region must be allocated.
3198  *
3199  *	WARNING!  This code does not and should not check whether the
3200  *	contents of the region is accessible.  For example a smaller file
3201  *	might be mapped into a larger address space.
3202  *
3203  *	NOTE!  This code is also called by munmap().
3204  *
3205  *	The map must be locked.  A read lock is sufficient.
3206  */
3207 boolean_t
3208 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3209 			vm_prot_t protection)
3210 {
3211 	vm_map_entry_t entry;
3212 	vm_map_entry_t tmp_entry;
3213 
3214 	if (!vm_map_lookup_entry(map, start, &tmp_entry))
3215 		return (FALSE);
3216 	entry = tmp_entry;
3217 
3218 	while (start < end) {
3219 		/*
3220 		 * No holes allowed!
3221 		 */
3222 		if (start < entry->start)
3223 			return (FALSE);
3224 		/*
3225 		 * Check protection associated with entry.
3226 		 */
3227 		if ((entry->protection & protection) != protection)
3228 			return (FALSE);
3229 		/* go to next entry */
3230 		start = entry->end;
3231 		entry = entry->next;
3232 	}
3233 	return (TRUE);
3234 }
3235 
3236 /*
3237  *	vm_map_copy_entry:
3238  *
3239  *	Copies the contents of the source entry to the destination
3240  *	entry.  The entries *must* be aligned properly.
3241  */
3242 static void
3243 vm_map_copy_entry(
3244 	vm_map_t src_map,
3245 	vm_map_t dst_map,
3246 	vm_map_entry_t src_entry,
3247 	vm_map_entry_t dst_entry,
3248 	vm_ooffset_t *fork_charge)
3249 {
3250 	vm_object_t src_object;
3251 	vm_map_entry_t fake_entry;
3252 	vm_offset_t size;
3253 	struct ucred *cred;
3254 	int charged;
3255 
3256 	VM_MAP_ASSERT_LOCKED(dst_map);
3257 
3258 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3259 		return;
3260 
3261 	if (src_entry->wired_count == 0 ||
3262 	    (src_entry->protection & VM_PROT_WRITE) == 0) {
3263 		/*
3264 		 * If the source entry is marked needs_copy, it is already
3265 		 * write-protected.
3266 		 */
3267 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3268 		    (src_entry->protection & VM_PROT_WRITE) != 0) {
3269 			pmap_protect(src_map->pmap,
3270 			    src_entry->start,
3271 			    src_entry->end,
3272 			    src_entry->protection & ~VM_PROT_WRITE);
3273 		}
3274 
3275 		/*
3276 		 * Make a copy of the object.
3277 		 */
3278 		size = src_entry->end - src_entry->start;
3279 		if ((src_object = src_entry->object.vm_object) != NULL) {
3280 			VM_OBJECT_WLOCK(src_object);
3281 			charged = ENTRY_CHARGED(src_entry);
3282 			if (src_object->handle == NULL &&
3283 			    (src_object->type == OBJT_DEFAULT ||
3284 			    src_object->type == OBJT_SWAP)) {
3285 				vm_object_collapse(src_object);
3286 				if ((src_object->flags & (OBJ_NOSPLIT |
3287 				    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3288 					vm_object_split(src_entry);
3289 					src_object =
3290 					    src_entry->object.vm_object;
3291 				}
3292 			}
3293 			vm_object_reference_locked(src_object);
3294 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3295 			if (src_entry->cred != NULL &&
3296 			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3297 				KASSERT(src_object->cred == NULL,
3298 				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3299 				     src_object));
3300 				src_object->cred = src_entry->cred;
3301 				src_object->charge = size;
3302 			}
3303 			VM_OBJECT_WUNLOCK(src_object);
3304 			dst_entry->object.vm_object = src_object;
3305 			if (charged) {
3306 				cred = curthread->td_ucred;
3307 				crhold(cred);
3308 				dst_entry->cred = cred;
3309 				*fork_charge += size;
3310 				if (!(src_entry->eflags &
3311 				      MAP_ENTRY_NEEDS_COPY)) {
3312 					crhold(cred);
3313 					src_entry->cred = cred;
3314 					*fork_charge += size;
3315 				}
3316 			}
3317 			src_entry->eflags |= MAP_ENTRY_COW |
3318 			    MAP_ENTRY_NEEDS_COPY;
3319 			dst_entry->eflags |= MAP_ENTRY_COW |
3320 			    MAP_ENTRY_NEEDS_COPY;
3321 			dst_entry->offset = src_entry->offset;
3322 			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3323 				/*
3324 				 * MAP_ENTRY_VN_WRITECNT cannot
3325 				 * indicate write reference from
3326 				 * src_entry, since the entry is
3327 				 * marked as needs copy.  Allocate a
3328 				 * fake entry that is used to
3329 				 * decrement object->un_pager.vnp.writecount
3330 				 * at the appropriate time.  Attach
3331 				 * fake_entry to the deferred list.
3332 				 */
3333 				fake_entry = vm_map_entry_create(dst_map);
3334 				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3335 				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3336 				vm_object_reference(src_object);
3337 				fake_entry->object.vm_object = src_object;
3338 				fake_entry->start = src_entry->start;
3339 				fake_entry->end = src_entry->end;
3340 				fake_entry->next = curthread->td_map_def_user;
3341 				curthread->td_map_def_user = fake_entry;
3342 			}
3343 
3344 			pmap_copy(dst_map->pmap, src_map->pmap,
3345 			    dst_entry->start, dst_entry->end - dst_entry->start,
3346 			    src_entry->start);
3347 		} else {
3348 			dst_entry->object.vm_object = NULL;
3349 			dst_entry->offset = 0;
3350 			if (src_entry->cred != NULL) {
3351 				dst_entry->cred = curthread->td_ucred;
3352 				crhold(dst_entry->cred);
3353 				*fork_charge += size;
3354 			}
3355 		}
3356 	} else {
3357 		/*
3358 		 * We don't want to make writeable wired pages copy-on-write.
3359 		 * Immediately copy these pages into the new map by simulating
3360 		 * page faults.  The new pages are pageable.
3361 		 */
3362 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3363 		    fork_charge);
3364 	}
3365 }
3366 
3367 /*
3368  * vmspace_map_entry_forked:
3369  * Update the newly-forked vmspace each time a map entry is inherited
3370  * or copied.  The values for vm_dsize and vm_tsize are approximate
3371  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3372  */
3373 static void
3374 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3375     vm_map_entry_t entry)
3376 {
3377 	vm_size_t entrysize;
3378 	vm_offset_t newend;
3379 
3380 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
3381 		return;
3382 	entrysize = entry->end - entry->start;
3383 	vm2->vm_map.size += entrysize;
3384 	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3385 		vm2->vm_ssize += btoc(entrysize);
3386 	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3387 	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3388 		newend = MIN(entry->end,
3389 		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3390 		vm2->vm_dsize += btoc(newend - entry->start);
3391 	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3392 	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3393 		newend = MIN(entry->end,
3394 		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3395 		vm2->vm_tsize += btoc(newend - entry->start);
3396 	}
3397 }
3398 
3399 /*
3400  * vmspace_fork:
3401  * Create a new process vmspace structure and vm_map
3402  * based on those of an existing process.  The new map
3403  * is based on the old map, according to the inheritance
3404  * values on the regions in that map.
3405  *
3406  * XXX It might be worth coalescing the entries added to the new vmspace.
3407  *
3408  * The source map must not be locked.
3409  */
3410 struct vmspace *
3411 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3412 {
3413 	struct vmspace *vm2;
3414 	vm_map_t new_map, old_map;
3415 	vm_map_entry_t new_entry, old_entry;
3416 	vm_object_t object;
3417 	int locked;
3418 	vm_inherit_t inh;
3419 
3420 	old_map = &vm1->vm_map;
3421 	/* Copy immutable fields of vm1 to vm2. */
3422 	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
3423 	    pmap_pinit);
3424 	if (vm2 == NULL)
3425 		return (NULL);
3426 	vm2->vm_taddr = vm1->vm_taddr;
3427 	vm2->vm_daddr = vm1->vm_daddr;
3428 	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3429 	vm_map_lock(old_map);
3430 	if (old_map->busy)
3431 		vm_map_wait_busy(old_map);
3432 	new_map = &vm2->vm_map;
3433 	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3434 	KASSERT(locked, ("vmspace_fork: lock failed"));
3435 
3436 	old_entry = old_map->header.next;
3437 
3438 	while (old_entry != &old_map->header) {
3439 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3440 			panic("vm_map_fork: encountered a submap");
3441 
3442 		inh = old_entry->inheritance;
3443 		if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
3444 		    inh != VM_INHERIT_NONE)
3445 			inh = VM_INHERIT_COPY;
3446 
3447 		switch (inh) {
3448 		case VM_INHERIT_NONE:
3449 			break;
3450 
3451 		case VM_INHERIT_SHARE:
3452 			/*
3453 			 * Clone the entry, creating the shared object if necessary.
3454 			 */
3455 			object = old_entry->object.vm_object;
3456 			if (object == NULL) {
3457 				object = vm_object_allocate(OBJT_DEFAULT,
3458 					atop(old_entry->end - old_entry->start));
3459 				old_entry->object.vm_object = object;
3460 				old_entry->offset = 0;
3461 				if (old_entry->cred != NULL) {
3462 					object->cred = old_entry->cred;
3463 					object->charge = old_entry->end -
3464 					    old_entry->start;
3465 					old_entry->cred = NULL;
3466 				}
3467 			}
3468 
3469 			/*
3470 			 * Add the reference before calling vm_object_shadow
3471 			 * to insure that a shadow object is created.
3472 			 */
3473 			vm_object_reference(object);
3474 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3475 				vm_object_shadow(&old_entry->object.vm_object,
3476 				    &old_entry->offset,
3477 				    old_entry->end - old_entry->start);
3478 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3479 				/* Transfer the second reference too. */
3480 				vm_object_reference(
3481 				    old_entry->object.vm_object);
3482 
3483 				/*
3484 				 * As in vm_map_simplify_entry(), the
3485 				 * vnode lock will not be acquired in
3486 				 * this call to vm_object_deallocate().
3487 				 */
3488 				vm_object_deallocate(object);
3489 				object = old_entry->object.vm_object;
3490 			}
3491 			VM_OBJECT_WLOCK(object);
3492 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3493 			if (old_entry->cred != NULL) {
3494 				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3495 				object->cred = old_entry->cred;
3496 				object->charge = old_entry->end - old_entry->start;
3497 				old_entry->cred = NULL;
3498 			}
3499 
3500 			/*
3501 			 * Assert the correct state of the vnode
3502 			 * v_writecount while the object is locked, to
3503 			 * not relock it later for the assertion
3504 			 * correctness.
3505 			 */
3506 			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3507 			    object->type == OBJT_VNODE) {
3508 				KASSERT(((struct vnode *)object->handle)->
3509 				    v_writecount > 0,
3510 				    ("vmspace_fork: v_writecount %p", object));
3511 				KASSERT(object->un_pager.vnp.writemappings > 0,
3512 				    ("vmspace_fork: vnp.writecount %p",
3513 				    object));
3514 			}
3515 			VM_OBJECT_WUNLOCK(object);
3516 
3517 			/*
3518 			 * Clone the entry, referencing the shared object.
3519 			 */
3520 			new_entry = vm_map_entry_create(new_map);
3521 			*new_entry = *old_entry;
3522 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3523 			    MAP_ENTRY_IN_TRANSITION);
3524 			new_entry->wiring_thread = NULL;
3525 			new_entry->wired_count = 0;
3526 			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3527 				vnode_pager_update_writecount(object,
3528 				    new_entry->start, new_entry->end);
3529 			}
3530 
3531 			/*
3532 			 * Insert the entry into the new map -- we know we're
3533 			 * inserting at the end of the new map.
3534 			 */
3535 			vm_map_entry_link(new_map, new_map->header.prev,
3536 			    new_entry);
3537 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3538 
3539 			/*
3540 			 * Update the physical map
3541 			 */
3542 			pmap_copy(new_map->pmap, old_map->pmap,
3543 			    new_entry->start,
3544 			    (old_entry->end - old_entry->start),
3545 			    old_entry->start);
3546 			break;
3547 
3548 		case VM_INHERIT_COPY:
3549 			/*
3550 			 * Clone the entry and link into the map.
3551 			 */
3552 			new_entry = vm_map_entry_create(new_map);
3553 			*new_entry = *old_entry;
3554 			/*
3555 			 * Copied entry is COW over the old object.
3556 			 */
3557 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3558 			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3559 			new_entry->wiring_thread = NULL;
3560 			new_entry->wired_count = 0;
3561 			new_entry->object.vm_object = NULL;
3562 			new_entry->cred = NULL;
3563 			vm_map_entry_link(new_map, new_map->header.prev,
3564 			    new_entry);
3565 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3566 			vm_map_copy_entry(old_map, new_map, old_entry,
3567 			    new_entry, fork_charge);
3568 			break;
3569 
3570 		case VM_INHERIT_ZERO:
3571 			/*
3572 			 * Create a new anonymous mapping entry modelled from
3573 			 * the old one.
3574 			 */
3575 			new_entry = vm_map_entry_create(new_map);
3576 			memset(new_entry, 0, sizeof(*new_entry));
3577 
3578 			new_entry->start = old_entry->start;
3579 			new_entry->end = old_entry->end;
3580 			new_entry->eflags = old_entry->eflags &
3581 			    ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
3582 			    MAP_ENTRY_VN_WRITECNT);
3583 			new_entry->protection = old_entry->protection;
3584 			new_entry->max_protection = old_entry->max_protection;
3585 			new_entry->inheritance = VM_INHERIT_ZERO;
3586 
3587 			vm_map_entry_link(new_map, new_map->header.prev,
3588 			    new_entry);
3589 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3590 
3591 			new_entry->cred = curthread->td_ucred;
3592 			crhold(new_entry->cred);
3593 			*fork_charge += (new_entry->end - new_entry->start);
3594 
3595 			break;
3596 		}
3597 		old_entry = old_entry->next;
3598 	}
3599 	/*
3600 	 * Use inlined vm_map_unlock() to postpone handling the deferred
3601 	 * map entries, which cannot be done until both old_map and
3602 	 * new_map locks are released.
3603 	 */
3604 	sx_xunlock(&old_map->lock);
3605 	sx_xunlock(&new_map->lock);
3606 	vm_map_process_deferred();
3607 
3608 	return (vm2);
3609 }
3610 
3611 /*
3612  * Create a process's stack for exec_new_vmspace().  This function is never
3613  * asked to wire the newly created stack.
3614  */
3615 int
3616 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3617     vm_prot_t prot, vm_prot_t max, int cow)
3618 {
3619 	vm_size_t growsize, init_ssize;
3620 	rlim_t vmemlim;
3621 	int rv;
3622 
3623 	MPASS((map->flags & MAP_WIREFUTURE) == 0);
3624 	growsize = sgrowsiz;
3625 	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3626 	vm_map_lock(map);
3627 	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3628 	/* If we would blow our VMEM resource limit, no go */
3629 	if (map->size + init_ssize > vmemlim) {
3630 		rv = KERN_NO_SPACE;
3631 		goto out;
3632 	}
3633 	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3634 	    max, cow);
3635 out:
3636 	vm_map_unlock(map);
3637 	return (rv);
3638 }
3639 
3640 static int stack_guard_page = 1;
3641 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
3642     &stack_guard_page, 0,
3643     "Specifies the number of guard pages for a stack that grows");
3644 
3645 static int
3646 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3647     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3648 {
3649 	vm_map_entry_t new_entry, prev_entry;
3650 	vm_offset_t bot, gap_bot, gap_top, top;
3651 	vm_size_t init_ssize, sgp;
3652 	int orient, rv;
3653 
3654 	/*
3655 	 * The stack orientation is piggybacked with the cow argument.
3656 	 * Extract it into orient and mask the cow argument so that we
3657 	 * don't pass it around further.
3658 	 */
3659 	orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
3660 	KASSERT(orient != 0, ("No stack grow direction"));
3661 	KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
3662 	    ("bi-dir stack"));
3663 
3664 	if (addrbos < vm_map_min(map) ||
3665 	    addrbos + max_ssize > vm_map_max(map) ||
3666 	    addrbos + max_ssize <= addrbos)
3667 		return (KERN_INVALID_ADDRESS);
3668 	sgp = (vm_size_t)stack_guard_page * PAGE_SIZE;
3669 	if (sgp >= max_ssize)
3670 		return (KERN_INVALID_ARGUMENT);
3671 
3672 	init_ssize = growsize;
3673 	if (max_ssize < init_ssize + sgp)
3674 		init_ssize = max_ssize - sgp;
3675 
3676 	/* If addr is already mapped, no go */
3677 	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3678 		return (KERN_NO_SPACE);
3679 
3680 	/*
3681 	 * If we can't accommodate max_ssize in the current mapping, no go.
3682 	 */
3683 	if (prev_entry->next->start < addrbos + max_ssize)
3684 		return (KERN_NO_SPACE);
3685 
3686 	/*
3687 	 * We initially map a stack of only init_ssize.  We will grow as
3688 	 * needed later.  Depending on the orientation of the stack (i.e.
3689 	 * the grow direction) we either map at the top of the range, the
3690 	 * bottom of the range or in the middle.
3691 	 *
3692 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3693 	 * and cow to be 0.  Possibly we should eliminate these as input
3694 	 * parameters, and just pass these values here in the insert call.
3695 	 */
3696 	if (orient == MAP_STACK_GROWS_DOWN) {
3697 		bot = addrbos + max_ssize - init_ssize;
3698 		top = bot + init_ssize;
3699 		gap_bot = addrbos;
3700 		gap_top = bot;
3701 	} else /* if (orient == MAP_STACK_GROWS_UP) */ {
3702 		bot = addrbos;
3703 		top = bot + init_ssize;
3704 		gap_bot = top;
3705 		gap_top = addrbos + max_ssize;
3706 	}
3707 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3708 	if (rv != KERN_SUCCESS)
3709 		return (rv);
3710 	new_entry = prev_entry->next;
3711 	KASSERT(new_entry->end == top || new_entry->start == bot,
3712 	    ("Bad entry start/end for new stack entry"));
3713 	KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
3714 	    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
3715 	    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3716 	KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
3717 	    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
3718 	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
3719 	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
3720 	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
3721 	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
3722 	if (rv != KERN_SUCCESS)
3723 		(void)vm_map_delete(map, bot, top);
3724 	return (rv);
3725 }
3726 
3727 /*
3728  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
3729  * successfully grow the stack.
3730  */
3731 static int
3732 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
3733 {
3734 	vm_map_entry_t stack_entry;
3735 	struct proc *p;
3736 	struct vmspace *vm;
3737 	struct ucred *cred;
3738 	vm_offset_t gap_end, gap_start, grow_start;
3739 	size_t grow_amount, guard, max_grow;
3740 	rlim_t lmemlim, stacklim, vmemlim;
3741 	int rv, rv1;
3742 	bool gap_deleted, grow_down, is_procstack;
3743 #ifdef notyet
3744 	uint64_t limit;
3745 #endif
3746 #ifdef RACCT
3747 	int error;
3748 #endif
3749 
3750 	p = curproc;
3751 	vm = p->p_vmspace;
3752 
3753 	/*
3754 	 * Disallow stack growth when the access is performed by a
3755 	 * debugger or AIO daemon.  The reason is that the wrong
3756 	 * resource limits are applied.
3757 	 */
3758 	if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL)
3759 		return (KERN_FAILURE);
3760 
3761 	MPASS(!map->system_map);
3762 
3763 	guard = stack_guard_page * PAGE_SIZE;
3764 	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
3765 	stacklim = lim_cur(curthread, RLIMIT_STACK);
3766 	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3767 retry:
3768 	/* If addr is not in a hole for a stack grow area, no need to grow. */
3769 	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
3770 		return (KERN_FAILURE);
3771 	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
3772 		return (KERN_SUCCESS);
3773 	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
3774 		stack_entry = gap_entry->next;
3775 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
3776 		    stack_entry->start != gap_entry->end)
3777 			return (KERN_FAILURE);
3778 		grow_amount = round_page(stack_entry->start - addr);
3779 		grow_down = true;
3780 	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
3781 		stack_entry = gap_entry->prev;
3782 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
3783 		    stack_entry->end != gap_entry->start)
3784 			return (KERN_FAILURE);
3785 		grow_amount = round_page(addr + 1 - stack_entry->end);
3786 		grow_down = false;
3787 	} else {
3788 		return (KERN_FAILURE);
3789 	}
3790 	max_grow = gap_entry->end - gap_entry->start;
3791 	if (guard > max_grow)
3792 		return (KERN_NO_SPACE);
3793 	max_grow -= guard;
3794 	if (grow_amount > max_grow)
3795 		return (KERN_NO_SPACE);
3796 
3797 	/*
3798 	 * If this is the main process stack, see if we're over the stack
3799 	 * limit.
3800 	 */
3801 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
3802 	    addr < (vm_offset_t)p->p_sysent->sv_usrstack;
3803 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
3804 		return (KERN_NO_SPACE);
3805 
3806 #ifdef RACCT
3807 	if (racct_enable) {
3808 		PROC_LOCK(p);
3809 		if (is_procstack && racct_set(p, RACCT_STACK,
3810 		    ctob(vm->vm_ssize) + grow_amount)) {
3811 			PROC_UNLOCK(p);
3812 			return (KERN_NO_SPACE);
3813 		}
3814 		PROC_UNLOCK(p);
3815 	}
3816 #endif
3817 
3818 	grow_amount = roundup(grow_amount, sgrowsiz);
3819 	if (grow_amount > max_grow)
3820 		grow_amount = max_grow;
3821 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3822 		grow_amount = trunc_page((vm_size_t)stacklim) -
3823 		    ctob(vm->vm_ssize);
3824 	}
3825 
3826 #ifdef notyet
3827 	PROC_LOCK(p);
3828 	limit = racct_get_available(p, RACCT_STACK);
3829 	PROC_UNLOCK(p);
3830 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3831 		grow_amount = limit - ctob(vm->vm_ssize);
3832 #endif
3833 
3834 	if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
3835 		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3836 			rv = KERN_NO_SPACE;
3837 			goto out;
3838 		}
3839 #ifdef RACCT
3840 		if (racct_enable) {
3841 			PROC_LOCK(p);
3842 			if (racct_set(p, RACCT_MEMLOCK,
3843 			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3844 				PROC_UNLOCK(p);
3845 				rv = KERN_NO_SPACE;
3846 				goto out;
3847 			}
3848 			PROC_UNLOCK(p);
3849 		}
3850 #endif
3851 	}
3852 
3853 	/* If we would blow our VMEM resource limit, no go */
3854 	if (map->size + grow_amount > vmemlim) {
3855 		rv = KERN_NO_SPACE;
3856 		goto out;
3857 	}
3858 #ifdef RACCT
3859 	if (racct_enable) {
3860 		PROC_LOCK(p);
3861 		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3862 			PROC_UNLOCK(p);
3863 			rv = KERN_NO_SPACE;
3864 			goto out;
3865 		}
3866 		PROC_UNLOCK(p);
3867 	}
3868 #endif
3869 
3870 	if (vm_map_lock_upgrade(map)) {
3871 		gap_entry = NULL;
3872 		vm_map_lock_read(map);
3873 		goto retry;
3874 	}
3875 
3876 	if (grow_down) {
3877 		grow_start = gap_entry->end - grow_amount;
3878 		if (gap_entry->start + grow_amount == gap_entry->end) {
3879 			gap_start = gap_entry->start;
3880 			gap_end = gap_entry->end;
3881 			vm_map_entry_delete(map, gap_entry);
3882 			gap_deleted = true;
3883 		} else {
3884 			MPASS(gap_entry->start < gap_entry->end - grow_amount);
3885 			gap_entry->end -= grow_amount;
3886 			vm_map_entry_resize_free(map, gap_entry);
3887 			gap_deleted = false;
3888 		}
3889 		rv = vm_map_insert(map, NULL, 0, grow_start,
3890 		    grow_start + grow_amount,
3891 		    stack_entry->protection, stack_entry->max_protection,
3892 		    MAP_STACK_GROWS_DOWN);
3893 		if (rv != KERN_SUCCESS) {
3894 			if (gap_deleted) {
3895 				rv1 = vm_map_insert(map, NULL, 0, gap_start,
3896 				    gap_end, VM_PROT_NONE, VM_PROT_NONE,
3897 				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
3898 				MPASS(rv1 == KERN_SUCCESS);
3899 			} else {
3900 				gap_entry->end += grow_amount;
3901 				vm_map_entry_resize_free(map, gap_entry);
3902 			}
3903 		}
3904 	} else {
3905 		grow_start = stack_entry->end;
3906 		cred = stack_entry->cred;
3907 		if (cred == NULL && stack_entry->object.vm_object != NULL)
3908 			cred = stack_entry->object.vm_object->cred;
3909 		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3910 			rv = KERN_NO_SPACE;
3911 		/* Grow the underlying object if applicable. */
3912 		else if (stack_entry->object.vm_object == NULL ||
3913 		    vm_object_coalesce(stack_entry->object.vm_object,
3914 		    stack_entry->offset,
3915 		    (vm_size_t)(stack_entry->end - stack_entry->start),
3916 		    (vm_size_t)grow_amount, cred != NULL)) {
3917 			if (gap_entry->start + grow_amount == gap_entry->end)
3918 				vm_map_entry_delete(map, gap_entry);
3919 			else
3920 				gap_entry->start += grow_amount;
3921 			stack_entry->end += grow_amount;
3922 			map->size += grow_amount;
3923 			vm_map_entry_resize_free(map, stack_entry);
3924 			rv = KERN_SUCCESS;
3925 		} else
3926 			rv = KERN_FAILURE;
3927 	}
3928 	if (rv == KERN_SUCCESS && is_procstack)
3929 		vm->vm_ssize += btoc(grow_amount);
3930 
3931 	/*
3932 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3933 	 */
3934 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
3935 		vm_map_unlock(map);
3936 		vm_map_wire(map, grow_start, grow_start + grow_amount,
3937 		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
3938 		vm_map_lock_read(map);
3939 	} else
3940 		vm_map_lock_downgrade(map);
3941 
3942 out:
3943 #ifdef RACCT
3944 	if (racct_enable && rv != KERN_SUCCESS) {
3945 		PROC_LOCK(p);
3946 		error = racct_set(p, RACCT_VMEM, map->size);
3947 		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3948 		if (!old_mlock) {
3949 			error = racct_set(p, RACCT_MEMLOCK,
3950 			    ptoa(pmap_wired_count(map->pmap)));
3951 			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3952 		}
3953 	    	error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3954 		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3955 		PROC_UNLOCK(p);
3956 	}
3957 #endif
3958 
3959 	return (rv);
3960 }
3961 
3962 /*
3963  * Unshare the specified VM space for exec.  If other processes are
3964  * mapped to it, then create a new one.  The new vmspace is null.
3965  */
3966 int
3967 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3968 {
3969 	struct vmspace *oldvmspace = p->p_vmspace;
3970 	struct vmspace *newvmspace;
3971 
3972 	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3973 	    ("vmspace_exec recursed"));
3974 	newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
3975 	if (newvmspace == NULL)
3976 		return (ENOMEM);
3977 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3978 	/*
3979 	 * This code is written like this for prototype purposes.  The
3980 	 * goal is to avoid running down the vmspace here, but let the
3981 	 * other process's that are still using the vmspace to finally
3982 	 * run it down.  Even though there is little or no chance of blocking
3983 	 * here, it is a good idea to keep this form for future mods.
3984 	 */
3985 	PROC_VMSPACE_LOCK(p);
3986 	p->p_vmspace = newvmspace;
3987 	PROC_VMSPACE_UNLOCK(p);
3988 	if (p == curthread->td_proc)
3989 		pmap_activate(curthread);
3990 	curthread->td_pflags |= TDP_EXECVMSPC;
3991 	return (0);
3992 }
3993 
3994 /*
3995  * Unshare the specified VM space for forcing COW.  This
3996  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3997  */
3998 int
3999 vmspace_unshare(struct proc *p)
4000 {
4001 	struct vmspace *oldvmspace = p->p_vmspace;
4002 	struct vmspace *newvmspace;
4003 	vm_ooffset_t fork_charge;
4004 
4005 	if (oldvmspace->vm_refcnt == 1)
4006 		return (0);
4007 	fork_charge = 0;
4008 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
4009 	if (newvmspace == NULL)
4010 		return (ENOMEM);
4011 	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4012 		vmspace_free(newvmspace);
4013 		return (ENOMEM);
4014 	}
4015 	PROC_VMSPACE_LOCK(p);
4016 	p->p_vmspace = newvmspace;
4017 	PROC_VMSPACE_UNLOCK(p);
4018 	if (p == curthread->td_proc)
4019 		pmap_activate(curthread);
4020 	vmspace_free(oldvmspace);
4021 	return (0);
4022 }
4023 
4024 /*
4025  *	vm_map_lookup:
4026  *
4027  *	Finds the VM object, offset, and
4028  *	protection for a given virtual address in the
4029  *	specified map, assuming a page fault of the
4030  *	type specified.
4031  *
4032  *	Leaves the map in question locked for read; return
4033  *	values are guaranteed until a vm_map_lookup_done
4034  *	call is performed.  Note that the map argument
4035  *	is in/out; the returned map must be used in
4036  *	the call to vm_map_lookup_done.
4037  *
4038  *	A handle (out_entry) is returned for use in
4039  *	vm_map_lookup_done, to make that fast.
4040  *
4041  *	If a lookup is requested with "write protection"
4042  *	specified, the map may be changed to perform virtual
4043  *	copying operations, although the data referenced will
4044  *	remain the same.
4045  */
4046 int
4047 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
4048 	      vm_offset_t vaddr,
4049 	      vm_prot_t fault_typea,
4050 	      vm_map_entry_t *out_entry,	/* OUT */
4051 	      vm_object_t *object,		/* OUT */
4052 	      vm_pindex_t *pindex,		/* OUT */
4053 	      vm_prot_t *out_prot,		/* OUT */
4054 	      boolean_t *wired)			/* OUT */
4055 {
4056 	vm_map_entry_t entry;
4057 	vm_map_t map = *var_map;
4058 	vm_prot_t prot;
4059 	vm_prot_t fault_type = fault_typea;
4060 	vm_object_t eobject;
4061 	vm_size_t size;
4062 	struct ucred *cred;
4063 
4064 RetryLookup:
4065 
4066 	vm_map_lock_read(map);
4067 
4068 RetryLookupLocked:
4069 	/*
4070 	 * Lookup the faulting address.
4071 	 */
4072 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4073 		vm_map_unlock_read(map);
4074 		return (KERN_INVALID_ADDRESS);
4075 	}
4076 
4077 	entry = *out_entry;
4078 
4079 	/*
4080 	 * Handle submaps.
4081 	 */
4082 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4083 		vm_map_t old_map = map;
4084 
4085 		*var_map = map = entry->object.sub_map;
4086 		vm_map_unlock_read(old_map);
4087 		goto RetryLookup;
4088 	}
4089 
4090 	/*
4091 	 * Check whether this task is allowed to have this page.
4092 	 */
4093 	prot = entry->protection;
4094 	if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4095 		fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4096 		if (prot == VM_PROT_NONE && map != kernel_map &&
4097 		    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4098 		    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4099 		    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4100 		    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4101 			goto RetryLookupLocked;
4102 	}
4103 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4104 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4105 		vm_map_unlock_read(map);
4106 		return (KERN_PROTECTION_FAILURE);
4107 	}
4108 	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4109 	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4110 	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4111 	    ("entry %p flags %x", entry, entry->eflags));
4112 	if ((fault_typea & VM_PROT_COPY) != 0 &&
4113 	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
4114 	    (entry->eflags & MAP_ENTRY_COW) == 0) {
4115 		vm_map_unlock_read(map);
4116 		return (KERN_PROTECTION_FAILURE);
4117 	}
4118 
4119 	/*
4120 	 * If this page is not pageable, we have to get it for all possible
4121 	 * accesses.
4122 	 */
4123 	*wired = (entry->wired_count != 0);
4124 	if (*wired)
4125 		fault_type = entry->protection;
4126 	size = entry->end - entry->start;
4127 	/*
4128 	 * If the entry was copy-on-write, we either ...
4129 	 */
4130 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4131 		/*
4132 		 * If we want to write the page, we may as well handle that
4133 		 * now since we've got the map locked.
4134 		 *
4135 		 * If we don't need to write the page, we just demote the
4136 		 * permissions allowed.
4137 		 */
4138 		if ((fault_type & VM_PROT_WRITE) != 0 ||
4139 		    (fault_typea & VM_PROT_COPY) != 0) {
4140 			/*
4141 			 * Make a new object, and place it in the object
4142 			 * chain.  Note that no new references have appeared
4143 			 * -- one just moved from the map to the new
4144 			 * object.
4145 			 */
4146 			if (vm_map_lock_upgrade(map))
4147 				goto RetryLookup;
4148 
4149 			if (entry->cred == NULL) {
4150 				/*
4151 				 * The debugger owner is charged for
4152 				 * the memory.
4153 				 */
4154 				cred = curthread->td_ucred;
4155 				crhold(cred);
4156 				if (!swap_reserve_by_cred(size, cred)) {
4157 					crfree(cred);
4158 					vm_map_unlock(map);
4159 					return (KERN_RESOURCE_SHORTAGE);
4160 				}
4161 				entry->cred = cred;
4162 			}
4163 			vm_object_shadow(&entry->object.vm_object,
4164 			    &entry->offset, size);
4165 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4166 			eobject = entry->object.vm_object;
4167 			if (eobject->cred != NULL) {
4168 				/*
4169 				 * The object was not shadowed.
4170 				 */
4171 				swap_release_by_cred(size, entry->cred);
4172 				crfree(entry->cred);
4173 				entry->cred = NULL;
4174 			} else if (entry->cred != NULL) {
4175 				VM_OBJECT_WLOCK(eobject);
4176 				eobject->cred = entry->cred;
4177 				eobject->charge = size;
4178 				VM_OBJECT_WUNLOCK(eobject);
4179 				entry->cred = NULL;
4180 			}
4181 
4182 			vm_map_lock_downgrade(map);
4183 		} else {
4184 			/*
4185 			 * We're attempting to read a copy-on-write page --
4186 			 * don't allow writes.
4187 			 */
4188 			prot &= ~VM_PROT_WRITE;
4189 		}
4190 	}
4191 
4192 	/*
4193 	 * Create an object if necessary.
4194 	 */
4195 	if (entry->object.vm_object == NULL &&
4196 	    !map->system_map) {
4197 		if (vm_map_lock_upgrade(map))
4198 			goto RetryLookup;
4199 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4200 		    atop(size));
4201 		entry->offset = 0;
4202 		if (entry->cred != NULL) {
4203 			VM_OBJECT_WLOCK(entry->object.vm_object);
4204 			entry->object.vm_object->cred = entry->cred;
4205 			entry->object.vm_object->charge = size;
4206 			VM_OBJECT_WUNLOCK(entry->object.vm_object);
4207 			entry->cred = NULL;
4208 		}
4209 		vm_map_lock_downgrade(map);
4210 	}
4211 
4212 	/*
4213 	 * Return the object/offset from this entry.  If the entry was
4214 	 * copy-on-write or empty, it has been fixed up.
4215 	 */
4216 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4217 	*object = entry->object.vm_object;
4218 
4219 	*out_prot = prot;
4220 	return (KERN_SUCCESS);
4221 }
4222 
4223 /*
4224  *	vm_map_lookup_locked:
4225  *
4226  *	Lookup the faulting address.  A version of vm_map_lookup that returns
4227  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
4228  */
4229 int
4230 vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
4231 		     vm_offset_t vaddr,
4232 		     vm_prot_t fault_typea,
4233 		     vm_map_entry_t *out_entry,	/* OUT */
4234 		     vm_object_t *object,	/* OUT */
4235 		     vm_pindex_t *pindex,	/* OUT */
4236 		     vm_prot_t *out_prot,	/* OUT */
4237 		     boolean_t *wired)		/* OUT */
4238 {
4239 	vm_map_entry_t entry;
4240 	vm_map_t map = *var_map;
4241 	vm_prot_t prot;
4242 	vm_prot_t fault_type = fault_typea;
4243 
4244 	/*
4245 	 * Lookup the faulting address.
4246 	 */
4247 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
4248 		return (KERN_INVALID_ADDRESS);
4249 
4250 	entry = *out_entry;
4251 
4252 	/*
4253 	 * Fail if the entry refers to a submap.
4254 	 */
4255 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4256 		return (KERN_FAILURE);
4257 
4258 	/*
4259 	 * Check whether this task is allowed to have this page.
4260 	 */
4261 	prot = entry->protection;
4262 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4263 	if ((fault_type & prot) != fault_type)
4264 		return (KERN_PROTECTION_FAILURE);
4265 
4266 	/*
4267 	 * If this page is not pageable, we have to get it for all possible
4268 	 * accesses.
4269 	 */
4270 	*wired = (entry->wired_count != 0);
4271 	if (*wired)
4272 		fault_type = entry->protection;
4273 
4274 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4275 		/*
4276 		 * Fail if the entry was copy-on-write for a write fault.
4277 		 */
4278 		if (fault_type & VM_PROT_WRITE)
4279 			return (KERN_FAILURE);
4280 		/*
4281 		 * We're attempting to read a copy-on-write page --
4282 		 * don't allow writes.
4283 		 */
4284 		prot &= ~VM_PROT_WRITE;
4285 	}
4286 
4287 	/*
4288 	 * Fail if an object should be created.
4289 	 */
4290 	if (entry->object.vm_object == NULL && !map->system_map)
4291 		return (KERN_FAILURE);
4292 
4293 	/*
4294 	 * Return the object/offset from this entry.  If the entry was
4295 	 * copy-on-write or empty, it has been fixed up.
4296 	 */
4297 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4298 	*object = entry->object.vm_object;
4299 
4300 	*out_prot = prot;
4301 	return (KERN_SUCCESS);
4302 }
4303 
4304 /*
4305  *	vm_map_lookup_done:
4306  *
4307  *	Releases locks acquired by a vm_map_lookup
4308  *	(according to the handle returned by that lookup).
4309  */
4310 void
4311 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4312 {
4313 	/*
4314 	 * Unlock the main-level map
4315 	 */
4316 	vm_map_unlock_read(map);
4317 }
4318 
4319 vm_offset_t
4320 vm_map_max_KBI(const struct vm_map *map)
4321 {
4322 
4323 	return (vm_map_max(map));
4324 }
4325 
4326 vm_offset_t
4327 vm_map_min_KBI(const struct vm_map *map)
4328 {
4329 
4330 	return (vm_map_min(map));
4331 }
4332 
4333 pmap_t
4334 vm_map_pmap_KBI(vm_map_t map)
4335 {
4336 
4337 	return (map->pmap);
4338 }
4339 
4340 #include "opt_ddb.h"
4341 #ifdef DDB
4342 #include <sys/kernel.h>
4343 
4344 #include <ddb/ddb.h>
4345 
4346 static void
4347 vm_map_print(vm_map_t map)
4348 {
4349 	vm_map_entry_t entry;
4350 
4351 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4352 	    (void *)map,
4353 	    (void *)map->pmap, map->nentries, map->timestamp);
4354 
4355 	db_indent += 2;
4356 	for (entry = map->header.next; entry != &map->header;
4357 	    entry = entry->next) {
4358 		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
4359 		    (void *)entry, (void *)entry->start, (void *)entry->end,
4360 		    entry->eflags);
4361 		{
4362 			static char *inheritance_name[4] =
4363 			{"share", "copy", "none", "donate_copy"};
4364 
4365 			db_iprintf(" prot=%x/%x/%s",
4366 			    entry->protection,
4367 			    entry->max_protection,
4368 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4369 			if (entry->wired_count != 0)
4370 				db_printf(", wired");
4371 		}
4372 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4373 			db_printf(", share=%p, offset=0x%jx\n",
4374 			    (void *)entry->object.sub_map,
4375 			    (uintmax_t)entry->offset);
4376 			if ((entry->prev == &map->header) ||
4377 			    (entry->prev->object.sub_map !=
4378 				entry->object.sub_map)) {
4379 				db_indent += 2;
4380 				vm_map_print((vm_map_t)entry->object.sub_map);
4381 				db_indent -= 2;
4382 			}
4383 		} else {
4384 			if (entry->cred != NULL)
4385 				db_printf(", ruid %d", entry->cred->cr_ruid);
4386 			db_printf(", object=%p, offset=0x%jx",
4387 			    (void *)entry->object.vm_object,
4388 			    (uintmax_t)entry->offset);
4389 			if (entry->object.vm_object && entry->object.vm_object->cred)
4390 				db_printf(", obj ruid %d charge %jx",
4391 				    entry->object.vm_object->cred->cr_ruid,
4392 				    (uintmax_t)entry->object.vm_object->charge);
4393 			if (entry->eflags & MAP_ENTRY_COW)
4394 				db_printf(", copy (%s)",
4395 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4396 			db_printf("\n");
4397 
4398 			if ((entry->prev == &map->header) ||
4399 			    (entry->prev->object.vm_object !=
4400 				entry->object.vm_object)) {
4401 				db_indent += 2;
4402 				vm_object_print((db_expr_t)(intptr_t)
4403 						entry->object.vm_object,
4404 						0, 0, (char *)0);
4405 				db_indent -= 2;
4406 			}
4407 		}
4408 	}
4409 	db_indent -= 2;
4410 }
4411 
4412 DB_SHOW_COMMAND(map, map)
4413 {
4414 
4415 	if (!have_addr) {
4416 		db_printf("usage: show map <addr>\n");
4417 		return;
4418 	}
4419 	vm_map_print((vm_map_t)addr);
4420 }
4421 
4422 DB_SHOW_COMMAND(procvm, procvm)
4423 {
4424 	struct proc *p;
4425 
4426 	if (have_addr) {
4427 		p = db_lookup_proc(addr);
4428 	} else {
4429 		p = curproc;
4430 	}
4431 
4432 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4433 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4434 	    (void *)vmspace_pmap(p->p_vmspace));
4435 
4436 	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4437 }
4438 
4439 #endif /* DDB */
4440