xref: /freebsd/sys/vm/vm_map.c (revision b89a7cc2ed6e4398d5be502f5bb5885d1ec6ff0f)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62 
63 /*
64  *	Virtual memory mapping module.
65  */
66 
67 #include <sys/cdefs.h>
68 __FBSDID("$FreeBSD$");
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/ktr.h>
74 #include <sys/lock.h>
75 #include <sys/mutex.h>
76 #include <sys/proc.h>
77 #include <sys/vmmeter.h>
78 #include <sys/mman.h>
79 #include <sys/vnode.h>
80 #include <sys/racct.h>
81 #include <sys/resourcevar.h>
82 #include <sys/rwlock.h>
83 #include <sys/file.h>
84 #include <sys/sysctl.h>
85 #include <sys/sysent.h>
86 #include <sys/shm.h>
87 
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vnode_pager.h>
98 #include <vm/swap_pager.h>
99 #include <vm/uma.h>
100 
101 /*
102  *	Virtual memory maps provide for the mapping, protection,
103  *	and sharing of virtual memory objects.  In addition,
104  *	this module provides for an efficient virtual copy of
105  *	memory from one map to another.
106  *
107  *	Synchronization is required prior to most operations.
108  *
109  *	Maps consist of an ordered doubly-linked list of simple
110  *	entries; a self-adjusting binary search tree of these
111  *	entries is used to speed up lookups.
112  *
113  *	Since portions of maps are specified by start/end addresses,
114  *	which may not align with existing map entries, all
115  *	routines merely "clip" entries to these start/end values.
116  *	[That is, an entry is split into two, bordering at a
117  *	start or end value.]  Note that these clippings may not
118  *	always be necessary (as the two resulting entries are then
119  *	not changed); however, the clipping is done for convenience.
120  *
121  *	As mentioned above, virtual copy operations are performed
122  *	by copying VM object references from one map to
123  *	another, and then marking both regions as copy-on-write.
124  */
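/*
 * Clipping example (hypothetical addresses): given one entry covering
 * [0x1000, 0x4000), an operation on [0x2000, 0x3000) first clips the
 * entry into [0x1000, 0x2000) and [0x2000, 0x4000), then clips the
 * latter into [0x2000, 0x3000) and [0x3000, 0x4000), so the operation
 * applies to exactly one whole entry.
 */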
125 
126 static struct mtx map_sleep_mtx;
127 static uma_zone_t mapentzone;
128 static uma_zone_t kmapentzone;
129 static uma_zone_t mapzone;
130 static uma_zone_t vmspace_zone;
131 static int vmspace_zinit(void *mem, int size, int flags);
132 static int vm_map_zinit(void *mem, int size, int flags);
133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
134     vm_offset_t max);
135 static int vm_map_alignspace(vm_map_t map, vm_object_t object,
136     vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
137     vm_offset_t max_addr, vm_offset_t alignment);
138 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
139 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
140 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
141 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
142     vm_map_entry_t gap_entry);
143 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
144     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
145 #ifdef INVARIANTS
146 static void vm_map_zdtor(void *mem, int size, void *arg);
147 static void vmspace_zdtor(void *mem, int size, void *arg);
148 #endif
149 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
150     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
151     int cow);
152 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
153     vm_offset_t failed_addr);
154 
155 #define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
156     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
157      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
158 
159 /*
160  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
161  * stable.
162  */
163 #define PROC_VMSPACE_LOCK(p) do { } while (0)
164 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
165 
166 /*
167  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
168  *
169  *	Asserts that the starting and ending region
170  *	addresses fall within the valid range of the map.
171  */
172 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
173 		{					\
174 		if (start < vm_map_min(map))		\
175 			start = vm_map_min(map);	\
176 		if (end > vm_map_max(map))		\
177 			end = vm_map_max(map);		\
178 		if (start > end)			\
179 			start = end;			\
180 		}
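/*
 * Worked example (hypothetical values): in a map whose valid range is
 * [0x1000, 0xf000), VM_MAP_RANGE_CHECK clamps start = 0x500 and
 * end = 0x10000 to 0x1000 and 0xf000 respectively; a request entirely
 * outside the map collapses to an empty range instead of escaping the
 * map bounds.
 */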
181 
182 /*
183  *	vm_map_startup:
184  *
185  *	Initialize the vm_map module.  Must be called before
186  *	any other vm_map routines.
187  *
188  *	Map and entry structures are allocated from the general
189  *	purpose memory pool with some exceptions:
190  *
191  *	- The kernel map and kmem submap are allocated statically.
192  *	- Kernel map entries are allocated out of a static pool.
193  *
194  *	These restrictions are necessary since malloc() uses the
195  *	maps and requires map entries.
196  */
197 
198 void
199 vm_map_startup(void)
200 {
201 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
202 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
203 #ifdef INVARIANTS
204 	    vm_map_zdtor,
205 #else
206 	    NULL,
207 #endif
208 	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
209 	uma_prealloc(mapzone, MAX_KMAP);
210 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
211 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
212 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
213 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
214 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
215 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
216 #ifdef INVARIANTS
217 	    vmspace_zdtor,
218 #else
219 	    NULL,
220 #endif
221 	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
222 }
223 
224 static int
225 vmspace_zinit(void *mem, int size, int flags)
226 {
227 	struct vmspace *vm;
228 
229 	vm = (struct vmspace *)mem;
230 
231 	vm->vm_map.pmap = NULL;
232 	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
233 	PMAP_LOCK_INIT(vmspace_pmap(vm));
234 	return (0);
235 }
236 
237 static int
238 vm_map_zinit(void *mem, int size, int flags)
239 {
240 	vm_map_t map;
241 
242 	map = (vm_map_t)mem;
243 	memset(map, 0, sizeof(*map));
244 	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
245 	sx_init(&map->lock, "vm map (user)");
246 	return (0);
247 }
248 
249 #ifdef INVARIANTS
250 static void
251 vmspace_zdtor(void *mem, int size, void *arg)
252 {
253 	struct vmspace *vm;
254 
255 	vm = (struct vmspace *)mem;
256 
257 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
258 }
259 static void
260 vm_map_zdtor(void *mem, int size, void *arg)
261 {
262 	vm_map_t map;
263 
264 	map = (vm_map_t)mem;
265 	KASSERT(map->nentries == 0,
266 	    ("map %p nentries == %d on free.",
267 	    map, map->nentries));
268 	KASSERT(map->size == 0,
269 	    ("map %p size == %lu on free.",
270 	    map, (unsigned long)map->size));
271 }
272 #endif	/* INVARIANTS */
273 
274 /*
275  * Allocate a vmspace structure, including a vm_map and pmap,
276  * and initialize those structures.  The refcnt is set to 1.
277  *
278  * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
279  */
280 struct vmspace *
281 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
282 {
283 	struct vmspace *vm;
284 
285 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
286 	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
287 	if (!pinit(vmspace_pmap(vm))) {
288 		uma_zfree(vmspace_zone, vm);
289 		return (NULL);
290 	}
291 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
292 	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
293 	vm->vm_refcnt = 1;
294 	vm->vm_shm = NULL;
295 	vm->vm_swrss = 0;
296 	vm->vm_tsize = 0;
297 	vm->vm_dsize = 0;
298 	vm->vm_ssize = 0;
299 	vm->vm_taddr = 0;
300 	vm->vm_daddr = 0;
301 	vm->vm_maxsaddr = 0;
302 	return (vm);
303 }
304 
305 #ifdef RACCT
306 static void
307 vmspace_container_reset(struct proc *p)
308 {
309 
310 	PROC_LOCK(p);
311 	racct_set(p, RACCT_DATA, 0);
312 	racct_set(p, RACCT_STACK, 0);
313 	racct_set(p, RACCT_RSS, 0);
314 	racct_set(p, RACCT_MEMLOCK, 0);
315 	racct_set(p, RACCT_VMEM, 0);
316 	PROC_UNLOCK(p);
317 }
318 #endif
319 
320 static inline void
321 vmspace_dofree(struct vmspace *vm)
322 {
323 
324 	CTR1(KTR_VM, "vmspace_free: %p", vm);
325 
326 	/*
327 	 * Make sure any SysV shm is freed, it might not have been in
328 	 * exit1().
329 	 */
330 	shmexit(vm);
331 
332 	/*
333 	 * Lock the map, to wait out all other references to it.
334 	 * Delete all of the mappings and pages they hold, then call
335 	 * the pmap module to reclaim anything left.
336 	 */
337 	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
338 	    vm_map_max(&vm->vm_map));
339 
340 	pmap_release(vmspace_pmap(vm));
341 	vm->vm_map.pmap = NULL;
342 	uma_zfree(vmspace_zone, vm);
343 }
344 
345 void
346 vmspace_free(struct vmspace *vm)
347 {
348 
349 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
350 	    "vmspace_free() called");
351 
352 	if (vm->vm_refcnt == 0)
353 		panic("vmspace_free: attempt to free already freed vmspace");
354 
355 	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
356 		vmspace_dofree(vm);
357 }
358 
359 void
360 vmspace_exitfree(struct proc *p)
361 {
362 	struct vmspace *vm;
363 
364 	PROC_VMSPACE_LOCK(p);
365 	vm = p->p_vmspace;
366 	p->p_vmspace = NULL;
367 	PROC_VMSPACE_UNLOCK(p);
368 	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
369 	vmspace_free(vm);
370 }
371 
372 void
373 vmspace_exit(struct thread *td)
374 {
375 	int refcnt;
376 	struct vmspace *vm;
377 	struct proc *p;
378 
379 	/*
380 	 * Release user portion of address space.
381 	 * This releases references to vnodes,
382 	 * which could cause I/O if the file has been unlinked.
383 	 * Need to do this early enough that we can still sleep.
384 	 *
385 	 * The last exiting process to reach this point releases as
386 	 * much of the environment as it can. vmspace_dofree() is the
387 	 * slower fallback in case another process had a temporary
388 	 * reference to the vmspace.
389 	 */
390 
391 	p = td->td_proc;
392 	vm = p->p_vmspace;
393 	atomic_add_int(&vmspace0.vm_refcnt, 1);
394 	refcnt = vm->vm_refcnt;
395 	do {
396 		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
397 			/* Switch now since other proc might free vmspace */
398 			PROC_VMSPACE_LOCK(p);
399 			p->p_vmspace = &vmspace0;
400 			PROC_VMSPACE_UNLOCK(p);
401 			pmap_activate(td);
402 		}
403 	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1));
404 	if (refcnt == 1) {
405 		if (p->p_vmspace != vm) {
406 			/* vmspace not yet freed, switch back */
407 			PROC_VMSPACE_LOCK(p);
408 			p->p_vmspace = vm;
409 			PROC_VMSPACE_UNLOCK(p);
410 			pmap_activate(td);
411 		}
412 		pmap_remove_pages(vmspace_pmap(vm));
413 		/* Switch now since this proc will free vmspace */
414 		PROC_VMSPACE_LOCK(p);
415 		p->p_vmspace = &vmspace0;
416 		PROC_VMSPACE_UNLOCK(p);
417 		pmap_activate(td);
418 		vmspace_dofree(vm);
419 	}
420 #ifdef RACCT
421 	if (racct_enable)
422 		vmspace_container_reset(p);
423 #endif
424 }
425 
426 /* Acquire reference to vmspace owned by another process. */
427 
428 struct vmspace *
429 vmspace_acquire_ref(struct proc *p)
430 {
431 	struct vmspace *vm;
432 	int refcnt;
433 
434 	PROC_VMSPACE_LOCK(p);
435 	vm = p->p_vmspace;
436 	if (vm == NULL) {
437 		PROC_VMSPACE_UNLOCK(p);
438 		return (NULL);
439 	}
440 	refcnt = vm->vm_refcnt;
441 	do {
442 		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
443 			PROC_VMSPACE_UNLOCK(p);
444 			return (NULL);
445 		}
446 	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));
447 	if (vm != p->p_vmspace) {
448 		PROC_VMSPACE_UNLOCK(p);
449 		vmspace_free(vm);
450 		return (NULL);
451 	}
452 	PROC_VMSPACE_UNLOCK(p);
453 	return (vm);
454 }
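/*
 * Hypothetical caller sketch: code that inspects another process's
 * address space typically brackets the access as
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);
 *	... examine vm->vm_map under the map lock ...
 *	vmspace_free(vm);
 */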
455 
456 /*
457  * Switch between vmspaces in an AIO kernel process.
458  *
459  * The AIO kernel processes switch to and from a user process's
460  * vmspace while performing an I/O operation on behalf of a user
461  * process.  The new vmspace is either the vmspace of a user process
462  * obtained from an active AIO request or the initial vmspace of the
463  * AIO kernel process (when it is idling).  Because user processes
464  * will block to drain any active AIO requests before proceeding in
465  * exit() or execve(), the vmspace reference count for these vmspaces
466  * can never be 0.  This allows for a much simpler implementation than
467  * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
468  * processes hold an extra reference on their initial vmspace for the
469  * life of the process so that this guarantee is true for any vmspace
470  * passed as 'newvm'.
471  */
472 void
473 vmspace_switch_aio(struct vmspace *newvm)
474 {
475 	struct vmspace *oldvm;
476 
477 	/* XXX: Need some way to assert that this is an aio daemon. */
478 
479 	KASSERT(newvm->vm_refcnt > 0,
480 	    ("vmspace_switch_aio: newvm unreferenced"));
481 
482 	oldvm = curproc->p_vmspace;
483 	if (oldvm == newvm)
484 		return;
485 
486 	/*
487 	 * Point to the new address space and refer to it.
488 	 */
489 	curproc->p_vmspace = newvm;
490 	atomic_add_int(&newvm->vm_refcnt, 1);
491 
492 	/* Activate the new mapping. */
493 	pmap_activate(curthread);
494 
495 	/* Remove the daemon's reference to the old address space. */
496 	KASSERT(oldvm->vm_refcnt > 1,
497 	    ("vmspace_switch_aio: oldvm dropping last reference"));
498 	vmspace_free(oldvm);
499 }
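/*
 * Hypothetical usage sketch: an AIO daemon servicing a request "job"
 * ("job" and its fields are illustrative, not part of this file)
 * might bracket the I/O as
 *
 *	vmspace_switch_aio(job->userproc->p_vmspace);
 *	... copyin()/copyout() against the user addresses ...
 *	vmspace_switch_aio(aio_daemon_vmspace);
 */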
500 
501 void
502 _vm_map_lock(vm_map_t map, const char *file, int line)
503 {
504 
505 	if (map->system_map)
506 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
507 	else
508 		sx_xlock_(&map->lock, file, line);
509 	map->timestamp++;
510 }
511 
512 static void
513 vm_map_process_deferred(void)
514 {
515 	struct thread *td;
516 	vm_map_entry_t entry, next;
517 	vm_object_t object;
518 
519 	td = curthread;
520 	entry = td->td_map_def_user;
521 	td->td_map_def_user = NULL;
522 	while (entry != NULL) {
523 		next = entry->next;
524 		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
525 			/*
526 			 * Decrement the object's writemappings and
527 			 * possibly the vnode's v_writecount.
528 			 */
529 			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
530 			    ("Submap with writecount"));
531 			object = entry->object.vm_object;
532 			KASSERT(object != NULL, ("No object for writecount"));
533 			vnode_pager_release_writecount(object, entry->start,
534 			    entry->end);
535 		}
536 		vm_map_entry_deallocate(entry, FALSE);
537 		entry = next;
538 	}
539 }
540 
541 void
542 _vm_map_unlock(vm_map_t map, const char *file, int line)
543 {
544 
545 	if (map->system_map)
546 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
547 	else {
548 		sx_xunlock_(&map->lock, file, line);
549 		vm_map_process_deferred();
550 	}
551 }
552 
553 void
554 _vm_map_lock_read(vm_map_t map, const char *file, int line)
555 {
556 
557 	if (map->system_map)
558 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
559 	else
560 		sx_slock_(&map->lock, file, line);
561 }
562 
563 void
564 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
565 {
566 
567 	if (map->system_map)
568 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
569 	else {
570 		sx_sunlock_(&map->lock, file, line);
571 		vm_map_process_deferred();
572 	}
573 }
574 
575 int
576 _vm_map_trylock(vm_map_t map, const char *file, int line)
577 {
578 	int error;
579 
580 	error = map->system_map ?
581 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
582 	    !sx_try_xlock_(&map->lock, file, line);
583 	if (error == 0)
584 		map->timestamp++;
585 	return (error == 0);
586 }
587 
588 int
589 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
590 {
591 	int error;
592 
593 	error = map->system_map ?
594 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
595 	    !sx_try_slock_(&map->lock, file, line);
596 	return (error == 0);
597 }
598 
599 /*
600  *	_vm_map_lock_upgrade:	[ internal use only ]
601  *
602  *	Tries to upgrade a read (shared) lock on the specified map to a write
603  *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
604  *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
605  *	returned without a read or write lock held.
606  *
607  *	Requires that the map be read locked.
608  */
609 int
610 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
611 {
612 	unsigned int last_timestamp;
613 
614 	if (map->system_map) {
615 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
616 	} else {
617 		if (!sx_try_upgrade_(&map->lock, file, line)) {
618 			last_timestamp = map->timestamp;
619 			sx_sunlock_(&map->lock, file, line);
620 			vm_map_process_deferred();
621 			/*
622 			 * If the map's timestamp does not change while the
623 			 * map is unlocked, then the upgrade succeeds.
624 			 */
625 			sx_xlock_(&map->lock, file, line);
626 			if (last_timestamp != map->timestamp) {
627 				sx_xunlock_(&map->lock, file, line);
628 				return (1);
629 			}
630 		}
631 	}
632 	map->timestamp++;
633 	return (0);
634 }
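/*
 * Illustrative retry shape for an upgrade (sketch only; the
 * revalidation step is whatever lookup the caller performed under the
 * read lock):
 *
 *	vm_map_lock_read(map);
 *	... lookup ...
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		vm_map_lock(map);	(upgrade failed; lock was lost)
 *		... revalidate; the map may have changed ...
 *	}
 *	... modify the map ...
 *	vm_map_unlock(map);
 */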
635 
636 void
637 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
638 {
639 
640 	if (map->system_map) {
641 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
642 	} else
643 		sx_downgrade_(&map->lock, file, line);
644 }
645 
646 /*
647  *	vm_map_locked:
648  *
649  *	Returns a non-zero value if the caller holds a write (exclusive) lock
650  *	on the specified map and the value "0" otherwise.
651  */
652 int
653 vm_map_locked(vm_map_t map)
654 {
655 
656 	if (map->system_map)
657 		return (mtx_owned(&map->system_mtx));
658 	else
659 		return (sx_xlocked(&map->lock));
660 }
661 
662 #ifdef INVARIANTS
663 static void
664 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
665 {
666 
667 	if (map->system_map)
668 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
669 	else
670 		sx_assert_(&map->lock, SA_XLOCKED, file, line);
671 }
672 
673 #define	VM_MAP_ASSERT_LOCKED(map) \
674     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
675 #else
676 #define	VM_MAP_ASSERT_LOCKED(map)
677 #endif
678 
679 /*
680  *	_vm_map_unlock_and_wait:
681  *
682  *	Atomically releases the lock on the specified map and puts the calling
683  *	thread to sleep.  The calling thread will remain asleep until either
684  *	vm_map_wakeup() is performed on the map or the specified timeout is
685  *	exceeded.
686  *
687  *	WARNING!  This function does not perform deferred deallocations of
688  *	objects and map	entries.  Therefore, the calling thread is expected to
689  *	reacquire the map lock after reawakening and later perform an ordinary
690  *	unlock operation, such as vm_map_unlock(), before completing its
691  *	operation on the map.
692  */
693 int
694 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
695 {
696 
697 	mtx_lock(&map_sleep_mtx);
698 	if (map->system_map)
699 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
700 	else
701 		sx_xunlock_(&map->lock, file, line);
702 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
703 	    timo));
704 }
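/*
 * Illustrative wait loop (sketch only; "ready" stands for whatever
 * condition the caller is waiting on):
 *
 *	vm_map_lock(map);
 *	while (!ready(map)) {
 *		map->needs_wakeup = TRUE;
 *		vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);	(reacquire; deferred frees run
 *					 at the eventual vm_map_unlock())
 *	}
 *	vm_map_unlock(map);
 */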
705 
706 /*
707  *	vm_map_wakeup:
708  *
709  *	Awaken any threads that have slept on the map using
710  *	vm_map_unlock_and_wait().
711  */
712 void
713 vm_map_wakeup(vm_map_t map)
714 {
715 
716 	/*
717 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
718 	 * from being performed (and lost) between the map unlock
719 	 * and the msleep() in _vm_map_unlock_and_wait().
720 	 */
721 	mtx_lock(&map_sleep_mtx);
722 	mtx_unlock(&map_sleep_mtx);
723 	wakeup(&map->root);
724 }
725 
726 void
727 vm_map_busy(vm_map_t map)
728 {
729 
730 	VM_MAP_ASSERT_LOCKED(map);
731 	map->busy++;
732 }
733 
734 void
735 vm_map_unbusy(vm_map_t map)
736 {
737 
738 	VM_MAP_ASSERT_LOCKED(map);
739 	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
740 	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
741 		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
742 		wakeup(&map->busy);
743 	}
744 }
745 
746 void
747 vm_map_wait_busy(vm_map_t map)
748 {
749 
750 	VM_MAP_ASSERT_LOCKED(map);
751 	while (map->busy) {
752 		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
753 		if (map->system_map)
754 			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
755 		else
756 			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
757 	}
758 	map->timestamp++;
759 }
760 
761 long
762 vmspace_resident_count(struct vmspace *vmspace)
763 {
764 	return pmap_resident_count(vmspace_pmap(vmspace));
765 }
766 
767 /*
768  *	vm_map_create:
769  *
770  *	Creates and returns a new empty VM map with
771  *	the given physical map structure, and having
772  *	the given lower and upper address bounds.
773  */
774 vm_map_t
775 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
776 {
777 	vm_map_t result;
778 
779 	result = uma_zalloc(mapzone, M_WAITOK);
780 	CTR1(KTR_VM, "vm_map_create: %p", result);
781 	_vm_map_init(result, pmap, min, max);
782 	return (result);
783 }
784 
785 /*
786  * Initialize an existing vm_map structure
787  * such as that in the vmspace structure.
788  */
789 static void
790 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
791 {
792 
793 	map->header.next = map->header.prev = &map->header;
794 	map->header.eflags = MAP_ENTRY_HEADER;
795 	map->needs_wakeup = FALSE;
796 	map->system_map = 0;
797 	map->pmap = pmap;
798 	map->header.end = min;
799 	map->header.start = max;
800 	map->flags = 0;
801 	map->root = NULL;
802 	map->timestamp = 0;
803 	map->busy = 0;
804 	map->anon_loc = 0;
805 }
806 
807 void
808 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
809 {
810 
811 	_vm_map_init(map, pmap, min, max);
812 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
813 	sx_init(&map->lock, "user map");
814 }
815 
816 /*
817  *	vm_map_entry_dispose:	[ internal use only ]
818  *
819  *	Inverse of vm_map_entry_create.
820  */
821 static void
822 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
823 {
824 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
825 }
826 
827 /*
828  *	vm_map_entry_create:	[ internal use only ]
829  *
830  *	Allocates a VM map entry for insertion.
831  *	No entry fields are filled in.
832  */
833 static vm_map_entry_t
834 vm_map_entry_create(vm_map_t map)
835 {
836 	vm_map_entry_t new_entry;
837 
838 	if (map->system_map)
839 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
840 	else
841 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
842 	if (new_entry == NULL)
843 		panic("vm_map_entry_create: kernel resources exhausted");
844 	return (new_entry);
845 }
846 
847 /*
848  *	vm_map_entry_set_behavior:
849  *
850  *	Set the expected access behavior, either normal, random, or
851  *	sequential.
852  */
853 static inline void
854 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
855 {
856 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
857 	    (behavior & MAP_ENTRY_BEHAV_MASK);
858 }
859 
860 /*
861  *	vm_map_entry_set_max_free:
862  *
863  *	Set the max_free field in a vm_map_entry.
864  */
865 static inline void
866 vm_map_entry_set_max_free(vm_map_entry_t entry)
867 {
868 
869 	entry->max_free = entry->adj_free;
870 	if (entry->left != NULL && entry->left->max_free > entry->max_free)
871 		entry->max_free = entry->left->max_free;
872 	if (entry->right != NULL && entry->right->max_free > entry->max_free)
873 		entry->max_free = entry->right->max_free;
874 }
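/*
 * Numeric example (hypothetical): an entry with adj_free = 2 pages, a
 * left child whose max_free is 8 pages, and a right child whose
 * max_free is 4 pages ends up with max_free = 8, the largest gap
 * anywhere in its subtree.
 */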
875 
876 /*
877  *	vm_map_entry_splay:
878  *
879  *	The Sleator and Tarjan top-down splay algorithm with the
880  *	following variation.  Max_free must be computed bottom-up, so
881  *	on the downward pass, maintain the left and right spines in
882  *	reverse order.  Then, make a second pass up each side to fix
883  *	the pointers and compute max_free.  The time bound is O(log n)
884  *	amortized.
885  *
886  *	The new root is the vm_map_entry containing "addr", or else an
887  *	adjacent entry (lower or higher) if addr is not in the tree.
888  *
889  *	The map must be locked, and leaves it so.
890  *
891  *	Returns: the new root.
892  */
893 static vm_map_entry_t
894 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
895 {
896 	vm_map_entry_t llist, rlist;
897 	vm_map_entry_t ltree, rtree;
898 	vm_map_entry_t y;
899 
900 	/* Special case of empty tree. */
901 	if (root == NULL)
902 		return (root);
903 
904 	/*
905 	 * Pass One: Splay down the tree until we find addr or a NULL
906 	 * pointer where addr would go.  llist and rlist are the two
907 	 * sides in reverse order (bottom-up), with llist linked by
908 	 * the right pointer and rlist linked by the left pointer in
909 	 * the vm_map_entry.  Wait until Pass Two to set max_free on
910 	 * the two spines.
911 	 */
912 	llist = NULL;
913 	rlist = NULL;
914 	for (;;) {
915 		/* root is never NULL in here. */
916 		if (addr < root->start) {
917 			y = root->left;
918 			if (y == NULL)
919 				break;
920 			if (addr < y->start && y->left != NULL) {
921 				/* Rotate right and put y on rlist. */
922 				root->left = y->right;
923 				y->right = root;
924 				vm_map_entry_set_max_free(root);
925 				root = y->left;
926 				y->left = rlist;
927 				rlist = y;
928 			} else {
929 				/* Put root on rlist. */
930 				root->left = rlist;
931 				rlist = root;
932 				root = y;
933 			}
934 		} else if (addr >= root->end) {
935 			y = root->right;
936 			if (y == NULL)
937 				break;
938 			if (addr >= y->end && y->right != NULL) {
939 				/* Rotate left and put y on llist. */
940 				root->right = y->left;
941 				y->left = root;
942 				vm_map_entry_set_max_free(root);
943 				root = y->right;
944 				y->right = llist;
945 				llist = y;
946 			} else {
947 				/* Put root on llist. */
948 				root->right = llist;
949 				llist = root;
950 				root = y;
951 			}
952 		} else
953 			break;
954 	}
955 
956 	/*
957 	 * Pass Two: Walk back up the two spines, flip the pointers
958 	 * and set max_free.  The subtrees of the root go at the
959 	 * bottom of llist and rlist.
960 	 */
961 	ltree = root->left;
962 	while (llist != NULL) {
963 		y = llist->right;
964 		llist->right = ltree;
965 		vm_map_entry_set_max_free(llist);
966 		ltree = llist;
967 		llist = y;
968 	}
969 	rtree = root->right;
970 	while (rlist != NULL) {
971 		y = rlist->left;
972 		rlist->left = rtree;
973 		vm_map_entry_set_max_free(rlist);
974 		rtree = rlist;
975 		rlist = y;
976 	}
977 
978 	/*
979 	 * Final assembly: add ltree and rtree as subtrees of root.
980 	 */
981 	root->left = ltree;
982 	root->right = rtree;
983 	vm_map_entry_set_max_free(root);
984 
985 	return (root);
986 }
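/*
 * Sketch of the effect (hypothetical): splaying at an address inside
 * the third-lowest of several entries brings that entry to the root
 * in Pass One; Pass Two then re-stitches the reversed left and right
 * spines under it, recomputing max_free bottom-up so the invariant
 * maintained by vm_map_entry_set_max_free() holds at every node.
 */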
987 
988 /*
989  *	vm_map_entry_{un,}link:
990  *
991  *	Insert/remove entries from maps.
992  */
993 static void
994 vm_map_entry_link(vm_map_t map,
995 		  vm_map_entry_t after_where,
996 		  vm_map_entry_t entry)
997 {
998 
999 	CTR4(KTR_VM,
1000 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
1001 	    map->nentries, entry, after_where);
1002 	VM_MAP_ASSERT_LOCKED(map);
1003 	KASSERT(after_where->end <= entry->start,
1004 	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
1005 	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
1006 	KASSERT(entry->end <= after_where->next->start,
1007 	    ("vm_map_entry_link: new end %jx next start %jx overlap",
1008 	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
1009 
1010 	map->nentries++;
1011 	entry->prev = after_where;
1012 	entry->next = after_where->next;
1013 	entry->next->prev = entry;
1014 	after_where->next = entry;
1015 
1016 	if (after_where != &map->header) {
1017 		if (after_where != map->root)
1018 			vm_map_entry_splay(after_where->start, map->root);
1019 		entry->right = after_where->right;
1020 		entry->left = after_where;
1021 		after_where->right = NULL;
1022 		after_where->adj_free = entry->start - after_where->end;
1023 		vm_map_entry_set_max_free(after_where);
1024 	} else {
1025 		entry->right = map->root;
1026 		entry->left = NULL;
1027 	}
1028 	entry->adj_free = entry->next->start - entry->end;
1029 	vm_map_entry_set_max_free(entry);
1030 	map->root = entry;
1031 }
1032 
1033 static void
1034 vm_map_entry_unlink(vm_map_t map,
1035 		    vm_map_entry_t entry)
1036 {
1037 	vm_map_entry_t next, prev, root;
1038 
1039 	VM_MAP_ASSERT_LOCKED(map);
1040 	if (entry != map->root)
1041 		vm_map_entry_splay(entry->start, map->root);
1042 	if (entry->left == NULL)
1043 		root = entry->right;
1044 	else {
1045 		root = vm_map_entry_splay(entry->start, entry->left);
1046 		root->right = entry->right;
1047 		root->adj_free = entry->next->start - root->end;
1048 		vm_map_entry_set_max_free(root);
1049 	}
1050 	map->root = root;
1051 
1052 	prev = entry->prev;
1053 	next = entry->next;
1054 	next->prev = prev;
1055 	prev->next = next;
1056 	map->nentries--;
1057 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1058 	    map->nentries, entry);
1059 }
1060 
1061 /*
1062  *	vm_map_entry_resize_free:
1063  *
1064  *	Recompute the amount of free space following a vm_map_entry
1065  *	and propagate that value up the tree.  Call this function after
1066  *	resizing a map entry in-place, that is, without a call to
1067  *	vm_map_entry_link() or _unlink().
1068  *
1069  *	The map must be locked, and leaves it so.
1070  */
1071 static void
1072 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1073 {
1074 
1075 	/*
1076 	 * Using splay trees without parent pointers, propagating
1077 	 * max_free up the tree is done by moving the entry to the
1078 	 * root and making the change there.
1079 	 */
1080 	if (entry != map->root)
1081 		map->root = vm_map_entry_splay(entry->start, map->root);
1082 
1083 	entry->adj_free = entry->next->start - entry->end;
1084 	vm_map_entry_set_max_free(entry);
1085 }
1086 
1087 /*
1088  *	vm_map_lookup_entry:	[ internal use only ]
1089  *
1090  *	Finds the map entry containing (or
1091  *	immediately preceding) the specified address
1092  *	in the given map; the entry is returned
1093  *	in the "entry" parameter.  The boolean
1094  *	result indicates whether the address is
1095  *	actually contained in the map.
1096  */
1097 boolean_t
1098 vm_map_lookup_entry(
1099 	vm_map_t map,
1100 	vm_offset_t address,
1101 	vm_map_entry_t *entry)	/* OUT */
1102 {
1103 	vm_map_entry_t cur;
1104 	boolean_t locked;
1105 
1106 	/*
1107 	 * If the map is empty, then the map entry immediately preceding
1108 	 * "address" is the map's header.
1109 	 */
1110 	cur = map->root;
1111 	if (cur == NULL)
1112 		*entry = &map->header;
1113 	else if (address >= cur->start && cur->end > address) {
1114 		*entry = cur;
1115 		return (TRUE);
1116 	} else if ((locked = vm_map_locked(map)) ||
1117 	    sx_try_upgrade(&map->lock)) {
1118 		/*
1119 		 * Splay requires a write lock on the map.  However, it only
1120 		 * restructures the binary search tree; it does not otherwise
1121 		 * change the map.  Thus, the map's timestamp need not change
1122 		 * on a temporary upgrade.
1123 		 */
1124 		map->root = cur = vm_map_entry_splay(address, cur);
1125 		if (!locked)
1126 			sx_downgrade(&map->lock);
1127 
1128 		/*
1129 		 * If "address" is contained within a map entry, the new root
1130 		 * is that map entry.  Otherwise, the new root is a map entry
1131 		 * immediately before or after "address".
1132 		 */
1133 		if (address >= cur->start) {
1134 			*entry = cur;
1135 			if (cur->end > address)
1136 				return (TRUE);
1137 		} else
1138 			*entry = cur->prev;
1139 	} else
1140 		/*
1141 		 * Since the map is only locked for read access, perform a
1142 		 * standard binary search tree lookup for "address".
1143 		 */
1144 		for (;;) {
1145 			if (address < cur->start) {
1146 				if (cur->left == NULL) {
1147 					*entry = cur->prev;
1148 					break;
1149 				}
1150 				cur = cur->left;
1151 			} else if (cur->end > address) {
1152 				*entry = cur;
1153 				return (TRUE);
1154 			} else {
1155 				if (cur->right == NULL) {
1156 					*entry = cur;
1157 					break;
1158 				}
1159 				cur = cur->right;
1160 			}
1161 		}
1162 	return (FALSE);
1163 }
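/*
 * Hypothetical caller sketch:
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies in [entry->start, entry->end) ...
 *	} else {
 *		... entry precedes addr; entry->next is the first
 *		entry above it, possibly the header ...
 *	}
 *	vm_map_unlock_read(map);
 */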
1164 
1165 /*
1166  *	vm_map_insert:
1167  *
1168  *	Inserts the given whole VM object into the target
1169  *	map at the specified address range.  The object's
1170  *	size should match that of the address range.
1171  *
1172  *	Requires that the map be locked, and leaves it so.
1173  *
1174  *	If object is non-NULL, ref count must be bumped by caller
1175  *	prior to making call to account for the new entry.
1176  */
1177 int
1178 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1179     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
1180 {
1181 	vm_map_entry_t new_entry, prev_entry, temp_entry;
1182 	struct ucred *cred;
1183 	vm_eflags_t protoeflags;
1184 	vm_inherit_t inheritance;
1185 
1186 	VM_MAP_ASSERT_LOCKED(map);
1187 	KASSERT(object != kernel_object ||
1188 	    (cow & MAP_COPY_ON_WRITE) == 0,
1189 	    ("vm_map_insert: kernel object and COW"));
1190 	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
1191 	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
1192 	KASSERT((prot & ~max) == 0,
1193 	    ("prot %#x is not subset of max_prot %#x", prot, max));
1194 
1195 	/*
1196 	 * Check that the start and end points are not bogus.
1197 	 */
1198 	if (start < vm_map_min(map) || end > vm_map_max(map) ||
1199 	    start >= end)
1200 		return (KERN_INVALID_ADDRESS);
1201 
1202 	/*
1203 	 * Find the entry prior to the proposed starting address; if it's part
1204 	 * of an existing entry, this range is bogus.
1205 	 */
1206 	if (vm_map_lookup_entry(map, start, &temp_entry))
1207 		return (KERN_NO_SPACE);
1208 
1209 	prev_entry = temp_entry;
1210 
1211 	/*
1212 	 * Assert that the next entry doesn't overlap the end point.
1213 	 */
1214 	if (prev_entry->next->start < end)
1215 		return (KERN_NO_SPACE);
1216 
1217 	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
1218 	    max != VM_PROT_NONE))
1219 		return (KERN_INVALID_ARGUMENT);
1220 
1221 	protoeflags = 0;
1222 	if (cow & MAP_COPY_ON_WRITE)
1223 		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
1224 	if (cow & MAP_NOFAULT)
1225 		protoeflags |= MAP_ENTRY_NOFAULT;
1226 	if (cow & MAP_DISABLE_SYNCER)
1227 		protoeflags |= MAP_ENTRY_NOSYNC;
1228 	if (cow & MAP_DISABLE_COREDUMP)
1229 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1230 	if (cow & MAP_STACK_GROWS_DOWN)
1231 		protoeflags |= MAP_ENTRY_GROWS_DOWN;
1232 	if (cow & MAP_STACK_GROWS_UP)
1233 		protoeflags |= MAP_ENTRY_GROWS_UP;
1234 	if (cow & MAP_VN_WRITECOUNT)
1235 		protoeflags |= MAP_ENTRY_VN_WRITECNT;
1236 	if ((cow & MAP_CREATE_GUARD) != 0)
1237 		protoeflags |= MAP_ENTRY_GUARD;
1238 	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
1239 		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
1240 	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
1241 		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
1242 	if (cow & MAP_INHERIT_SHARE)
1243 		inheritance = VM_INHERIT_SHARE;
1244 	else
1245 		inheritance = VM_INHERIT_DEFAULT;
1246 
1247 	cred = NULL;
1248 	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
1249 		goto charged;
1250 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1251 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1252 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1253 			return (KERN_RESOURCE_SHORTAGE);
1254 		KASSERT(object == NULL ||
1255 		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
1256 		    object->cred == NULL,
1257 		    ("overcommit: vm_map_insert o %p", object));
1258 		cred = curthread->td_ucred;
1259 	}
1260 
1261 charged:
1262 	/* Expand the kernel pmap, if necessary. */
1263 	if (map == kernel_map && end > kernel_vm_end)
1264 		pmap_growkernel(end);
1265 	if (object != NULL) {
1266 		/*
1267 		 * OBJ_ONEMAPPING must be cleared unless this mapping
1268 		 * is trivially proven to be the only mapping for any
1269 		 * of the object's pages.  (Object granularity
1270 		 * reference counting is insufficient to recognize
1271 		 * aliases with precision.)
1272 		 */
1273 		VM_OBJECT_WLOCK(object);
1274 		if (object->ref_count > 1 || object->shadow_count != 0)
1275 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1276 		VM_OBJECT_WUNLOCK(object);
1277 	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1278 	    protoeflags &&
1279 	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
1280 	    prev_entry->end == start && (prev_entry->cred == cred ||
1281 	    (prev_entry->object.vm_object != NULL &&
1282 	    prev_entry->object.vm_object->cred == cred)) &&
1283 	    vm_object_coalesce(prev_entry->object.vm_object,
1284 	    prev_entry->offset,
1285 	    (vm_size_t)(prev_entry->end - prev_entry->start),
1286 	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
1287 	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
1288 		/*
1289 		 * We were able to extend the object.  Determine if we
1290 		 * can extend the previous map entry to include the
1291 		 * new range as well.
1292 		 */
1293 		if (prev_entry->inheritance == inheritance &&
1294 		    prev_entry->protection == prot &&
1295 		    prev_entry->max_protection == max &&
1296 		    prev_entry->wired_count == 0) {
1297 			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1298 			    0, ("prev_entry %p has incoherent wiring",
1299 			    prev_entry));
1300 			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1301 				map->size += end - prev_entry->end;
1302 			prev_entry->end = end;
1303 			vm_map_entry_resize_free(map, prev_entry);
1304 			vm_map_simplify_entry(map, prev_entry);
1305 			return (KERN_SUCCESS);
1306 		}
1307 
1308 		/*
1309 		 * If we can extend the object but cannot extend the
1310 		 * map entry, we have to create a new map entry.  We
1311 		 * must bump the ref count on the extended object to
1312 		 * account for it.  object may be NULL.
1313 		 */
1314 		object = prev_entry->object.vm_object;
1315 		offset = prev_entry->offset +
1316 		    (prev_entry->end - prev_entry->start);
1317 		vm_object_reference(object);
1318 		if (cred != NULL && object != NULL && object->cred != NULL &&
1319 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1320 			/* Object already accounts for this uid. */
1321 			cred = NULL;
1322 		}
1323 	}
1324 	if (cred != NULL)
1325 		crhold(cred);
1326 
1327 	/*
1328 	 * Create a new entry
1329 	 */
1330 	new_entry = vm_map_entry_create(map);
1331 	new_entry->start = start;
1332 	new_entry->end = end;
1333 	new_entry->cred = NULL;
1334 
1335 	new_entry->eflags = protoeflags;
1336 	new_entry->object.vm_object = object;
1337 	new_entry->offset = offset;
1338 
1339 	new_entry->inheritance = inheritance;
1340 	new_entry->protection = prot;
1341 	new_entry->max_protection = max;
1342 	new_entry->wired_count = 0;
1343 	new_entry->wiring_thread = NULL;
1344 	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1345 	new_entry->next_read = start;
1346 
1347 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1348 	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
1349 	new_entry->cred = cred;
1350 
1351 	/*
1352 	 * Insert the new entry into the list
1353 	 */
1354 	vm_map_entry_link(map, prev_entry, new_entry);
1355 	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1356 		map->size += new_entry->end - new_entry->start;
1357 
1358 	/*
1359 	 * Try to coalesce the new entry with both the previous and next
1360 	 * entries in the list.  Previously, we only attempted to coalesce
1361 	 * with the previous entry when object is NULL.  Here, we handle the
1362 	 * other cases, which are less common.
1363 	 */
1364 	vm_map_simplify_entry(map, new_entry);
1365 
1366 	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
1367 		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1368 		    end - start, cow & MAP_PREFAULT_PARTIAL);
1369 	}
1370 
1371 	return (KERN_SUCCESS);
1372 }
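/*
 * Hypothetical usage sketch, per the contract above (the caller
 * supplies the reference taken on a non-NULL object):
 *
 *	vm_object_reference(object);
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, object, 0, start, start + size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *	vm_map_unlock(map);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);	(drop the unused ref)
 */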
1373 
1374 /*
1375  *	vm_map_findspace:
1376  *
1377  *	Find the first fit (lowest VM address) for "length" free bytes
1378  *	beginning at address >= start in the given map.
1379  *
1380  *	In a vm_map_entry, "adj_free" is the amount of free space
1381  *	adjacent (higher address) to this entry, and "max_free" is the
1382  *	maximum amount of contiguous free space in its subtree.  This
1383  *	allows finding a free region in one path down the tree, so
1384  *	O(log n) amortized with splay trees.
1385  *
1386  *	The map must be locked, and leaves it so.
1387  *
1388  *	Returns: 0 on success, and starting address in *addr,
1389  *		 1 if insufficient space.
1390  */
1391 int
1392 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1393     vm_offset_t *addr)	/* OUT */
1394 {
1395 	vm_map_entry_t entry;
1396 	vm_offset_t st;
1397 
1398 	/*
1399 	 * Request must fit within min/max VM address and must avoid
1400 	 * address wrap.
1401 	 */
1402 	start = MAX(start, vm_map_min(map));
1403 	if (start + length > vm_map_max(map) || start + length < start)
1404 		return (1);
1405 
1406 	/* Empty tree means wide open address space. */
1407 	if (map->root == NULL) {
1408 		*addr = start;
1409 		return (0);
1410 	}
1411 
1412 	/*
1413 	 * After splay, if start comes before root node, then there
1414 	 * must be a gap from start to the root.
1415 	 */
1416 	map->root = vm_map_entry_splay(start, map->root);
1417 	if (start + length <= map->root->start) {
1418 		*addr = start;
1419 		return (0);
1420 	}
1421 
1422 	/*
1423 	 * Root is the last node that might begin its gap before
1424 	 * start, and this is the last comparison where address
1425 	 * wrap might be a problem.
1426 	 */
1427 	st = (start > map->root->end) ? start : map->root->end;
1428 	if (length <= map->root->end + map->root->adj_free - st) {
1429 		*addr = st;
1430 		return (0);
1431 	}
1432 
1433 	/* With max_free, can immediately tell if no solution. */
1434 	entry = map->root->right;
1435 	if (entry == NULL || length > entry->max_free)
1436 		return (1);
1437 
1438 	/*
1439 	 * Search the right subtree in the order: left subtree, root,
1440 	 * right subtree (first fit).  The previous splay implies that
1441 	 * all regions in the right subtree have addresses > start.
1442 	 */
1443 	while (entry != NULL) {
1444 		if (entry->left != NULL && entry->left->max_free >= length)
1445 			entry = entry->left;
1446 		else if (entry->adj_free >= length) {
1447 			*addr = entry->end;
1448 			return (0);
1449 		} else
1450 			entry = entry->right;
1451 	}
1452 
1453 	/* Can't get here, so panic if we do. */
1454 	panic("vm_map_findspace: max_free corrupt");
1455 }
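/*
 * Worked example (hypothetical): with entries ending at 0x2000 and
 * 0x6000 whose adj_free values are 0x1000 and 0x4000, a search for
 * length 0x3000 from address 0 skips the first gap (too small) and
 * returns *addr = 0x6000, the lowest address with enough contiguous
 * free space.
 */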
1456 
1457 int
1458 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1459     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1460     vm_prot_t max, int cow)
1461 {
1462 	vm_offset_t end;
1463 	int result;
1464 
1465 	end = start + length;
1466 	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1467 	    object == NULL,
1468 	    ("vm_map_fixed: non-NULL backing object for stack"));
1469 	vm_map_lock(map);
1470 	VM_MAP_RANGE_CHECK(map, start, end);
1471 	if ((cow & MAP_CHECK_EXCL) == 0)
1472 		vm_map_delete(map, start, end);
1473 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1474 		result = vm_map_stack_locked(map, start, length, sgrowsiz,
1475 		    prot, max, cow);
1476 	} else {
1477 		result = vm_map_insert(map, object, offset, start, end,
1478 		    prot, max, cow);
1479 	}
1480 	vm_map_unlock(map);
1481 	return (result);
1482 }
1483 
1484 static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
1485 static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
1486 
1487 static int cluster_anon = 1;
1488 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
1489     &cluster_anon, 0,
1490     "Cluster anonymous mappings");
1491 
1492 static long aslr_restarts;
1493 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
1494     &aslr_restarts, 0,
1495     "Number of aslr failures");
1496 
1497 #define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
1498 
1499 /*
1500  * Searches for the specified amount of free space in the given map with the
1501  * specified alignment.  Performs an address-ordered, first-fit search from
1502  * the given address "*addr", with an optional upper bound "max_addr".  If the
1503  * parameter "alignment" is zero, then the alignment is computed from the
1504  * given (object, offset) pair so as to enable the greatest possible use of
1505  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
1506  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
1507  *
1508  * The map must be locked.  Initially, there must be at least "length" bytes
1509  * of free space at the given address.
1510  */
1511 static int
1512 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1513     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
1514     vm_offset_t alignment)
1515 {
1516 	vm_offset_t aligned_addr, free_addr;
1517 
1518 	VM_MAP_ASSERT_LOCKED(map);
1519 	free_addr = *addr;
1520 	KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
1521 	    free_addr == *addr, ("caller provided insufficient free space"));
1522 	for (;;) {
1523 		/*
1524 		 * At the start of every iteration, the free space at address
1525 		 * "*addr" is at least "length" bytes.
1526 		 */
1527 		if (alignment == 0)
1528 			pmap_align_superpage(object, offset, addr, length);
1529 		else if ((*addr & (alignment - 1)) != 0) {
1530 			*addr &= ~(alignment - 1);
1531 			*addr += alignment;
1532 		}
1533 		aligned_addr = *addr;
1534 		if (aligned_addr == free_addr) {
1535 			/*
1536 			 * Alignment did not change "*addr", so "*addr" must
1537 			 * still provide sufficient free space.
1538 			 */
1539 			return (KERN_SUCCESS);
1540 		}
1541 
1542 		/*
1543 		 * Test for address wrap on "*addr".  A wrapped "*addr" could
1544 		 * be a valid address, in which case vm_map_findspace() cannot
1545 		 * be relied upon to fail.
1546 		 */
1547 		if (aligned_addr < free_addr ||
1548 		    vm_map_findspace(map, aligned_addr, length, addr) ||
1549 		    (max_addr != 0 && *addr + length > max_addr))
1550 			return (KERN_NO_SPACE);
1551 		free_addr = *addr;
1552 		if (free_addr == aligned_addr) {
1553 			/*
1554 			 * If a successful call to vm_map_findspace() did not
1555 			 * change "*addr", then "*addr" must still be aligned
1556 			 * and provide sufficient free space.
1557 			 */
1558 			return (KERN_SUCCESS);
1559 		}
1560 	}
1561 }
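/*
 * Alignment arithmetic example (hypothetical values): with
 * alignment = 0x10000 and *addr = 0x123456, the round-up above gives
 * (0x123456 & ~0xffff) + 0x10000 = 0x130000; if that address still
 * has "length" free bytes the loop returns KERN_SUCCESS, otherwise
 * vm_map_findspace() supplies the next candidate and the round-up
 * repeats.
 */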
1562 
1563 /*
1564  *	vm_map_find finds an unallocated region in the target address
1565  *	map with the given length.  The search is defined to be
1566  *	first-fit from the specified address; the region found is
1567  *	returned in the same parameter.
1568  *
1569  *	If object is non-NULL, ref count must be bumped by caller
1570  *	prior to making call to account for the new entry.
1571  */
1572 int
1573 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1574 	    vm_offset_t *addr,	/* IN/OUT */
1575 	    vm_size_t length, vm_offset_t max_addr, int find_space,
1576 	    vm_prot_t prot, vm_prot_t max, int cow)
1577 {
1578 	vm_offset_t alignment, curr_min_addr, min_addr;
1579 	int gap, pidx, rv, try;
1580 	bool cluster, en_aslr, update_anon;
1581 
1582 	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1583 	    object == NULL,
1584 	    ("vm_map_find: non-NULL backing object for stack"));
1585 	MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
1586 	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
1587 	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1588 	    (object->flags & OBJ_COLORED) == 0))
1589 		find_space = VMFS_ANY_SPACE;
1590 	if (find_space >> 8 != 0) {
1591 		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1592 		alignment = (vm_offset_t)1 << (find_space >> 8);
1593 	} else
1594 		alignment = 0;
1595 	en_aslr = (map->flags & MAP_ASLR) != 0;
1596 	update_anon = cluster = cluster_anon != 0 &&
1597 	    (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
1598 	    find_space != VMFS_NO_SPACE && object == NULL &&
1599 	    (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
1600 	    MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
1601 	curr_min_addr = min_addr = *addr;
1602 	if (en_aslr && min_addr == 0 && !cluster &&
1603 	    find_space != VMFS_NO_SPACE &&
1604 	    (map->flags & MAP_ASLR_IGNSTART) != 0)
1605 		curr_min_addr = min_addr = vm_map_min(map);
1606 	try = 0;
1607 	vm_map_lock(map);
1608 	if (cluster) {
1609 		curr_min_addr = map->anon_loc;
1610 		if (curr_min_addr == 0)
1611 			cluster = false;
1612 	}
1613 	if (find_space != VMFS_NO_SPACE) {
1614 		KASSERT(find_space == VMFS_ANY_SPACE ||
1615 		    find_space == VMFS_OPTIMAL_SPACE ||
1616 		    find_space == VMFS_SUPER_SPACE ||
1617 		    alignment != 0, ("unexpected VMFS flag"));
1618 again:
1619 		/*
1620 		 * When creating an anonymous mapping, try clustering
1621 		 * with an existing anonymous mapping first.
1622 		 *
1623 		 * We make up to two attempts to find address space
1624 		 * for a given find_space value. The first attempt may
1625 		 * apply randomization or may cluster with an existing
1626 		 * anonymous mapping. If this first attempt fails,
1627 		 * perform a first-fit search of the available address
1628 		 * space.
1629 		 *
1630 		 * If both tries fail and find_space is
1631 		 * VMFS_OPTIMAL_SPACE, fall back to VMFS_ANY_SPACE,
1632 		 * re-enabling clustering and randomization.
1633 		 */
1634 		try++;
1635 		MPASS(try <= 2);
1636 
1637 		if (try == 2) {
1638 			/*
1639 			 * Second try: we failed either to find a
1640 			 * suitable region for randomizing the
1641 			 * allocation, or to cluster with an existing
1642 			 * mapping.  Retry with free run.
1643 			 */
1644 			curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
1645 			    vm_map_min(map) : min_addr;
1646 			atomic_add_long(&aslr_restarts, 1);
1647 		}
1648 
1649 		if (try == 1 && en_aslr && !cluster) {
1650 			/*
1651 			 * Find space for allocation, including
1652 			 * gap needed for later randomization.
1653 			 */
1654 			pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
1655 			    (find_space == VMFS_SUPER_SPACE || find_space ==
1656 			    VMFS_OPTIMAL_SPACE) ? 1 : 0;
1657 			gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
1658 			    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
1659 			    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
1660 			if (vm_map_findspace(map, curr_min_addr, length +
1661 			    gap * pagesizes[pidx], addr) ||
1662 			    (max_addr != 0 && *addr + length > max_addr))
1663 				goto again;
1664 			/* And randomize the start address. */
1665 			*addr += (arc4random() % gap) * pagesizes[pidx];
1666 		} else if (vm_map_findspace(map, curr_min_addr, length, addr) ||
1667 		    (max_addr != 0 && *addr + length > max_addr)) {
1668 			if (cluster) {
1669 				cluster = false;
1670 				MPASS(try == 1);
1671 				goto again;
1672 			}
1673 			rv = KERN_NO_SPACE;
1674 			goto done;
1675 		}
1676 
1677 		if (find_space != VMFS_ANY_SPACE &&
1678 		    (rv = vm_map_alignspace(map, object, offset, addr, length,
1679 		    max_addr, alignment)) != KERN_SUCCESS) {
1680 			if (find_space == VMFS_OPTIMAL_SPACE) {
1681 				find_space = VMFS_ANY_SPACE;
1682 				curr_min_addr = min_addr;
1683 				cluster = update_anon;
1684 				try = 0;
1685 				goto again;
1686 			}
1687 			goto done;
1688 		}
1689 	} else if ((cow & MAP_REMAP) != 0) {
1690 		if (*addr < vm_map_min(map) ||
1691 		    *addr + length > vm_map_max(map) ||
1692 		    *addr + length <= length) {
1693 			rv = KERN_INVALID_ADDRESS;
1694 			goto done;
1695 		}
1696 		vm_map_delete(map, *addr, *addr + length);
1697 	}
1698 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1699 		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
1700 		    max, cow);
1701 	} else {
1702 		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
1703 		    prot, max, cow);
1704 	}
1705 	if (rv == KERN_SUCCESS && update_anon)
1706 		map->anon_loc = *addr + length;
1707 done:
1708 	vm_map_unlock(map);
1709 	return (rv);
1710 }
1711 
1712 /*
1713  *	vm_map_find_min() is a variant of vm_map_find() that takes an
1714  *	additional parameter (min_addr) and treats the given address
1715  *	(*addr) differently.  Specifically, it treats *addr as a hint
1716  *	and not as the minimum address where the mapping is created.
1717  *
1718  *	This function works in two phases.  First, it tries to
1719  *	allocate above the hint.  If that fails and the hint is
1720  *	greater than min_addr, it performs a second pass, replacing
1721  *	the hint with min_addr as the minimum address for the
1722  *	allocation.
1723  */
1724 int
1725 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1726     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
1727     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
1728     int cow)
1729 {
1730 	vm_offset_t hint;
1731 	int rv;
1732 
1733 	hint = *addr;
1734 	for (;;) {
1735 		rv = vm_map_find(map, object, offset, addr, length, max_addr,
1736 		    find_space, prot, max, cow);
1737 		if (rv == KERN_SUCCESS || min_addr >= hint)
1738 			return (rv);
1739 		*addr = hint = min_addr;
1740 	}
1741 }
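/*
 * Example (hypothetical): with hint *addr = 0x40000000 and
 * min_addr = 0x10000, the first pass searches from 0x40000000 upward;
 * only if that fails does the loop retry from 0x10000, so the hint is
 * preferred but not mandatory.
 */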
1742 
1743 /*
1744  * A map entry with any of the following flags set must not be merged with
1745  * another entry.
1746  */
1747 #define	MAP_ENTRY_NOMERGE_MASK	(MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
1748 	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)
1749 
1750 static bool
1751 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
1752 {
1753 
1754 	KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
1755 	    (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
1756 	    ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
1757 	    prev, entry));
1758 	return (prev->end == entry->start &&
1759 	    prev->object.vm_object == entry->object.vm_object &&
1760 	    (prev->object.vm_object == NULL ||
1761 	    prev->offset + (prev->end - prev->start) == entry->offset) &&
1762 	    prev->eflags == entry->eflags &&
1763 	    prev->protection == entry->protection &&
1764 	    prev->max_protection == entry->max_protection &&
1765 	    prev->inheritance == entry->inheritance &&
1766 	    prev->wired_count == entry->wired_count &&
1767 	    prev->cred == entry->cred);
1768 }
1769 
1770 static void
1771 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
1772 {
1773 
1774 	/*
1775 	 * If the backing object is a vnode object, vm_object_deallocate()
1776 	 * calls vrele().  However, vrele() does not lock the vnode because
1777 	 * the vnode has additional references.  Thus, the map lock can be
1778 	 * kept without causing a lock-order reversal with the vnode lock.
1779 	 *
1780 	 * Since we count the number of virtual page mappings in
1781 	 * object->un_pager.vnp.writemappings, the writemappings value
1782 	 * should not be adjusted when the entry is disposed of.
1783 	 */
1784 	if (entry->object.vm_object != NULL)
1785 		vm_object_deallocate(entry->object.vm_object);
1786 	if (entry->cred != NULL)
1787 		crfree(entry->cred);
1788 	vm_map_entry_dispose(map, entry);
1789 }
1790 
1791 /*
1792  *	vm_map_simplify_entry:
1793  *
1794  *	Simplify the given map entry by merging with either neighbor.  This
1795  *	routine also has the ability to merge with both neighbors.
1796  *
1797  *	The map must be locked.
1798  *
1799  *	This routine guarantees that the passed entry remains valid (though
1800  *	possibly extended).  When merging, this routine may delete one or
1801  *	both neighbors.
1802  */
1803 void
1804 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1805 {
1806 	vm_map_entry_t next, prev;
1807 
1808 	if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0)
1809 		return;
1810 	prev = entry->prev;
1811 	if (vm_map_mergeable_neighbors(prev, entry)) {
1812 		vm_map_entry_unlink(map, prev);
1813 		entry->start = prev->start;
1814 		entry->offset = prev->offset;
1815 		if (entry->prev != &map->header)
1816 			vm_map_entry_resize_free(map, entry->prev);
1817 		vm_map_merged_neighbor_dispose(map, prev);
1818 	}
1819 	next = entry->next;
1820 	if (vm_map_mergeable_neighbors(entry, next)) {
1821 		vm_map_entry_unlink(map, next);
1822 		entry->end = next->end;
1823 		vm_map_entry_resize_free(map, entry);
1824 		vm_map_merged_neighbor_dispose(map, next);
1825 	}
1826 }
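
/*
 * Editorial example (not part of the source): two anonymous entries
 * [A, B) with offset 0 and [B, C) with offset B - A, backed by the same
 * object and agreeing in eflags, protection, inheritance, wiring, and
 * cred, pass vm_map_mergeable_neighbors() and collapse into a single
 * entry [A, C) with offset 0; the redundant entry is then disposed of.
 */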
1827 
1828 /*
1829  *	vm_map_clip_start:	[ internal use only ]
1830  *
1831  *	Asserts that the given entry begins at or after
1832  *	the specified address; if necessary,
1833  *	it splits the entry into two.
1834  */
1835 #define vm_map_clip_start(map, entry, startaddr) \
1836 { \
1837 	if ((startaddr) > (entry)->start) \
1838 		_vm_map_clip_start((map), (entry), (startaddr)); \
1839 }
1840 
1841 /*
1842  *	This routine is called only when it is known that
1843  *	the entry must be split.
1844  */
1845 static void
1846 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1847 {
1848 	vm_map_entry_t new_entry;
1849 
1850 	VM_MAP_ASSERT_LOCKED(map);
1851 	KASSERT(entry->end > start && entry->start < start,
1852 	    ("_vm_map_clip_start: invalid clip of entry %p", entry));
1853 
1854 	/*
1855 	 * Split off the front portion -- note that we must insert the new
1856 	 * entry BEFORE this one, so that this entry has the specified
1857 	 * starting address.
1858 	 */
1859 	vm_map_simplify_entry(map, entry);
1860 
1861 	/*
1862 	 * If there is no object backing this entry, we might as well create
1863 	 * one now.  If we defer it, an object can get created after the map
1864 	 * is clipped, and individual objects will be created for the split-up
1865 	 * map.  This is a bit of a hack, but is also about the best place to
1866 	 * put this improvement.
1867 	 */
1868 	if (entry->object.vm_object == NULL && !map->system_map &&
1869 	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
1870 		vm_object_t object;
1871 		object = vm_object_allocate(OBJT_DEFAULT,
1872 				atop(entry->end - entry->start));
1873 		entry->object.vm_object = object;
1874 		entry->offset = 0;
1875 		if (entry->cred != NULL) {
1876 			object->cred = entry->cred;
1877 			object->charge = entry->end - entry->start;
1878 			entry->cred = NULL;
1879 		}
1880 	} else if (entry->object.vm_object != NULL &&
1881 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1882 		   entry->cred != NULL) {
1883 		VM_OBJECT_WLOCK(entry->object.vm_object);
1884 		KASSERT(entry->object.vm_object->cred == NULL,
1885 		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1886 		entry->object.vm_object->cred = entry->cred;
1887 		entry->object.vm_object->charge = entry->end - entry->start;
1888 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1889 		entry->cred = NULL;
1890 	}
1891 
1892 	new_entry = vm_map_entry_create(map);
1893 	*new_entry = *entry;
1894 
1895 	new_entry->end = start;
1896 	entry->offset += (start - entry->start);
1897 	entry->start = start;
1898 	if (new_entry->cred != NULL)
1899 		crhold(entry->cred);
1900 
1901 	vm_map_entry_link(map, entry->prev, new_entry);
1902 
1903 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1904 		vm_object_reference(new_entry->object.vm_object);
1905 		/*
1906 		 * The object->un_pager.vnp.writemappings for the
1907 		 * object of a MAP_ENTRY_VN_WRITECNT-type entry shall
1908 		 * be kept as-is here.  The virtual pages are
1909 		 * redistributed among the clipped entries, so the sum
1910 		 * is left the same.
1911 		 */
1912 	}
1913 }
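
/*
 * Editorial example (not part of the source): clipping the entry
 * [0x1000, 0x5000) at start address 0x3000 inserts a new entry
 * [0x1000, 0x3000) before it, while the original entry becomes
 * [0x3000, 0x5000) with its offset advanced by 0x2000.
 */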
1914 
1915 /*
1916  *	vm_map_clip_end:	[ internal use only ]
1917  *
1918  *	Asserts that the given entry ends at or before
1919  *	the specified address; if necessary,
1920  *	it splits the entry into two.
1921  */
1922 #define vm_map_clip_end(map, entry, endaddr) \
1923 { \
1924 	if ((endaddr) < (entry->end)) \
1925 		_vm_map_clip_end((map), (entry), (endaddr)); \
1926 }
1927 
1928 /*
1929  *	This routine is called only when it is known that
1930  *	the entry must be split.
1931  */
1932 static void
1933 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1934 {
1935 	vm_map_entry_t new_entry;
1936 
1937 	VM_MAP_ASSERT_LOCKED(map);
1938 	KASSERT(entry->start < end && entry->end > end,
1939 	    ("_vm_map_clip_end: invalid clip of entry %p", entry));
1940 
1941 	/*
1942 	 * If there is no object backing this entry, we might as well create
1943 	 * one now.  If we defer it, an object can get created after the map
1944 	 * is clipped, and individual objects will be created for the split-up
1945 	 * map.  This is a bit of a hack, but is also about the best place to
1946 	 * put this improvement.
1947 	 */
1948 	if (entry->object.vm_object == NULL && !map->system_map &&
1949 	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
1950 		vm_object_t object;
1951 		object = vm_object_allocate(OBJT_DEFAULT,
1952 				atop(entry->end - entry->start));
1953 		entry->object.vm_object = object;
1954 		entry->offset = 0;
1955 		if (entry->cred != NULL) {
1956 			object->cred = entry->cred;
1957 			object->charge = entry->end - entry->start;
1958 			entry->cred = NULL;
1959 		}
1960 	} else if (entry->object.vm_object != NULL &&
1961 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1962 		   entry->cred != NULL) {
1963 		VM_OBJECT_WLOCK(entry->object.vm_object);
1964 		KASSERT(entry->object.vm_object->cred == NULL,
1965 		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1966 		entry->object.vm_object->cred = entry->cred;
1967 		entry->object.vm_object->charge = entry->end - entry->start;
1968 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1969 		entry->cred = NULL;
1970 	}
1971 
1972 	/*
1973 	 * Create a new entry and insert it AFTER the specified entry
1974 	 */
1975 	new_entry = vm_map_entry_create(map);
1976 	*new_entry = *entry;
1977 
1978 	new_entry->start = entry->end = end;
1979 	new_entry->offset += (end - entry->start);
1980 	if (new_entry->cred != NULL)
1981 		crhold(entry->cred);
1982 
1983 	vm_map_entry_link(map, entry, new_entry);
1984 
1985 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1986 		vm_object_reference(new_entry->object.vm_object);
1987 	}
1988 }
1989 
1990 /*
1991  *	vm_map_submap:		[ kernel use only ]
1992  *
1993  *	Mark the given range as handled by a subordinate map.
1994  *
1995  *	This range must have been created with vm_map_find,
1996  *	and no other operations may have been performed on this
1997  *	range prior to calling vm_map_submap.
1998  *
1999  *	Only a limited number of operations can be performed
2000  *	within this range after calling vm_map_submap:
2001  *		vm_fault
2002  *	[Don't try vm_map_copy!]
2003  *
2004  *	To remove a submapping, one must first remove the
2005  *	range from the superior map, and then destroy the
2006  *	submap (if desired).  [Better yet, don't try it.]
2007  */
2008 int
2009 vm_map_submap(
2010 	vm_map_t map,
2011 	vm_offset_t start,
2012 	vm_offset_t end,
2013 	vm_map_t submap)
2014 {
2015 	vm_map_entry_t entry;
2016 	int result;
2017 
2018 	result = KERN_INVALID_ARGUMENT;
2019 
2020 	vm_map_lock(submap);
2021 	submap->flags |= MAP_IS_SUB_MAP;
2022 	vm_map_unlock(submap);
2023 
2024 	vm_map_lock(map);
2025 
2026 	VM_MAP_RANGE_CHECK(map, start, end);
2027 
2028 	if (vm_map_lookup_entry(map, start, &entry)) {
2029 		vm_map_clip_start(map, entry, start);
2030 	} else
2031 		entry = entry->next;
2032 
2033 	vm_map_clip_end(map, entry, end);
2034 
2035 	if ((entry->start == start) && (entry->end == end) &&
2036 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
2037 	    (entry->object.vm_object == NULL)) {
2038 		entry->object.sub_map = submap;
2039 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2040 		result = KERN_SUCCESS;
2041 	}
2042 	vm_map_unlock(map);
2043 
2044 	if (result != KERN_SUCCESS) {
2045 		vm_map_lock(submap);
2046 		submap->flags &= ~MAP_IS_SUB_MAP;
2047 		vm_map_unlock(submap);
2048 	}
2049 	return (result);
2050 }
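
/*
 * Editorial sketch (not part of the source): carving a kernel submap
 * out of a range previously allocated with vm_map_find().  The
 * identifiers kstart, kend, and ksub are illustrative only.
 *
 *	ksub = vm_map_create(vm_map_pmap(kernel_map), kstart, kend);
 *	if (vm_map_submap(kernel_map, kstart, kend, ksub) != KERN_SUCCESS)
 *		panic("submap creation failed");
 */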
2051 
2052 /*
2053  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2054  */
2055 #define	MAX_INIT_PT	96
2056 
2057 /*
2058  *	vm_map_pmap_enter:
2059  *
2060  *	Preload the specified map's pmap with mappings to the specified
2061  *	object's memory-resident pages.  No further physical pages are
2062  *	allocated, and no further virtual pages are retrieved from secondary
2063  *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
2064  *	limited number of page mappings are created at the low-end of the
2065  *	specified address range.  (For this purpose, a superpage mapping
2066  *	counts as one page mapping.)  Otherwise, all resident pages within
2067  *	the specified address range are mapped.
2068  */
2069 static void
2070 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2071     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
2072 {
2073 	vm_offset_t start;
2074 	vm_page_t p, p_start;
2075 	vm_pindex_t mask, psize, threshold, tmpidx;
2076 
2077 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
2078 		return;
2079 	VM_OBJECT_RLOCK(object);
2080 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2081 		VM_OBJECT_RUNLOCK(object);
2082 		VM_OBJECT_WLOCK(object);
2083 		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2084 			pmap_object_init_pt(map->pmap, addr, object, pindex,
2085 			    size);
2086 			VM_OBJECT_WUNLOCK(object);
2087 			return;
2088 		}
2089 		VM_OBJECT_LOCK_DOWNGRADE(object);
2090 	}
2091 
2092 	psize = atop(size);
2093 	if (psize + pindex > object->size) {
2094 		if (object->size < pindex) {
2095 			VM_OBJECT_RUNLOCK(object);
2096 			return;
2097 		}
2098 		psize = object->size - pindex;
2099 	}
2100 
2101 	start = 0;
2102 	p_start = NULL;
2103 	threshold = MAX_INIT_PT;
2104 
2105 	p = vm_page_find_least(object, pindex);
2106 	/*
2107 	 * Assert: the variable p is either (1) the page with the
2108 	 * least pindex greater than or equal to the parameter pindex
2109 	 * or (2) NULL.
2110 	 */
2111 	for (;
2112 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
2113 	     p = TAILQ_NEXT(p, listq)) {
2114 		/*
2115 		 * Don't allow madvise() to blow away our really
2116 		 * free pages by allocating pv entries.
2117 		 */
2118 		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2119 		    vm_page_count_severe()) ||
2120 		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2121 		    tmpidx >= threshold)) {
2122 			psize = tmpidx;
2123 			break;
2124 		}
2125 		if (p->valid == VM_PAGE_BITS_ALL) {
2126 			if (p_start == NULL) {
2127 				start = addr + ptoa(tmpidx);
2128 				p_start = p;
2129 			}
2130 			/* Jump ahead if a superpage mapping is possible. */
2131 			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2132 			    (pagesizes[p->psind] - 1)) == 0) {
2133 				mask = atop(pagesizes[p->psind]) - 1;
2134 				if (tmpidx + mask < psize &&
2135 				    vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2136 					p += mask;
2137 					threshold += mask;
2138 				}
2139 			}
2140 		} else if (p_start != NULL) {
2141 			pmap_enter_object(map->pmap, start, addr +
2142 			    ptoa(tmpidx), p_start, prot);
2143 			p_start = NULL;
2144 		}
2145 	}
2146 	if (p_start != NULL)
2147 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2148 		    p_start, prot);
2149 	VM_OBJECT_RUNLOCK(object);
2150 }
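
/*
 * Editorial example (not part of the source): with 4 KB base pages and
 * a 2 MB superpage (psind == 1), mask = atop(2 MB) - 1 = 511; a fully
 * valid, suitably aligned run lets the loop advance p and the
 * MAP_PREFAULT_PARTIAL threshold by 511 extra pages, so the superpage
 * mapping is charged as a single page mapping.
 */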
2151 
2152 /*
2153  *	vm_map_protect:
2154  *
2155  *	Sets the protection of the specified address
2156  *	region in the target map.  If "set_max" is
2157  *	specified, the maximum protection is to be set;
2158  *	otherwise, only the current protection is affected.
2159  */
2160 int
2161 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2162 	       vm_prot_t new_prot, boolean_t set_max)
2163 {
2164 	vm_map_entry_t current, entry;
2165 	vm_object_t obj;
2166 	struct ucred *cred;
2167 	vm_prot_t old_prot;
2168 
2169 	if (start == end)
2170 		return (KERN_SUCCESS);
2171 
2172 	vm_map_lock(map);
2173 
2174 	/*
2175 	 * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2176 	 * need to fault pages into the map and will drop the map lock while
2177 	 * doing so, and the VM object may end up in an inconsistent state if we
2178 	 * update the protection on the map entry in between faults.
2179 	 */
2180 	vm_map_wait_busy(map);
2181 
2182 	VM_MAP_RANGE_CHECK(map, start, end);
2183 
2184 	if (vm_map_lookup_entry(map, start, &entry)) {
2185 		vm_map_clip_start(map, entry, start);
2186 	} else {
2187 		entry = entry->next;
2188 	}
2189 
2190 	/*
2191 	 * Make a first pass to check for protection violations.
2192 	 */
2193 	for (current = entry; current->start < end; current = current->next) {
2194 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2195 			continue;
2196 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2197 			vm_map_unlock(map);
2198 			return (KERN_INVALID_ARGUMENT);
2199 		}
2200 		if ((new_prot & current->max_protection) != new_prot) {
2201 			vm_map_unlock(map);
2202 			return (KERN_PROTECTION_FAILURE);
2203 		}
2204 	}
2205 
2206 	/*
2207 	 * Do an accounting pass for private read-only mappings that
2208 	 * will now do copy-on-write due to a permitted write (e.g., a
2209 	 * debugger setting a breakpoint in the text segment).
2210 	 */
2211 	for (current = entry; current->start < end; current = current->next) {
2212 
2213 		vm_map_clip_end(map, current, end);
2214 
2215 		if (set_max ||
2216 		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
2217 		    ENTRY_CHARGED(current) ||
2218 		    (current->eflags & MAP_ENTRY_GUARD) != 0) {
2219 			continue;
2220 		}
2221 
2222 		cred = curthread->td_ucred;
2223 		obj = current->object.vm_object;
2224 
2225 		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
2226 			if (!swap_reserve(current->end - current->start)) {
2227 				vm_map_unlock(map);
2228 				return (KERN_RESOURCE_SHORTAGE);
2229 			}
2230 			crhold(cred);
2231 			current->cred = cred;
2232 			continue;
2233 		}
2234 
2235 		VM_OBJECT_WLOCK(obj);
2236 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
2237 			VM_OBJECT_WUNLOCK(obj);
2238 			continue;
2239 		}
2240 
2241 		/*
2242 		 * Charge for the whole object allocation now, since
2243 		 * we cannot distinguish between non-charged and
2244 		 * charged clipped mapping of the same object later.
2245 		 */
2246 		KASSERT(obj->charge == 0,
2247 		    ("vm_map_protect: object %p overcharged (entry %p)",
2248 		    obj, current));
2249 		if (!swap_reserve(ptoa(obj->size))) {
2250 			VM_OBJECT_WUNLOCK(obj);
2251 			vm_map_unlock(map);
2252 			return (KERN_RESOURCE_SHORTAGE);
2253 		}
2254 
2255 		crhold(cred);
2256 		obj->cred = cred;
2257 		obj->charge = ptoa(obj->size);
2258 		VM_OBJECT_WUNLOCK(obj);
2259 	}
2260 
2261 	/*
2262 	 * Go back and fix up protections. [Note that clipping is not
2263 	 * necessary the second time.]
2264 	 */
2265 	for (current = entry; current->start < end; current = current->next) {
2266 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2267 			continue;
2268 
2269 		old_prot = current->protection;
2270 
2271 		if (set_max)
2272 			current->protection =
2273 			    (current->max_protection = new_prot) &
2274 			    old_prot;
2275 		else
2276 			current->protection = new_prot;
2277 
2278 		/*
2279 		 * For user wired map entries, the normal lazy evaluation of
2280 		 * write access upgrades through soft page faults is
2281 		 * undesirable.  Instead, immediately copy any pages that are
2282 		 * copy-on-write and enable write access in the physical map.
2283 		 */
2284 		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2285 		    (current->protection & VM_PROT_WRITE) != 0 &&
2286 		    (old_prot & VM_PROT_WRITE) == 0)
2287 			vm_fault_copy_entry(map, map, current, current, NULL);
2288 
2289 		/*
2290 		 * When restricting access, update the physical map.  Worry
2291 		 * about copy-on-write here.
2292 		 */
2293 		if ((old_prot & ~current->protection) != 0) {
2294 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2295 							VM_PROT_ALL)
2296 			pmap_protect(map->pmap, current->start,
2297 			    current->end,
2298 			    current->protection & MASK(current));
2299 #undef	MASK
2300 		}
2301 		vm_map_simplify_entry(map, current);
2302 	}
2303 	vm_map_unlock(map);
2304 	return (KERN_SUCCESS);
2305 }
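
/*
 * Editorial sketch (not part of the source): revoking write access over
 * a range, as an mprotect(addr, len, PROT_READ) call ultimately
 * requests.  The identifiers addr and len are illustrative only.
 *
 *	rv = vm_map_protect(map, addr, addr + len, VM_PROT_READ, FALSE);
 */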
2306 
2307 /*
2308  *	vm_map_madvise:
2309  *
2310  *	This routine traverses a process's map, handling the madvise
2311  *	system call.  Advisories are classified as either those affecting
2312  *	the vm_map_entry structure or those affecting the underlying
2313  *	objects.
2314  */
2315 int
2316 vm_map_madvise(
2317 	vm_map_t map,
2318 	vm_offset_t start,
2319 	vm_offset_t end,
2320 	int behav)
2321 {
2322 	vm_map_entry_t current, entry;
2323 	bool modify_map;
2324 
2325 	/*
2326 	 * Some madvise calls directly modify the vm_map_entry, in which case
2327 	 * we need to use an exclusive lock on the map and we need to perform
2328 	 * various clipping operations.  Otherwise we only need a read-lock
2329 	 * on the map.
2330 	 */
2331 	switch (behav) {
2332 	case MADV_NORMAL:
2333 	case MADV_SEQUENTIAL:
2334 	case MADV_RANDOM:
2335 	case MADV_NOSYNC:
2336 	case MADV_AUTOSYNC:
2337 	case MADV_NOCORE:
2338 	case MADV_CORE:
2339 		if (start == end)
2340 			return (0);
2341 		modify_map = true;
2342 		vm_map_lock(map);
2343 		break;
2344 	case MADV_WILLNEED:
2345 	case MADV_DONTNEED:
2346 	case MADV_FREE:
2347 		if (start == end)
2348 			return (0);
2349 		modify_map = false;
2350 		vm_map_lock_read(map);
2351 		break;
2352 	default:
2353 		return (EINVAL);
2354 	}
2355 
2356 	/*
2357 	 * Locate starting entry and clip if necessary.
2358 	 */
2359 	VM_MAP_RANGE_CHECK(map, start, end);
2360 
2361 	if (vm_map_lookup_entry(map, start, &entry)) {
2362 		if (modify_map)
2363 			vm_map_clip_start(map, entry, start);
2364 	} else {
2365 		entry = entry->next;
2366 	}
2367 
2368 	if (modify_map) {
2369 		/*
2370 		 * madvise behaviors that are implemented in the vm_map_entry.
2371 		 *
2372 		 * We clip the vm_map_entry so that behavioral changes are
2373 		 * limited to the specified address range.
2374 		 */
2375 		for (current = entry; current->start < end;
2376 		    current = current->next) {
2377 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2378 				continue;
2379 
2380 			vm_map_clip_end(map, current, end);
2381 
2382 			switch (behav) {
2383 			case MADV_NORMAL:
2384 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2385 				break;
2386 			case MADV_SEQUENTIAL:
2387 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2388 				break;
2389 			case MADV_RANDOM:
2390 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2391 				break;
2392 			case MADV_NOSYNC:
2393 				current->eflags |= MAP_ENTRY_NOSYNC;
2394 				break;
2395 			case MADV_AUTOSYNC:
2396 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2397 				break;
2398 			case MADV_NOCORE:
2399 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2400 				break;
2401 			case MADV_CORE:
2402 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2403 				break;
2404 			default:
2405 				break;
2406 			}
2407 			vm_map_simplify_entry(map, current);
2408 		}
2409 		vm_map_unlock(map);
2410 	} else {
2411 		vm_pindex_t pstart, pend;
2412 
2413 		/*
2414 		 * madvise behaviors that are implemented in the underlying
2415 		 * vm_object.
2416 		 *
2417 		 * Since we don't clip the vm_map_entry, we have to clip
2418 		 * the vm_object pindex and count.
2419 		 */
2420 		for (current = entry; current->start < end;
2421 		    current = current->next) {
2422 			vm_offset_t useEnd, useStart;
2423 
2424 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2425 				continue;
2426 
2427 			pstart = OFF_TO_IDX(current->offset);
2428 			pend = pstart + atop(current->end - current->start);
2429 			useStart = current->start;
2430 			useEnd = current->end;
2431 
2432 			if (current->start < start) {
2433 				pstart += atop(start - current->start);
2434 				useStart = start;
2435 			}
2436 			if (current->end > end) {
2437 				pend -= atop(current->end - end);
2438 				useEnd = end;
2439 			}
2440 
2441 			if (pstart >= pend)
2442 				continue;
2443 
2444 			/*
2445 			 * Perform the pmap_advise() before clearing
2446 			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2447 			 * concurrent pmap operation, such as pmap_remove(),
2448 			 * could clear a reference in the pmap and set
2449 			 * PGA_REFERENCED on the page before the pmap_advise()
2450 			 * had completed.  Consequently, the page would appear
2451 			 * referenced based upon an old reference that
2452 			 * occurred before this pmap_advise() ran.
2453 			 */
2454 			if (behav == MADV_DONTNEED || behav == MADV_FREE)
2455 				pmap_advise(map->pmap, useStart, useEnd,
2456 				    behav);
2457 
2458 			vm_object_madvise(current->object.vm_object, pstart,
2459 			    pend, behav);
2460 
2461 			/*
2462 			 * Pre-populate paging structures in the
2463 			 * WILLNEED case.  For wired entries, the
2464 			 * paging structures are already populated.
2465 			 */
2466 			if (behav == MADV_WILLNEED &&
2467 			    current->wired_count == 0) {
2468 				vm_map_pmap_enter(map,
2469 				    useStart,
2470 				    current->protection,
2471 				    current->object.vm_object,
2472 				    pstart,
2473 				    ptoa(pend - pstart),
2474 				    MAP_PREFAULT_MADVISE
2475 				);
2476 			}
2477 		}
2478 		vm_map_unlock_read(map);
2479 	}
2480 	return (0);
2481 }
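
/*
 * Editorial sketch (not part of the source): MADV_NOSYNC takes the
 * exclusive-lock branch above and flags the clipped entries, while
 * MADV_WILLNEED takes the read-lock branch and prefaults resident
 * pages through vm_map_pmap_enter().  The identifiers addr and len are
 * illustrative only.
 *
 *	error = vm_map_madvise(map, addr, addr + len, MADV_WILLNEED);
 */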
2482 
2483 
2484 /*
2485  *	vm_map_inherit:
2486  *
2487  *	Sets the inheritance of the specified address
2488  *	range in the target map.  Inheritance
2489  *	affects how the map will be shared with
2490  *	child maps at the time of vmspace_fork.
2491  */
2492 int
2493 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2494 	       vm_inherit_t new_inheritance)
2495 {
2496 	vm_map_entry_t entry;
2497 	vm_map_entry_t temp_entry;
2498 
2499 	switch (new_inheritance) {
2500 	case VM_INHERIT_NONE:
2501 	case VM_INHERIT_COPY:
2502 	case VM_INHERIT_SHARE:
2503 	case VM_INHERIT_ZERO:
2504 		break;
2505 	default:
2506 		return (KERN_INVALID_ARGUMENT);
2507 	}
2508 	if (start == end)
2509 		return (KERN_SUCCESS);
2510 	vm_map_lock(map);
2511 	VM_MAP_RANGE_CHECK(map, start, end);
2512 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2513 		entry = temp_entry;
2514 		vm_map_clip_start(map, entry, start);
2515 	} else
2516 		entry = temp_entry->next;
2517 	while (entry->start < end) {
2518 		vm_map_clip_end(map, entry, end);
2519 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
2520 		    new_inheritance != VM_INHERIT_ZERO)
2521 			entry->inheritance = new_inheritance;
2522 		vm_map_simplify_entry(map, entry);
2523 		entry = entry->next;
2524 	}
2525 	vm_map_unlock(map);
2526 	return (KERN_SUCCESS);
2527 }
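
/*
 * Editorial sketch (not part of the source): the minherit(2) path, here
 * excluding a range from any future child created by vmspace_fork().
 * The identifiers addr and len are illustrative only.
 *
 *	rv = vm_map_inherit(map, addr, addr + len, VM_INHERIT_NONE);
 */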
2528 
2529 /*
2530  *	vm_map_unwire:
2531  *
2532  *	Implements both kernel and user unwiring.
2533  */
2534 int
2535 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2536     int flags)
2537 {
2538 	vm_map_entry_t entry, first_entry, tmp_entry;
2539 	vm_offset_t saved_start;
2540 	unsigned int last_timestamp;
2541 	int rv;
2542 	boolean_t need_wakeup, result, user_unwire;
2543 
2544 	if (start == end)
2545 		return (KERN_SUCCESS);
2546 	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2547 	vm_map_lock(map);
2548 	VM_MAP_RANGE_CHECK(map, start, end);
2549 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2550 		if (flags & VM_MAP_WIRE_HOLESOK)
2551 			first_entry = first_entry->next;
2552 		else {
2553 			vm_map_unlock(map);
2554 			return (KERN_INVALID_ADDRESS);
2555 		}
2556 	}
2557 	last_timestamp = map->timestamp;
2558 	entry = first_entry;
2559 	while (entry->start < end) {
2560 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2561 			/*
2562 			 * We have not yet clipped the entry.
2563 			 */
2564 			saved_start = (start >= entry->start) ? start :
2565 			    entry->start;
2566 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2567 			if (vm_map_unlock_and_wait(map, 0)) {
2568 				/*
2569 				 * Allow interruption of user unwiring?
2570 				 */
2571 			}
2572 			vm_map_lock(map);
2573 			if (last_timestamp + 1 != map->timestamp) {
2574 				/*
2575 				 * Look again for the entry because the map was
2576 				 * modified while it was unlocked.
2577 				 * Specifically, the entry may have been
2578 				 * clipped, merged, or deleted.
2579 				 */
2580 				if (!vm_map_lookup_entry(map, saved_start,
2581 				    &tmp_entry)) {
2582 					if (flags & VM_MAP_WIRE_HOLESOK)
2583 						tmp_entry = tmp_entry->next;
2584 					else {
2585 						if (saved_start == start) {
2586 							/*
2587 							 * first_entry has been deleted.
2588 							 */
2589 							vm_map_unlock(map);
2590 							return (KERN_INVALID_ADDRESS);
2591 						}
2592 						end = saved_start;
2593 						rv = KERN_INVALID_ADDRESS;
2594 						goto done;
2595 					}
2596 				}
2597 				if (entry == first_entry)
2598 					first_entry = tmp_entry;
2599 				else
2600 					first_entry = NULL;
2601 				entry = tmp_entry;
2602 			}
2603 			last_timestamp = map->timestamp;
2604 			continue;
2605 		}
2606 		vm_map_clip_start(map, entry, start);
2607 		vm_map_clip_end(map, entry, end);
2608 		/*
2609 		 * Mark the entry in case the map lock is released.  (See
2610 		 * above.)
2611 		 */
2612 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2613 		    entry->wiring_thread == NULL,
2614 		    ("owned map entry %p", entry));
2615 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2616 		entry->wiring_thread = curthread;
2617 		/*
2618 		 * Check the map for holes in the specified region.
2619 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2620 		 */
2621 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2622 		    (entry->end < end && entry->next->start > entry->end)) {
2623 			end = entry->end;
2624 			rv = KERN_INVALID_ADDRESS;
2625 			goto done;
2626 		}
2627 		/*
2628 		 * If system unwiring, require that the entry is system wired.
2629 		 */
2630 		if (!user_unwire &&
2631 		    vm_map_entry_system_wired_count(entry) == 0) {
2632 			end = entry->end;
2633 			rv = KERN_INVALID_ARGUMENT;
2634 			goto done;
2635 		}
2636 		entry = entry->next;
2637 	}
2638 	rv = KERN_SUCCESS;
2639 done:
2640 	need_wakeup = FALSE;
2641 	if (first_entry == NULL) {
2642 		result = vm_map_lookup_entry(map, start, &first_entry);
2643 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2644 			first_entry = first_entry->next;
2645 		else
2646 			KASSERT(result, ("vm_map_unwire: lookup failed"));
2647 	}
2648 	for (entry = first_entry; entry->start < end; entry = entry->next) {
2649 		/*
2650 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2651 		 * space in the unwired region could have been mapped
2652 		 * while the map lock was dropped for draining
2653 		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2654 		 * could be simultaneously wiring this new mapping
2655 		 * entry.  Detect these cases and skip any entries
2656 		 * not marked as in transition by us.
2657 		 */
2658 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2659 		    entry->wiring_thread != curthread) {
2660 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2661 			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2662 			continue;
2663 		}
2664 
2665 		if (rv == KERN_SUCCESS && (!user_unwire ||
2666 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2667 			if (user_unwire)
2668 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2669 			if (entry->wired_count == 1)
2670 				vm_map_entry_unwire(map, entry);
2671 			else
2672 				entry->wired_count--;
2673 		}
2674 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2675 		    ("vm_map_unwire: in-transition flag missing %p", entry));
2676 		KASSERT(entry->wiring_thread == curthread,
2677 		    ("vm_map_unwire: alien wire %p", entry));
2678 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2679 		entry->wiring_thread = NULL;
2680 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2681 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2682 			need_wakeup = TRUE;
2683 		}
2684 		vm_map_simplify_entry(map, entry);
2685 	}
2686 	vm_map_unlock(map);
2687 	if (need_wakeup)
2688 		vm_map_wakeup(map);
2689 	return (rv);
2690 }
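
/*
 * Editorial sketch (not part of the source): the munlock(2) path
 * unwires a user range and tolerates unmapped gaps.  The identifiers
 * addr and len are illustrative only.
 *
 *	rv = vm_map_unwire(map, addr, addr + len,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK);
 */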
2691 
2692 /*
2693  *	vm_map_wire_entry_failure:
2694  *
2695  *	Handle a wiring failure on the given entry.
2696  *
2697  *	The map should be locked.
2698  */
2699 static void
2700 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2701     vm_offset_t failed_addr)
2702 {
2703 
2704 	VM_MAP_ASSERT_LOCKED(map);
2705 	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2706 	    entry->wired_count == 1,
2707 	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2708 	KASSERT(failed_addr < entry->end,
2709 	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2710 
2711 	/*
2712 	 * If any pages at the start of this entry were successfully wired,
2713 	 * then unwire them.
2714 	 */
2715 	if (failed_addr > entry->start) {
2716 		pmap_unwire(map->pmap, entry->start, failed_addr);
2717 		vm_object_unwire(entry->object.vm_object, entry->offset,
2718 		    failed_addr - entry->start, PQ_ACTIVE);
2719 	}
2720 
2721 	/*
2722 	 * Assign an out-of-range value to represent the failure to wire this
2723 	 * entry.
2724 	 */
2725 	entry->wired_count = -1;
2726 }
2727 
2728 /*
2729  *	vm_map_wire:
2730  *
2731  *	Implements both kernel and user wiring.
2732  */
2733 int
2734 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2735     int flags)
2736 {
2737 	vm_map_entry_t entry, first_entry, tmp_entry;
2738 	vm_offset_t faddr, saved_end, saved_start;
2739 	unsigned int last_timestamp;
2740 	int rv;
2741 	boolean_t need_wakeup, result, user_wire;
2742 	vm_prot_t prot;
2743 
2744 	if (start == end)
2745 		return (KERN_SUCCESS);
2746 	prot = 0;
2747 	if (flags & VM_MAP_WIRE_WRITE)
2748 		prot |= VM_PROT_WRITE;
2749 	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2750 	vm_map_lock(map);
2751 	VM_MAP_RANGE_CHECK(map, start, end);
2752 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2753 		if (flags & VM_MAP_WIRE_HOLESOK)
2754 			first_entry = first_entry->next;
2755 		else {
2756 			vm_map_unlock(map);
2757 			return (KERN_INVALID_ADDRESS);
2758 		}
2759 	}
2760 	last_timestamp = map->timestamp;
2761 	entry = first_entry;
2762 	while (entry->start < end) {
2763 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2764 			/*
2765 			 * We have not yet clipped the entry.
2766 			 */
2767 			saved_start = (start >= entry->start) ? start :
2768 			    entry->start;
2769 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2770 			if (vm_map_unlock_and_wait(map, 0)) {
2771 				/*
2772 				 * Allow interruption of user wiring?
2773 				 */
2774 			}
2775 			vm_map_lock(map);
2776 			if (last_timestamp + 1 != map->timestamp) {
2777 				/*
2778 				 * Look again for the entry because the map was
2779 				 * modified while it was unlocked.
2780 				 * Specifically, the entry may have been
2781 				 * clipped, merged, or deleted.
2782 				 */
2783 				if (!vm_map_lookup_entry(map, saved_start,
2784 				    &tmp_entry)) {
2785 					if (flags & VM_MAP_WIRE_HOLESOK)
2786 						tmp_entry = tmp_entry->next;
2787 					else {
2788 						if (saved_start == start) {
2789 							/*
2790 							 * first_entry has been deleted.
2791 							 */
2792 							vm_map_unlock(map);
2793 							return (KERN_INVALID_ADDRESS);
2794 						}
2795 						end = saved_start;
2796 						rv = KERN_INVALID_ADDRESS;
2797 						goto done;
2798 					}
2799 				}
2800 				if (entry == first_entry)
2801 					first_entry = tmp_entry;
2802 				else
2803 					first_entry = NULL;
2804 				entry = tmp_entry;
2805 			}
2806 			last_timestamp = map->timestamp;
2807 			continue;
2808 		}
2809 		vm_map_clip_start(map, entry, start);
2810 		vm_map_clip_end(map, entry, end);
2811 		/*
2812 		 * Mark the entry in case the map lock is released.  (See
2813 		 * above.)
2814 		 */
2815 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2816 		    entry->wiring_thread == NULL,
2817 		    ("owned map entry %p", entry));
2818 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2819 		entry->wiring_thread = curthread;
2820 		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2821 		    || (entry->protection & prot) != prot) {
2822 			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2823 			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2824 				end = entry->end;
2825 				rv = KERN_INVALID_ADDRESS;
2826 				goto done;
2827 			}
2828 			goto next_entry;
2829 		}
2830 		if (entry->wired_count == 0) {
2831 			entry->wired_count++;
2832 			saved_start = entry->start;
2833 			saved_end = entry->end;
2834 
2835 			/*
2836 			 * Release the map lock, relying on the in-transition
2837 			 * mark.  Mark the map busy for fork.
2838 			 */
2839 			vm_map_busy(map);
2840 			vm_map_unlock(map);
2841 
2842 			faddr = saved_start;
2843 			do {
2844 				/*
2845 				 * Simulate a fault to get the page and enter
2846 				 * it into the physical map.
2847 				 */
2848 				if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2849 				    VM_FAULT_WIRE)) != KERN_SUCCESS)
2850 					break;
2851 			} while ((faddr += PAGE_SIZE) < saved_end);
2852 			vm_map_lock(map);
2853 			vm_map_unbusy(map);
2854 			if (last_timestamp + 1 != map->timestamp) {
2855 				/*
2856 				 * Look again for the entry because the map was
2857 				 * modified while it was unlocked.  The entry
2858 				 * may have been clipped, but NOT merged or
2859 				 * deleted.
2860 				 */
2861 				result = vm_map_lookup_entry(map, saved_start,
2862 				    &tmp_entry);
2863 				KASSERT(result, ("vm_map_wire: lookup failed"));
2864 				if (entry == first_entry)
2865 					first_entry = tmp_entry;
2866 				else
2867 					first_entry = NULL;
2868 				entry = tmp_entry;
2869 				while (entry->end < saved_end) {
2870 					/*
2871 					 * In case of failure, handle entries
2872 					 * that were not fully wired here;
2873 					 * fully wired entries are handled
2874 					 * later.
2875 					 */
2876 					if (rv != KERN_SUCCESS &&
2877 					    faddr < entry->end)
2878 						vm_map_wire_entry_failure(map,
2879 						    entry, faddr);
2880 					entry = entry->next;
2881 				}
2882 			}
2883 			last_timestamp = map->timestamp;
2884 			if (rv != KERN_SUCCESS) {
2885 				vm_map_wire_entry_failure(map, entry, faddr);
2886 				end = entry->end;
2887 				goto done;
2888 			}
2889 		} else if (!user_wire ||
2890 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2891 			entry->wired_count++;
2892 		}
2893 		/*
2894 		 * Check the map for holes in the specified region.
2895 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2896 		 */
2897 	next_entry:
2898 		if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
2899 		    entry->end < end && entry->next->start > entry->end) {
2900 			end = entry->end;
2901 			rv = KERN_INVALID_ADDRESS;
2902 			goto done;
2903 		}
2904 		entry = entry->next;
2905 	}
2906 	rv = KERN_SUCCESS;
2907 done:
2908 	need_wakeup = FALSE;
2909 	if (first_entry == NULL) {
2910 		result = vm_map_lookup_entry(map, start, &first_entry);
2911 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2912 			first_entry = first_entry->next;
2913 		else
2914 			KASSERT(result, ("vm_map_wire: lookup failed"));
2915 	}
2916 	for (entry = first_entry; entry->start < end; entry = entry->next) {
2917 		/*
2918 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2919 		 * space in the unwired region could have been mapped
2920 		 * while the map lock was dropped for faulting in the
2921 		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2922 		 * Moreover, another thread could be simultaneously
2923 		 * wiring this new mapping entry.  Detect these cases
2924 		 * and skip any entries not marked as in transition by us.
2925 		 */
2926 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2927 		    entry->wiring_thread != curthread) {
2928 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2929 			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2930 			continue;
2931 		}
2932 
2933 		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2934 			goto next_entry_done;
2935 
2936 		if (rv == KERN_SUCCESS) {
2937 			if (user_wire)
2938 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2939 		} else if (entry->wired_count == -1) {
2940 			/*
2941 			 * Wiring failed on this entry.  Thus, unwiring is
2942 			 * unnecessary.
2943 			 */
2944 			entry->wired_count = 0;
2945 		} else if (!user_wire ||
2946 		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2947 			/*
2948 			 * Undo the wiring.  Wiring succeeded on this entry
2949 			 * but failed on a later entry.
2950 			 */
2951 			if (entry->wired_count == 1)
2952 				vm_map_entry_unwire(map, entry);
2953 			else
2954 				entry->wired_count--;
2955 		}
2956 	next_entry_done:
2957 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2958 		    ("vm_map_wire: in-transition flag missing %p", entry));
2959 		KASSERT(entry->wiring_thread == curthread,
2960 		    ("vm_map_wire: alien wire %p", entry));
2961 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2962 		    MAP_ENTRY_WIRE_SKIPPED);
2963 		entry->wiring_thread = NULL;
2964 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2965 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2966 			need_wakeup = TRUE;
2967 		}
2968 		vm_map_simplify_entry(map, entry);
2969 	}
2970 	vm_map_unlock(map);
2971 	if (need_wakeup)
2972 		vm_map_wakeup(map);
2973 	return (rv);
2974 }
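
/*
 * Editorial sketch (not part of the source): the mlock(2) path wires a
 * user range and requires it to be fully mapped; a kernel caller would
 * pass VM_MAP_WIRE_SYSTEM instead of VM_MAP_WIRE_USER.  The identifiers
 * addr and len are illustrative only.
 *
 *	rv = vm_map_wire(map, addr, addr + len,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */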
2975 
2976 /*
2977  * vm_map_sync
2978  *
2979  * Push any dirty cached pages in the address range to their pager.
2980  * If syncio is TRUE, dirty pages are written synchronously.
2981  * If invalidate is TRUE, any cached pages are freed as well.
2982  *
2983  * If the size of the region from start to end is zero, we are
2984  * supposed to flush all modified pages within the region containing
2985  * start.  Unfortunately, a region can be split or coalesced with
2986  * neighboring regions, making it difficult to determine what the
2987  * original region was.  Therefore, we approximate this requirement by
2988  * flushing the current region containing start.
2989  *
2990  * Returns an error if any part of the specified range is not mapped.
2991  */
2992 int
2993 vm_map_sync(
2994 	vm_map_t map,
2995 	vm_offset_t start,
2996 	vm_offset_t end,
2997 	boolean_t syncio,
2998 	boolean_t invalidate)
2999 {
3000 	vm_map_entry_t current;
3001 	vm_map_entry_t entry;
3002 	vm_size_t size;
3003 	vm_object_t object;
3004 	vm_ooffset_t offset;
3005 	unsigned int last_timestamp;
3006 	boolean_t failed;
3007 
3008 	vm_map_lock_read(map);
3009 	VM_MAP_RANGE_CHECK(map, start, end);
3010 	if (!vm_map_lookup_entry(map, start, &entry)) {
3011 		vm_map_unlock_read(map);
3012 		return (KERN_INVALID_ADDRESS);
3013 	} else if (start == end) {
3014 		start = entry->start;
3015 		end = entry->end;
3016 	}
3017 	/*
3018 	 * Make a first pass to check for user-wired memory and holes.
3019 	 */
3020 	for (current = entry; current->start < end; current = current->next) {
3021 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
3022 			vm_map_unlock_read(map);
3023 			return (KERN_INVALID_ARGUMENT);
3024 		}
3025 		if (end > current->end &&
3026 		    current->end != current->next->start) {
3027 			vm_map_unlock_read(map);
3028 			return (KERN_INVALID_ADDRESS);
3029 		}
3030 	}
3031 
3032 	if (invalidate)
3033 		pmap_remove(map->pmap, start, end);
3034 	failed = FALSE;
3035 
3036 	/*
3037 	 * Make a second pass, cleaning/uncaching pages from the indicated
3038 	 * objects as we go.
3039 	 */
3040 	for (current = entry; current->start < end;) {
3041 		offset = current->offset + (start - current->start);
3042 		size = (end <= current->end ? end : current->end) - start;
3043 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
3044 			vm_map_t smap;
3045 			vm_map_entry_t tentry;
3046 			vm_size_t tsize;
3047 
3048 			smap = current->object.sub_map;
3049 			vm_map_lock_read(smap);
3050 			(void) vm_map_lookup_entry(smap, offset, &tentry);
3051 			tsize = tentry->end - offset;
3052 			if (tsize < size)
3053 				size = tsize;
3054 			object = tentry->object.vm_object;
3055 			offset = tentry->offset + (offset - tentry->start);
3056 			vm_map_unlock_read(smap);
3057 		} else {
3058 			object = current->object.vm_object;
3059 		}
3060 		vm_object_reference(object);
3061 		last_timestamp = map->timestamp;
3062 		vm_map_unlock_read(map);
3063 		if (!vm_object_sync(object, offset, size, syncio, invalidate))
3064 			failed = TRUE;
3065 		start += size;
3066 		vm_object_deallocate(object);
3067 		vm_map_lock_read(map);
3068 		if (last_timestamp == map->timestamp ||
3069 		    !vm_map_lookup_entry(map, start, &current))
3070 			current = current->next;
3071 	}
3072 
3073 	vm_map_unlock_read(map);
3074 	return (failed ? KERN_FAILURE : KERN_SUCCESS);
3075 }
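
/*
 * Editorial sketch (not part of the source): the msync(2) path; for
 * example, MS_SYNC | MS_INVALIDATE corresponds to syncio == TRUE and
 * invalidate == TRUE.  The identifiers addr and len are illustrative
 * only.
 *
 *	rv = vm_map_sync(map, addr, addr + len, TRUE, TRUE);
 */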
3076 
3077 /*
3078  *	vm_map_entry_unwire:	[ internal use only ]
3079  *
3080  *	Make the region specified by this entry pageable.
3081  *
3082  *	The map in question should be locked.
3083  *	[This is the reason for this routine's existence.]
3084  */
3085 static void
3086 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3087 {
3088 
3089 	VM_MAP_ASSERT_LOCKED(map);
3090 	KASSERT(entry->wired_count > 0,
3091 	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
3092 	pmap_unwire(map->pmap, entry->start, entry->end);
3093 	vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
3094 	    entry->start, PQ_ACTIVE);
3095 	entry->wired_count = 0;
3096 }
3097 
3098 static void
3099 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3100 {
3101 
3102 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3103 		vm_object_deallocate(entry->object.vm_object);
3104 	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3105 }
3106 
3107 /*
3108  *	vm_map_entry_delete:	[ internal use only ]
3109  *
3110  *	Deallocate the given entry from the target map.
3111  */
3112 static void
3113 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3114 {
3115 	vm_object_t object;
3116 	vm_pindex_t offidxstart, offidxend, count, size1;
3117 	vm_size_t size;
3118 
3119 	vm_map_entry_unlink(map, entry);
3120 	object = entry->object.vm_object;
3121 
3122 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3123 		MPASS(entry->cred == NULL);
3124 		MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3125 		MPASS(object == NULL);
3126 		vm_map_entry_deallocate(entry, map->system_map);
3127 		return;
3128 	}
3129 
3130 	size = entry->end - entry->start;
3131 	map->size -= size;
3132 
3133 	if (entry->cred != NULL) {
3134 		swap_release_by_cred(size, entry->cred);
3135 		crfree(entry->cred);
3136 	}
3137 
3138 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
3139 	    (object != NULL)) {
3140 		KASSERT(entry->cred == NULL || object->cred == NULL ||
3141 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3142 		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3143 		count = atop(size);
3144 		offidxstart = OFF_TO_IDX(entry->offset);
3145 		offidxend = offidxstart + count;
3146 		VM_OBJECT_WLOCK(object);
3147 		if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
3148 		    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
3149 		    object == kernel_object)) {
3150 			vm_object_collapse(object);
3151 
3152 			/*
3153 			 * The option OBJPR_NOTMAPPED can be passed here
3154 			 * because vm_map_delete() already performed
3155 			 * pmap_remove() on the only mapping to this range
3156 			 * of pages.
3157 			 */
3158 			vm_object_page_remove(object, offidxstart, offidxend,
3159 			    OBJPR_NOTMAPPED);
3160 			if (object->type == OBJT_SWAP)
3161 				swap_pager_freespace(object, offidxstart,
3162 				    count);
3163 			if (offidxend >= object->size &&
3164 			    offidxstart < object->size) {
3165 				size1 = object->size;
3166 				object->size = offidxstart;
3167 				if (object->cred != NULL) {
3168 					size1 -= object->size;
3169 					KASSERT(object->charge >= ptoa(size1),
3170 					    ("object %p charge < 0", object));
3171 					swap_release_by_cred(ptoa(size1),
3172 					    object->cred);
3173 					object->charge -= ptoa(size1);
3174 				}
3175 			}
3176 		}
3177 		VM_OBJECT_WUNLOCK(object);
3178 	} else
3179 		entry->object.vm_object = NULL;
3180 	if (map->system_map)
3181 		vm_map_entry_deallocate(entry, TRUE);
3182 	else {
3183 		entry->next = curthread->td_map_def_user;
3184 		curthread->td_map_def_user = entry;
3185 	}
3186 }
3187 
3188 /*
3189  *	vm_map_delete:	[ internal use only ]
3190  *
3191  *	Deallocates the given address range from the target
3192  *	map.
3193  */
3194 int
3195 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3196 {
3197 	vm_map_entry_t entry;
3198 	vm_map_entry_t first_entry;
3199 
3200 	VM_MAP_ASSERT_LOCKED(map);
3201 	if (start == end)
3202 		return (KERN_SUCCESS);
3203 
3204 	/*
3205 	 * Find the start of the region, and clip it
3206 	 */
3207 	if (!vm_map_lookup_entry(map, start, &first_entry))
3208 		entry = first_entry->next;
3209 	else {
3210 		entry = first_entry;
3211 		vm_map_clip_start(map, entry, start);
3212 	}
3213 
3214 	/*
3215 	 * Step through all entries in this region
3216 	 */
3217 	while (entry->start < end) {
3218 		vm_map_entry_t next;
3219 
3220 		/*
3221 		 * Wait for wiring or unwiring of an entry to complete.
3222 		 * Also wait for any system wirings to disappear on
3223 		 * user maps.
3224 		 */
3225 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3226 		    (vm_map_pmap(map) != kernel_pmap &&
3227 		    vm_map_entry_system_wired_count(entry) != 0)) {
3228 			unsigned int last_timestamp;
3229 			vm_offset_t saved_start;
3230 			vm_map_entry_t tmp_entry;
3231 
3232 			saved_start = entry->start;
3233 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3234 			last_timestamp = map->timestamp;
3235 			(void) vm_map_unlock_and_wait(map, 0);
3236 			vm_map_lock(map);
3237 			if (last_timestamp + 1 != map->timestamp) {
3238 				/*
3239 				 * Look again for the entry because the map was
3240 				 * modified while it was unlocked.
3241 				 * Specifically, the entry may have been
3242 				 * clipped, merged, or deleted.
3243 				 */
3244 				if (!vm_map_lookup_entry(map, saved_start,
3245 							 &tmp_entry))
3246 					entry = tmp_entry->next;
3247 				else {
3248 					entry = tmp_entry;
3249 					vm_map_clip_start(map, entry,
3250 							  saved_start);
3251 				}
3252 			}
3253 			continue;
3254 		}
3255 		vm_map_clip_end(map, entry, end);
3256 
3257 		next = entry->next;
3258 
3259 		/*
3260 		 * Unwire before removing addresses from the pmap; otherwise,
3261 		 * unwiring will put the entries back in the pmap.
3262 		 */
3263 		if (entry->wired_count != 0)
3264 			vm_map_entry_unwire(map, entry);
3265 
3266 		/*
3267 		 * Remove mappings for the pages, but only if the
3268 		 * mappings could exist.  For instance, it does not
3269 		 * make sense to call pmap_remove() for guard entries.
3270 		 */
3271 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
3272 		    entry->object.vm_object != NULL)
3273 			pmap_remove(map->pmap, entry->start, entry->end);
3274 
3275 		if (entry->end == map->anon_loc)
3276 			map->anon_loc = entry->start;
3277 
3278 		/*
3279 		 * Delete the entry only after removing all pmap
3280 		 * entries pointing to its pages.  (Otherwise, its
3281 		 * page frames may be reallocated, and any modify bits
3282 		 * will be set in the wrong object!)
3283 		 */
3284 		vm_map_entry_delete(map, entry);
3285 		entry = next;
3286 	}
3287 	return (KERN_SUCCESS);
3288 }
3289 
3290 /*
3291  *	vm_map_remove:
3292  *
3293  *	Remove the given address range from the target map.
3294  *	This is the exported form of vm_map_delete.
3295  */
3296 int
3297 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3298 {
3299 	int result;
3300 
3301 	vm_map_lock(map);
3302 	VM_MAP_RANGE_CHECK(map, start, end);
3303 	result = vm_map_delete(map, start, end);
3304 	vm_map_unlock(map);
3305 	return (result);
3306 }
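
/*
 * Editorial sketch (not part of the source): the munmap(2) path.  The
 * identifiers addr and len are illustrative only.
 *
 *	rv = vm_map_remove(map, addr, addr + len);
 */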
3307 
3308 /*
3309  *	vm_map_check_protection:
3310  *
3311  *	Assert that the target map allows the specified privilege on the
3312  *	entire address region given.  The entire region must be allocated.
3313  *
3314  *	WARNING!  This code does not and should not check whether the
3315  *	contents of the region are accessible.  For example, a smaller file
3316  *	might be mapped into a larger address space.
3317  *
3318  *	NOTE!  This code is also called by munmap().
3319  *
3320  *	The map must be locked.  A read lock is sufficient.
3321  */
3322 boolean_t
3323 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3324 			vm_prot_t protection)
3325 {
3326 	vm_map_entry_t entry;
3327 	vm_map_entry_t tmp_entry;
3328 
3329 	if (!vm_map_lookup_entry(map, start, &tmp_entry))
3330 		return (FALSE);
3331 	entry = tmp_entry;
3332 
3333 	while (start < end) {
3334 		/*
3335 		 * No holes allowed!
3336 		 */
3337 		if (start < entry->start)
3338 			return (FALSE);
3339 		/*
3340 		 * Check protection associated with entry.
3341 		 */
3342 		if ((entry->protection & protection) != protection)
3343 			return (FALSE);
3344 		/* go to next entry */
3345 		start = entry->end;
3346 		entry = entry->next;
3347 	}
3348 	return (TRUE);
3349 }
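
/*
 * Editorial example (not part of the source): a 4 KB file mmap()ed over
 * a 16 KB region passes vm_map_check_protection() for the whole range,
 * because the check inspects map entries, not the backing store; faults
 * on the pages beyond end-of-file will still fail.
 */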
3350 
3351 /*
3352  *	vm_map_copy_entry:
3353  *
3354  *	Copies the contents of the source entry to the destination
3355  *	entry.  The entries *must* be aligned properly.
3356  */
3357 static void
3358 vm_map_copy_entry(
3359 	vm_map_t src_map,
3360 	vm_map_t dst_map,
3361 	vm_map_entry_t src_entry,
3362 	vm_map_entry_t dst_entry,
3363 	vm_ooffset_t *fork_charge)
3364 {
3365 	vm_object_t src_object;
3366 	vm_map_entry_t fake_entry;
3367 	vm_offset_t size;
3368 	struct ucred *cred;
3369 	int charged;
3370 
3371 	VM_MAP_ASSERT_LOCKED(dst_map);
3372 
3373 	if ((dst_entry->eflags | src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3374 		return;
3375 
3376 	if (src_entry->wired_count == 0 ||
3377 	    (src_entry->protection & VM_PROT_WRITE) == 0) {
3378 		/*
3379 		 * If the source entry is marked needs_copy, it is already
3380 		 * write-protected.
3381 		 */
3382 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3383 		    (src_entry->protection & VM_PROT_WRITE) != 0) {
3384 			pmap_protect(src_map->pmap,
3385 			    src_entry->start,
3386 			    src_entry->end,
3387 			    src_entry->protection & ~VM_PROT_WRITE);
3388 		}
3389 
3390 		/*
3391 		 * Make a copy of the object.
3392 		 */
3393 		size = src_entry->end - src_entry->start;
3394 		if ((src_object = src_entry->object.vm_object) != NULL) {
3395 			VM_OBJECT_WLOCK(src_object);
3396 			charged = ENTRY_CHARGED(src_entry);
3397 			if (src_object->handle == NULL &&
3398 			    (src_object->type == OBJT_DEFAULT ||
3399 			    src_object->type == OBJT_SWAP)) {
3400 				vm_object_collapse(src_object);
3401 				if ((src_object->flags & (OBJ_NOSPLIT |
3402 				    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3403 					vm_object_split(src_entry);
3404 					src_object =
3405 					    src_entry->object.vm_object;
3406 				}
3407 			}
3408 			vm_object_reference_locked(src_object);
3409 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3410 			if (src_entry->cred != NULL &&
3411 			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3412 				KASSERT(src_object->cred == NULL,
3413 				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3414 				     src_object));
3415 				src_object->cred = src_entry->cred;
3416 				src_object->charge = size;
3417 			}
3418 			VM_OBJECT_WUNLOCK(src_object);
3419 			dst_entry->object.vm_object = src_object;
3420 			if (charged) {
3421 				cred = curthread->td_ucred;
3422 				crhold(cred);
3423 				dst_entry->cred = cred;
3424 				*fork_charge += size;
3425 				if (!(src_entry->eflags &
3426 				      MAP_ENTRY_NEEDS_COPY)) {
3427 					crhold(cred);
3428 					src_entry->cred = cred;
3429 					*fork_charge += size;
3430 				}
3431 			}
3432 			src_entry->eflags |= MAP_ENTRY_COW |
3433 			    MAP_ENTRY_NEEDS_COPY;
3434 			dst_entry->eflags |= MAP_ENTRY_COW |
3435 			    MAP_ENTRY_NEEDS_COPY;
3436 			dst_entry->offset = src_entry->offset;
3437 			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3438 				/*
3439 				 * MAP_ENTRY_VN_WRITECNT cannot
3440 				 * indicate write reference from
3441 				 * src_entry, since the entry is
3442 				 * marked as needs copy.  Allocate a
3443 				 * fake entry that is used to
3444 				 * decrement object->un_pager.vnp.writemappings
3445 				 * at the appropriate time.  Attach
3446 				 * fake_entry to the deferred list.
3447 				 */
3448 				fake_entry = vm_map_entry_create(dst_map);
3449 				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3450 				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3451 				vm_object_reference(src_object);
3452 				fake_entry->object.vm_object = src_object;
3453 				fake_entry->start = src_entry->start;
3454 				fake_entry->end = src_entry->end;
3455 				fake_entry->next = curthread->td_map_def_user;
3456 				curthread->td_map_def_user = fake_entry;
3457 			}
3458 
3459 			pmap_copy(dst_map->pmap, src_map->pmap,
3460 			    dst_entry->start, dst_entry->end - dst_entry->start,
3461 			    src_entry->start);
3462 		} else {
3463 			dst_entry->object.vm_object = NULL;
3464 			dst_entry->offset = 0;
3465 			if (src_entry->cred != NULL) {
3466 				dst_entry->cred = curthread->td_ucred;
3467 				crhold(dst_entry->cred);
3468 				*fork_charge += size;
3469 			}
3470 		}
3471 	} else {
3472 		/*
3473 		 * We don't want to make writeable wired pages copy-on-write.
3474 		 * Immediately copy these pages into the new map by simulating
3475 		 * page faults.  The new pages are pageable.
3476 		 */
3477 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3478 		    fork_charge);
3479 	}
3480 }
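
/*
 * Editorial note (not in the source): for an unwired source entry
 * backed by an object, both entries leave this routine marked
 * MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY and share the object read-only;
 * the first write fault in either map then allocates the private copy
 * lazily.
 */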
3481 
3482 /*
3483  * vmspace_map_entry_forked:
3484  * Update the newly-forked vmspace each time a map entry is inherited
3485  * or copied.  The values for vm_dsize and vm_tsize are approximate
3486  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3487  */
3488 static void
3489 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3490     vm_map_entry_t entry)
3491 {
3492 	vm_size_t entrysize;
3493 	vm_offset_t newend;
3494 
3495 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
3496 		return;
3497 	entrysize = entry->end - entry->start;
3498 	vm2->vm_map.size += entrysize;
3499 	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3500 		vm2->vm_ssize += btoc(entrysize);
3501 	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3502 	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3503 		newend = MIN(entry->end,
3504 		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3505 		vm2->vm_dsize += btoc(newend - entry->start);
3506 	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3507 	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3508 		newend = MIN(entry->end,
3509 		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3510 		vm2->vm_tsize += btoc(newend - entry->start);
3511 	}
3512 }
3513 
3514 /*
3515  * vmspace_fork:
3516  * Create a new process vmspace structure and vm_map
3517  * based on those of an existing process.  The new map
3518  * is based on the old map, according to the inheritance
3519  * values on the regions in that map.
3520  *
3521  * XXX It might be worth coalescing the entries added to the new vmspace.
3522  *
3523  * The source map must not be locked.
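 *
 * See vmspace_unshare() below for the canonical calling sequence: fork
 * the vmspace, then back the returned *fork_charge with
 * swap_reserve_by_cred().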
3524  */
3525 struct vmspace *
3526 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3527 {
3528 	struct vmspace *vm2;
3529 	vm_map_t new_map, old_map;
3530 	vm_map_entry_t new_entry, old_entry;
3531 	vm_object_t object;
3532 	int locked;
3533 	vm_inherit_t inh;
3534 
3535 	old_map = &vm1->vm_map;
3536 	/* Copy immutable fields of vm1 to vm2. */
3537 	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
3538 	    pmap_pinit);
3539 	if (vm2 == NULL)
3540 		return (NULL);
3541 	vm2->vm_taddr = vm1->vm_taddr;
3542 	vm2->vm_daddr = vm1->vm_daddr;
3543 	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3544 	vm_map_lock(old_map);
3545 	if (old_map->busy)
3546 		vm_map_wait_busy(old_map);
3547 	new_map = &vm2->vm_map;
3548 	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3549 	KASSERT(locked, ("vmspace_fork: lock failed"));
3550 
3551 	new_map->anon_loc = old_map->anon_loc;
3552 	old_entry = old_map->header.next;
3553 
3554 	while (old_entry != &old_map->header) {
3555 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3556 			panic("vmspace_fork: encountered a submap");
3557 
3558 		inh = old_entry->inheritance;
3559 		if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
3560 		    inh != VM_INHERIT_NONE)
3561 			inh = VM_INHERIT_COPY;
3562 
3563 		switch (inh) {
3564 		case VM_INHERIT_NONE:
3565 			break;
3566 
3567 		case VM_INHERIT_SHARE:
3568 			/*
3569 			 * Clone the entry, creating the shared object if necessary.
3570 			 */
3571 			object = old_entry->object.vm_object;
3572 			if (object == NULL) {
3573 				object = vm_object_allocate(OBJT_DEFAULT,
3574 					atop(old_entry->end - old_entry->start));
3575 				old_entry->object.vm_object = object;
3576 				old_entry->offset = 0;
3577 				if (old_entry->cred != NULL) {
3578 					object->cred = old_entry->cred;
3579 					object->charge = old_entry->end -
3580 					    old_entry->start;
3581 					old_entry->cred = NULL;
3582 				}
3583 			}
3584 
3585 			/*
3586 			 * Add the reference before calling vm_object_shadow
3587 			 * to ensure that a shadow object is created.
3588 			 */
3589 			vm_object_reference(object);
3590 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3591 				vm_object_shadow(&old_entry->object.vm_object,
3592 				    &old_entry->offset,
3593 				    old_entry->end - old_entry->start);
3594 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3595 				/* Transfer the second reference too. */
3596 				vm_object_reference(
3597 				    old_entry->object.vm_object);
3598 
3599 				/*
3600 				 * As in vm_map_simplify_entry(), the
3601 				 * vnode lock will not be acquired in
3602 				 * this call to vm_object_deallocate().
3603 				 */
3604 				vm_object_deallocate(object);
3605 				object = old_entry->object.vm_object;
3606 			}
3607 			VM_OBJECT_WLOCK(object);
3608 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3609 			if (old_entry->cred != NULL) {
3610 				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3611 				object->cred = old_entry->cred;
3612 				object->charge = old_entry->end - old_entry->start;
3613 				old_entry->cred = NULL;
3614 			}
3615 
3616 			/*
3617 			 * Assert the correct state of the vnode
3618 			 * v_writecount while the object is locked, so
3619 			 * that we need not relock it later just for the
3620 			 * assertion.
3621 			 */
3622 			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3623 			    object->type == OBJT_VNODE) {
3624 				KASSERT(((struct vnode *)object->handle)->
3625 				    v_writecount > 0,
3626 				    ("vmspace_fork: v_writecount %p", object));
3627 				KASSERT(object->un_pager.vnp.writemappings > 0,
3628 				    ("vmspace_fork: vnp.writemappings %p",
3629 				    object));
3630 			}
3631 			VM_OBJECT_WUNLOCK(object);
3632 
3633 			/*
3634 			 * Clone the entry, referencing the shared object.
3635 			 */
3636 			new_entry = vm_map_entry_create(new_map);
3637 			*new_entry = *old_entry;
3638 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3639 			    MAP_ENTRY_IN_TRANSITION);
3640 			new_entry->wiring_thread = NULL;
3641 			new_entry->wired_count = 0;
3642 			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3643 				vnode_pager_update_writecount(object,
3644 				    new_entry->start, new_entry->end);
3645 			}
3646 
3647 			/*
3648 			 * Insert the entry into the new map -- we know we're
3649 			 * inserting at the end of the new map.
3650 			 */
3651 			vm_map_entry_link(new_map, new_map->header.prev,
3652 			    new_entry);
3653 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3654 
3655 			/*
3656 			 * Update the physical map
3657 			 */
3658 			pmap_copy(new_map->pmap, old_map->pmap,
3659 			    new_entry->start,
3660 			    (old_entry->end - old_entry->start),
3661 			    old_entry->start);
3662 			break;
3663 
3664 		case VM_INHERIT_COPY:
3665 			/*
3666 			 * Clone the entry and link into the map.
3667 			 */
3668 			new_entry = vm_map_entry_create(new_map);
3669 			*new_entry = *old_entry;
3670 			/*
3671 			 * Copied entry is COW over the old object.
3672 			 */
3673 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3674 			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3675 			new_entry->wiring_thread = NULL;
3676 			new_entry->wired_count = 0;
3677 			new_entry->object.vm_object = NULL;
3678 			new_entry->cred = NULL;
3679 			vm_map_entry_link(new_map, new_map->header.prev,
3680 			    new_entry);
3681 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3682 			vm_map_copy_entry(old_map, new_map, old_entry,
3683 			    new_entry, fork_charge);
3684 			break;
3685 
3686 		case VM_INHERIT_ZERO:
3687 			/*
3688 			 * Create a new anonymous mapping entry modeled on
3689 			 * the old one.
3690 			 */
3691 			new_entry = vm_map_entry_create(new_map);
3692 			memset(new_entry, 0, sizeof(*new_entry));
3693 
3694 			new_entry->start = old_entry->start;
3695 			new_entry->end = old_entry->end;
3696 			new_entry->eflags = old_entry->eflags &
3697 			    ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
3698 			    MAP_ENTRY_VN_WRITECNT);
3699 			new_entry->protection = old_entry->protection;
3700 			new_entry->max_protection = old_entry->max_protection;
3701 			new_entry->inheritance = VM_INHERIT_ZERO;
3702 
3703 			vm_map_entry_link(new_map, new_map->header.prev,
3704 			    new_entry);
3705 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3706 
3707 			new_entry->cred = curthread->td_ucred;
3708 			crhold(new_entry->cred);
3709 			*fork_charge += (new_entry->end - new_entry->start);
3710 
3711 			break;
3712 		}
3713 		old_entry = old_entry->next;
3714 	}
3715 	/*
3716 	 * Unlock with sx_xunlock() directly, instead of vm_map_unlock(),
3717 	 * to postpone handling the deferred map entries until both the
3718 	 * old_map and new_map locks are released.
3719 	 */
3720 	sx_xunlock(&old_map->lock);
3721 	sx_xunlock(&new_map->lock);
3722 	vm_map_process_deferred();
3723 
3724 	return (vm2);
3725 }
3726 
3727 /*
3728  * Create a process's stack for exec_new_vmspace().  This function is never
3729  * asked to wire the newly created stack.
3730  */
3731 int
3732 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3733     vm_prot_t prot, vm_prot_t max, int cow)
3734 {
3735 	vm_size_t growsize, init_ssize;
3736 	rlim_t vmemlim;
3737 	int rv;
3738 
3739 	MPASS((map->flags & MAP_WIREFUTURE) == 0);
3740 	growsize = sgrowsiz;
3741 	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3742 	vm_map_lock(map);
3743 	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3744 	/* If we would blow our VMEM resource limit, no go */
3745 	if (map->size + init_ssize > vmemlim) {
3746 		rv = KERN_NO_SPACE;
3747 		goto out;
3748 	}
3749 	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3750 	    max, cow);
3751 out:
3752 	vm_map_unlock(map);
3753 	return (rv);
3754 }
3755 
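/*
 * Number of guard pages protecting a growable stack; may be tuned at
 * boot or runtime via the security.bsd.stack_guard_page sysctl below.
 */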
3756 static int stack_guard_page = 1;
3757 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
3758     &stack_guard_page, 0,
3759     "Specifies the number of guard pages for a stack that grows");
3760 
3761 static int
3762 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3763     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3764 {
3765 	vm_map_entry_t new_entry, prev_entry;
3766 	vm_offset_t bot, gap_bot, gap_top, top;
3767 	vm_size_t init_ssize, sgp;
3768 	int orient, rv;
3769 
3770 	/*
3771 	 * The stack orientation is piggybacked with the cow argument.
3772 	 * Extract it into orient and mask the cow argument so that we
3773 	 * don't pass it around further.
3774 	 */
3775 	orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
3776 	KASSERT(orient != 0, ("No stack grow direction"));
3777 	KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
3778 	    ("bi-dir stack"));
3779 
3780 	if (addrbos < vm_map_min(map) ||
3781 	    addrbos + max_ssize > vm_map_max(map) ||
3782 	    addrbos + max_ssize <= addrbos)
3783 		return (KERN_INVALID_ADDRESS);
3784 	sgp = (vm_size_t)stack_guard_page * PAGE_SIZE;
3785 	if (sgp >= max_ssize)
3786 		return (KERN_INVALID_ARGUMENT);
3787 
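	/*
	 * Size the initial stack mapping, leaving room for the guard
	 * region within max_ssize.
	 */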
3788 	init_ssize = growsize;
3789 	if (max_ssize < init_ssize + sgp)
3790 		init_ssize = max_ssize - sgp;
3791 
3792 	/* If addr is already mapped, no go */
3793 	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3794 		return (KERN_NO_SPACE);
3795 
3796 	/*
3797 	 * If we can't accommodate max_ssize in the current mapping, no go.
3798 	 */
3799 	if (prev_entry->next->start < addrbos + max_ssize)
3800 		return (KERN_NO_SPACE);
3801 
3802 	/*
3803 	 * We initially map a stack of only init_ssize.  We will grow as
3804 	 * needed later.  Depending on the orientation of the stack (i.e.
3805 	 * the grow direction) we either map at the top of the range, the
3806 	 * bottom of the range, or in the middle.
3807 	 *
3808 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3809 	 * and cow to be 0.  Possibly we should eliminate these as input
3810 	 * parameters, and just pass these values here in the insert call.
3811 	 */
3812 	if (orient == MAP_STACK_GROWS_DOWN) {
3813 		bot = addrbos + max_ssize - init_ssize;
3814 		top = bot + init_ssize;
3815 		gap_bot = addrbos;
3816 		gap_top = bot;
3817 	} else /* if (orient == MAP_STACK_GROWS_UP) */ {
3818 		bot = addrbos;
3819 		top = bot + init_ssize;
3820 		gap_bot = top;
3821 		gap_top = addrbos + max_ssize;
3822 	}
3823 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3824 	if (rv != KERN_SUCCESS)
3825 		return (rv);
3826 	new_entry = prev_entry->next;
3827 	KASSERT(new_entry->end == top || new_entry->start == bot,
3828 	    ("Bad entry start/end for new stack entry"));
3829 	KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
3830 	    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
3831 	    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3832 	KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
3833 	    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
3834 	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
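	/*
	 * Reserve the remainder of the range as a guard entry into
	 * which the stack may later grow.
	 */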
3835 	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
3836 	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
3837 	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
3838 	if (rv != KERN_SUCCESS)
3839 		(void)vm_map_delete(map, bot, top);
3840 	return (rv);
3841 }
3842 
3843 /*
3844  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
3845  * successfully grow the stack.
3846  */
3847 static int
3848 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
3849 {
3850 	vm_map_entry_t stack_entry;
3851 	struct proc *p;
3852 	struct vmspace *vm;
3853 	struct ucred *cred;
3854 	vm_offset_t gap_end, gap_start, grow_start;
3855 	size_t grow_amount, guard, max_grow;
3856 	rlim_t lmemlim, stacklim, vmemlim;
3857 	int rv, rv1;
3858 	bool gap_deleted, grow_down, is_procstack;
3859 #ifdef notyet
3860 	uint64_t limit;
3861 #endif
3862 #ifdef RACCT
3863 	int error;
3864 #endif
3865 
3866 	p = curproc;
3867 	vm = p->p_vmspace;
3868 
3869 	/*
3870 	 * Disallow stack growth when the access is performed by a
3871 	 * debugger or AIO daemon, because the wrong resource limits
3872 	 * would be applied in those contexts.
3873 	 */
3874 	if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL)
3875 		return (KERN_FAILURE);
3876 
3877 	MPASS(!map->system_map);
3878 
3879 	guard = stack_guard_page * PAGE_SIZE;
3880 	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
3881 	stacklim = lim_cur(curthread, RLIMIT_STACK);
3882 	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3883 retry:
3884 	/* Nothing to grow unless addr lies within a stack gap entry. */
3885 	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
3886 		return (KERN_FAILURE);
3887 	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
3888 		return (KERN_SUCCESS);
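	/*
	 * The gap entry's type records the grow direction of the
	 * adjacent stack entry; compute how far the stack must grow
	 * to cover addr.
	 */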
3889 	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
3890 		stack_entry = gap_entry->next;
3891 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
3892 		    stack_entry->start != gap_entry->end)
3893 			return (KERN_FAILURE);
3894 		grow_amount = round_page(stack_entry->start - addr);
3895 		grow_down = true;
3896 	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
3897 		stack_entry = gap_entry->prev;
3898 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
3899 		    stack_entry->end != gap_entry->start)
3900 			return (KERN_FAILURE);
3901 		grow_amount = round_page(addr + 1 - stack_entry->end);
3902 		grow_down = false;
3903 	} else {
3904 		return (KERN_FAILURE);
3905 	}
3906 	max_grow = gap_entry->end - gap_entry->start;
3907 	if (guard > max_grow)
3908 		return (KERN_NO_SPACE);
3909 	max_grow -= guard;
3910 	if (grow_amount > max_grow)
3911 		return (KERN_NO_SPACE);
3912 
3913 	/*
3914 	 * If this is the main process stack, see if we're over the stack
3915 	 * limit.
3916 	 */
3917 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
3918 	    addr < (vm_offset_t)p->p_sysent->sv_usrstack;
3919 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
3920 		return (KERN_NO_SPACE);
3921 
3922 #ifdef RACCT
3923 	if (racct_enable) {
3924 		PROC_LOCK(p);
3925 		if (is_procstack && racct_set(p, RACCT_STACK,
3926 		    ctob(vm->vm_ssize) + grow_amount)) {
3927 			PROC_UNLOCK(p);
3928 			return (KERN_NO_SPACE);
3929 		}
3930 		PROC_UNLOCK(p);
3931 	}
3932 #endif
3933 
3934 	grow_amount = roundup(grow_amount, sgrowsiz);
3935 	if (grow_amount > max_grow)
3936 		grow_amount = max_grow;
3937 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3938 		grow_amount = trunc_page((vm_size_t)stacklim) -
3939 		    ctob(vm->vm_ssize);
3940 	}
3941 
3942 #ifdef notyet
3943 	PROC_LOCK(p);
3944 	limit = racct_get_available(p, RACCT_STACK);
3945 	PROC_UNLOCK(p);
3946 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3947 		grow_amount = limit - ctob(vm->vm_ssize);
3948 #endif
3949 
3950 	if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
3951 		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3952 			rv = KERN_NO_SPACE;
3953 			goto out;
3954 		}
3955 #ifdef RACCT
3956 		if (racct_enable) {
3957 			PROC_LOCK(p);
3958 			if (racct_set(p, RACCT_MEMLOCK,
3959 			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3960 				PROC_UNLOCK(p);
3961 				rv = KERN_NO_SPACE;
3962 				goto out;
3963 			}
3964 			PROC_UNLOCK(p);
3965 		}
3966 #endif
3967 	}
3968 
3969 	/* If we would blow our VMEM resource limit, no go */
3970 	if (map->size + grow_amount > vmemlim) {
3971 		rv = KERN_NO_SPACE;
3972 		goto out;
3973 	}
3974 #ifdef RACCT
3975 	if (racct_enable) {
3976 		PROC_LOCK(p);
3977 		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3978 			PROC_UNLOCK(p);
3979 			rv = KERN_NO_SPACE;
3980 			goto out;
3981 		}
3982 		PROC_UNLOCK(p);
3983 	}
3984 #endif
3985 
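	/*
	 * Upgrade to an exclusive map lock.  If the upgrade had to
	 * drop the lock, the map may have changed; restart the lookup.
	 */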
3986 	if (vm_map_lock_upgrade(map)) {
3987 		gap_entry = NULL;
3988 		vm_map_lock_read(map);
3989 		goto retry;
3990 	}
3991 
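	/*
	 * For a downward-growing stack, carve the new pages out of
	 * the gap entry: delete the gap if it is consumed entirely,
	 * otherwise shrink it by grow_amount.
	 */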
3992 	if (grow_down) {
3993 		grow_start = gap_entry->end - grow_amount;
3994 		if (gap_entry->start + grow_amount == gap_entry->end) {
3995 			gap_start = gap_entry->start;
3996 			gap_end = gap_entry->end;
3997 			vm_map_entry_delete(map, gap_entry);
3998 			gap_deleted = true;
3999 		} else {
4000 			MPASS(gap_entry->start < gap_entry->end - grow_amount);
4001 			gap_entry->end -= grow_amount;
4002 			vm_map_entry_resize_free(map, gap_entry);
4003 			gap_deleted = false;
4004 		}
4005 		rv = vm_map_insert(map, NULL, 0, grow_start,
4006 		    grow_start + grow_amount,
4007 		    stack_entry->protection, stack_entry->max_protection,
4008 		    MAP_STACK_GROWS_DOWN);
4009 		if (rv != KERN_SUCCESS) {
4010 			if (gap_deleted) {
4011 				rv1 = vm_map_insert(map, NULL, 0, gap_start,
4012 				    gap_end, VM_PROT_NONE, VM_PROT_NONE,
4013 				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
4014 				MPASS(rv1 == KERN_SUCCESS);
4015 			} else {
4016 				gap_entry->end += grow_amount;
4017 				vm_map_entry_resize_free(map, gap_entry);
4018 			}
4019 		}
4020 	} else {
4021 		grow_start = stack_entry->end;
4022 		cred = stack_entry->cred;
4023 		if (cred == NULL && stack_entry->object.vm_object != NULL)
4024 			cred = stack_entry->object.vm_object->cred;
4025 		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
4026 			rv = KERN_NO_SPACE;
4027 		/* Grow the underlying object if applicable. */
4028 		else if (stack_entry->object.vm_object == NULL ||
4029 		    vm_object_coalesce(stack_entry->object.vm_object,
4030 		    stack_entry->offset,
4031 		    (vm_size_t)(stack_entry->end - stack_entry->start),
4032 		    (vm_size_t)grow_amount, cred != NULL)) {
4033 			if (gap_entry->start + grow_amount == gap_entry->end)
4034 				vm_map_entry_delete(map, gap_entry);
4035 			else
4036 				gap_entry->start += grow_amount;
4037 			stack_entry->end += grow_amount;
4038 			map->size += grow_amount;
4039 			vm_map_entry_resize_free(map, stack_entry);
4040 			rv = KERN_SUCCESS;
4041 		} else
4042 			rv = KERN_FAILURE;
4043 	}
4044 	if (rv == KERN_SUCCESS && is_procstack)
4045 		vm->vm_ssize += btoc(grow_amount);
4046 
4047 	/*
4048 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
4049 	 */
4050 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4051 		vm_map_unlock(map);
4052 		vm_map_wire(map, grow_start, grow_start + grow_amount,
4053 		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
4054 		vm_map_lock_read(map);
4055 	} else
4056 		vm_map_lock_downgrade(map);
4057 
4058 out:
4059 #ifdef RACCT
4060 	if (racct_enable && rv != KERN_SUCCESS) {
4061 		PROC_LOCK(p);
4062 		error = racct_set(p, RACCT_VMEM, map->size);
4063 		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
4064 		if (!old_mlock) {
4065 			error = racct_set(p, RACCT_MEMLOCK,
4066 			    ptoa(pmap_wired_count(map->pmap)));
4067 			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
4068 		}
4069 		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
4070 		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
4071 		PROC_UNLOCK(p);
4072 	}
4073 #endif
4074 
4075 	return (rv);
4076 }
4077 
4078 /*
4079  * Unshare the specified VM space for exec.  The process is given a
4080  * new, empty vmspace, regardless of whether the old one was shared.
4081  */
4082 int
4083 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
4084 {
4085 	struct vmspace *oldvmspace = p->p_vmspace;
4086 	struct vmspace *newvmspace;
4087 
4088 	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4089 	    ("vmspace_exec recursed"));
4090 	newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
4091 	if (newvmspace == NULL)
4092 		return (ENOMEM);
4093 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
4094 	/*
4095 	 * This code is written this way for prototype purposes.  The
4096 	 * goal is to avoid running down the vmspace here, but to let the
4097 	 * other processes that are still using the vmspace finally
4098 	 * run it down.  Even though there is little or no chance of blocking
4099 	 * here, it is a good idea to keep this form for future mods.
4100 	 */
4101 	PROC_VMSPACE_LOCK(p);
4102 	p->p_vmspace = newvmspace;
4103 	PROC_VMSPACE_UNLOCK(p);
4104 	if (p == curthread->td_proc)
4105 		pmap_activate(curthread);
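	/*
	 * Flag the thread so that the exec code can drop the reference
	 * on the old vmspace once the switch is complete.
	 */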
4106 	curthread->td_pflags |= TDP_EXECVMSPC;
4107 	return (0);
4108 }
4109 
4110 /*
4111  * Unshare the specified VM space for forcing COW.  This
4112  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4113  */
4114 int
4115 vmspace_unshare(struct proc *p)
4116 {
4117 	struct vmspace *oldvmspace = p->p_vmspace;
4118 	struct vmspace *newvmspace;
4119 	vm_ooffset_t fork_charge;
4120 
4121 	if (oldvmspace->vm_refcnt == 1)
4122 		return (0);
4123 	fork_charge = 0;
4124 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
4125 	if (newvmspace == NULL)
4126 		return (ENOMEM);
4127 	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4128 		vmspace_free(newvmspace);
4129 		return (ENOMEM);
4130 	}
4131 	PROC_VMSPACE_LOCK(p);
4132 	p->p_vmspace = newvmspace;
4133 	PROC_VMSPACE_UNLOCK(p);
4134 	if (p == curthread->td_proc)
4135 		pmap_activate(curthread);
4136 	vmspace_free(oldvmspace);
4137 	return (0);
4138 }
4139 
4140 /*
4141  *	vm_map_lookup:
4142  *
4143  *	Finds the VM object, offset, and
4144  *	protection for a given virtual address in the
4145  *	specified map, assuming a page fault of the
4146  *	type specified.
4147  *
4148  *	Leaves the map in question locked for read; return
4149  *	values are guaranteed until a vm_map_lookup_done
4150  *	call is performed.  Note that the map argument
4151  *	is in/out; the returned map must be used in
4152  *	the call to vm_map_lookup_done.
4153  *
4154  *	A handle (out_entry) is returned for use in
4155  *	vm_map_lookup_done, to make that fast.
4156  *
4157  *	If a lookup is requested with "write protection"
4158  *	specified, the map may be changed to perform virtual
4159  *	copying operations, although the data referenced will
4160  *	remain the same.
4161  */
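/*
 * A sketch of the typical calling sequence (cf. vm_fault()):
 *
 *	if (vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *	(use *object at *pindex, subject to *prot)
 *	vm_map_lookup_done(map, entry);
 */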
4162 int
4163 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
4164 	      vm_offset_t vaddr,
4165 	      vm_prot_t fault_typea,
4166 	      vm_map_entry_t *out_entry,	/* OUT */
4167 	      vm_object_t *object,		/* OUT */
4168 	      vm_pindex_t *pindex,		/* OUT */
4169 	      vm_prot_t *out_prot,		/* OUT */
4170 	      boolean_t *wired)			/* OUT */
4171 {
4172 	vm_map_entry_t entry;
4173 	vm_map_t map = *var_map;
4174 	vm_prot_t prot;
4175 	vm_prot_t fault_type = fault_typea;
4176 	vm_object_t eobject;
4177 	vm_size_t size;
4178 	struct ucred *cred;
4179 
4180 RetryLookup:
4181 
4182 	vm_map_lock_read(map);
4183 
4184 RetryLookupLocked:
4185 	/*
4186 	 * Lookup the faulting address.
4187 	 */
4188 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4189 		vm_map_unlock_read(map);
4190 		return (KERN_INVALID_ADDRESS);
4191 	}
4192 
4193 	entry = *out_entry;
4194 
4195 	/*
4196 	 * Handle submaps.
4197 	 */
4198 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4199 		vm_map_t old_map = map;
4200 
4201 		*var_map = map = entry->object.sub_map;
4202 		vm_map_unlock_read(old_map);
4203 		goto RetryLookup;
4204 	}
4205 
4206 	/*
4207 	 * Check whether this task is allowed to have this page.
4208 	 */
4209 	prot = entry->protection;
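	/*
	 * A fault on a guard entry that marks a stack gap is a request
	 * to grow the adjacent stack; try the grow and redo the lookup.
	 */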
4210 	if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4211 		fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4212 		if (prot == VM_PROT_NONE && map != kernel_map &&
4213 		    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4214 		    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4215 		    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4216 		    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4217 			goto RetryLookupLocked;
4218 	}
4219 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4220 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4221 		vm_map_unlock_read(map);
4222 		return (KERN_PROTECTION_FAILURE);
4223 	}
4224 	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4225 	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4226 	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4227 	    ("entry %p flags %x", entry, entry->eflags));
4228 	if ((fault_typea & VM_PROT_COPY) != 0 &&
4229 	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
4230 	    (entry->eflags & MAP_ENTRY_COW) == 0) {
4231 		vm_map_unlock_read(map);
4232 		return (KERN_PROTECTION_FAILURE);
4233 	}
4234 
4235 	/*
4236 	 * If this page is not pageable, we have to get it for all possible
4237 	 * accesses.
4238 	 */
4239 	*wired = (entry->wired_count != 0);
4240 	if (*wired)
4241 		fault_type = entry->protection;
4242 	size = entry->end - entry->start;
4243 	/*
4244 	 * If the entry was copy-on-write, shadow it now or demote access.
4245 	 */
4246 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4247 		/*
4248 		 * If we want to write the page, we may as well handle that
4249 		 * now since we've got the map locked.
4250 		 *
4251 		 * If we don't need to write the page, we just demote the
4252 		 * permissions allowed.
4253 		 */
4254 		if ((fault_type & VM_PROT_WRITE) != 0 ||
4255 		    (fault_typea & VM_PROT_COPY) != 0) {
4256 			/*
4257 			 * Make a new object, and place it in the object
4258 			 * chain.  Note that no new references have appeared
4259 			 * -- one just moved from the map to the new
4260 			 * object.
4261 			 */
4262 			if (vm_map_lock_upgrade(map))
4263 				goto RetryLookup;
4264 
4265 			if (entry->cred == NULL) {
4266 				/*
4267 				 * The debugger owner is charged for
4268 				 * the memory.
4269 				 */
4270 				cred = curthread->td_ucred;
4271 				crhold(cred);
4272 				if (!swap_reserve_by_cred(size, cred)) {
4273 					crfree(cred);
4274 					vm_map_unlock(map);
4275 					return (KERN_RESOURCE_SHORTAGE);
4276 				}
4277 				entry->cred = cred;
4278 			}
4279 			vm_object_shadow(&entry->object.vm_object,
4280 			    &entry->offset, size);
4281 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4282 			eobject = entry->object.vm_object;
4283 			if (eobject->cred != NULL) {
4284 				/*
4285 				 * The object was not shadowed.
4286 				 */
4287 				swap_release_by_cred(size, entry->cred);
4288 				crfree(entry->cred);
4289 				entry->cred = NULL;
4290 			} else if (entry->cred != NULL) {
4291 				VM_OBJECT_WLOCK(eobject);
4292 				eobject->cred = entry->cred;
4293 				eobject->charge = size;
4294 				VM_OBJECT_WUNLOCK(eobject);
4295 				entry->cred = NULL;
4296 			}
4297 
4298 			vm_map_lock_downgrade(map);
4299 		} else {
4300 			/*
4301 			 * We're attempting to read a copy-on-write page --
4302 			 * don't allow writes.
4303 			 */
4304 			prot &= ~VM_PROT_WRITE;
4305 		}
4306 	}
4307 
4308 	/*
4309 	 * Create an object if necessary.
4310 	 */
4311 	if (entry->object.vm_object == NULL &&
4312 	    !map->system_map) {
4313 		if (vm_map_lock_upgrade(map))
4314 			goto RetryLookup;
4315 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4316 		    atop(size));
4317 		entry->offset = 0;
4318 		if (entry->cred != NULL) {
4319 			VM_OBJECT_WLOCK(entry->object.vm_object);
4320 			entry->object.vm_object->cred = entry->cred;
4321 			entry->object.vm_object->charge = size;
4322 			VM_OBJECT_WUNLOCK(entry->object.vm_object);
4323 			entry->cred = NULL;
4324 		}
4325 		vm_map_lock_downgrade(map);
4326 	}
4327 
4328 	/*
4329 	 * Return the object/offset from this entry.  If the entry was
4330 	 * copy-on-write or empty, it has been fixed up.
4331 	 */
4332 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4333 	*object = entry->object.vm_object;
4334 
4335 	*out_prot = prot;
4336 	return (KERN_SUCCESS);
4337 }
4338 
4339 /*
4340  *	vm_map_lookup_locked:
4341  *
4342  *	Lookup the faulting address.  A version of vm_map_lookup that returns
4343  *	KERN_FAILURE instead of blocking on map lock or memory allocation.
4344  */
4345 int
4346 vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
4347 		     vm_offset_t vaddr,
4348 		     vm_prot_t fault_typea,
4349 		     vm_map_entry_t *out_entry,	/* OUT */
4350 		     vm_object_t *object,	/* OUT */
4351 		     vm_pindex_t *pindex,	/* OUT */
4352 		     vm_prot_t *out_prot,	/* OUT */
4353 		     boolean_t *wired)		/* OUT */
4354 {
4355 	vm_map_entry_t entry;
4356 	vm_map_t map = *var_map;
4357 	vm_prot_t prot;
4358 	vm_prot_t fault_type = fault_typea;
4359 
4360 	/*
4361 	 * Lookup the faulting address.
4362 	 */
4363 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
4364 		return (KERN_INVALID_ADDRESS);
4365 
4366 	entry = *out_entry;
4367 
4368 	/*
4369 	 * Fail if the entry refers to a submap.
4370 	 */
4371 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4372 		return (KERN_FAILURE);
4373 
4374 	/*
4375 	 * Check whether this task is allowed to have this page.
4376 	 */
4377 	prot = entry->protection;
4378 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4379 	if ((fault_type & prot) != fault_type)
4380 		return (KERN_PROTECTION_FAILURE);
4381 
4382 	/*
4383 	 * If this page is not pageable, we have to get it for all possible
4384 	 * accesses.
4385 	 */
4386 	*wired = (entry->wired_count != 0);
4387 	if (*wired)
4388 		fault_type = entry->protection;
4389 
4390 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4391 		/*
4392 		 * Fail if the entry was copy-on-write for a write fault.
4393 		 */
4394 		if (fault_type & VM_PROT_WRITE)
4395 			return (KERN_FAILURE);
4396 		/*
4397 		 * We're attempting to read a copy-on-write page --
4398 		 * don't allow writes.
4399 		 */
4400 		prot &= ~VM_PROT_WRITE;
4401 	}
4402 
4403 	/*
4404 	 * Fail if an object should be created.
4405 	 */
4406 	if (entry->object.vm_object == NULL && !map->system_map)
4407 		return (KERN_FAILURE);
4408 
4409 	/*
4410 	 * Return the object/offset from this entry.  If the entry was
4411 	 * copy-on-write or empty, it has been fixed up.
4412 	 */
4413 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4414 	*object = entry->object.vm_object;
4415 
4416 	*out_prot = prot;
4417 	return (KERN_SUCCESS);
4418 }
4419 
4420 /*
4421  *	vm_map_lookup_done:
4422  *
4423  *	Releases locks acquired by a vm_map_lookup
4424  *	(according to the handle returned by that lookup).
4425  */
4426 void
4427 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4428 {
4429 	/*
4430 	 * Unlock the main-level map
4431 	 * Unlock the main-level map.
4432 	vm_map_unlock_read(map);
4433 }
4434 
4435 vm_offset_t
4436 vm_map_max_KBI(const struct vm_map *map)
4437 {
4438 
4439 	return (vm_map_max(map));
4440 }
4441 
4442 vm_offset_t
4443 vm_map_min_KBI(const struct vm_map *map)
4444 {
4445 
4446 	return (vm_map_min(map));
4447 }
4448 
4449 pmap_t
4450 vm_map_pmap_KBI(vm_map_t map)
4451 {
4452 
4453 	return (map->pmap);
4454 }
4455 
4456 #include "opt_ddb.h"
4457 #ifdef DDB
4458 #include <sys/kernel.h>
4459 
4460 #include <ddb/ddb.h>
4461 
4462 static void
4463 vm_map_print(vm_map_t map)
4464 {
4465 	vm_map_entry_t entry;
4466 
4467 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4468 	    (void *)map,
4469 	    (void *)map->pmap, map->nentries, map->timestamp);
4470 
4471 	db_indent += 2;
4472 	for (entry = map->header.next; entry != &map->header;
4473 	    entry = entry->next) {
4474 		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x\n",
4475 		    (void *)entry, (void *)entry->start, (void *)entry->end,
4476 		    entry->eflags);
4477 		{
4478 			static char *inheritance_name[4] =
4479 			{"share", "copy", "none", "donate_copy"};
4480 
4481 			db_iprintf(" prot=%x/%x/%s",
4482 			    entry->protection,
4483 			    entry->max_protection,
4484 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4485 			if (entry->wired_count != 0)
4486 				db_printf(", wired");
4487 		}
4488 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4489 			db_printf(", share=%p, offset=0x%jx\n",
4490 			    (void *)entry->object.sub_map,
4491 			    (uintmax_t)entry->offset);
4492 			if ((entry->prev == &map->header) ||
4493 			    (entry->prev->object.sub_map !=
4494 				entry->object.sub_map)) {
4495 				db_indent += 2;
4496 				vm_map_print((vm_map_t)entry->object.sub_map);
4497 				db_indent -= 2;
4498 			}
4499 		} else {
4500 			if (entry->cred != NULL)
4501 				db_printf(", ruid %d", entry->cred->cr_ruid);
4502 			db_printf(", object=%p, offset=0x%jx",
4503 			    (void *)entry->object.vm_object,
4504 			    (uintmax_t)entry->offset);
4505 			if (entry->object.vm_object && entry->object.vm_object->cred)
4506 				db_printf(", obj ruid %d charge %jx",
4507 				    entry->object.vm_object->cred->cr_ruid,
4508 				    (uintmax_t)entry->object.vm_object->charge);
4509 			if (entry->eflags & MAP_ENTRY_COW)
4510 				db_printf(", copy (%s)",
4511 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4512 			db_printf("\n");
4513 
4514 			if ((entry->prev == &map->header) ||
4515 			    (entry->prev->object.vm_object !=
4516 				entry->object.vm_object)) {
4517 				db_indent += 2;
4518 				vm_object_print((db_expr_t)(intptr_t)
4519 						entry->object.vm_object,
4520 						0, 0, (char *)0);
4521 				db_indent -= 2;
4522 			}
4523 		}
4524 	}
4525 	db_indent -= 2;
4526 }
4527 
4528 DB_SHOW_COMMAND(map, map)
4529 {
4530 
4531 	if (!have_addr) {
4532 		db_printf("usage: show map <addr>\n");
4533 		return;
4534 	}
4535 	vm_map_print((vm_map_t)addr);
4536 }
4537 
4538 DB_SHOW_COMMAND(procvm, procvm)
4539 {
4540 	struct proc *p;
4541 
4542 	if (have_addr) {
4543 		p = db_lookup_proc(addr);
4544 	} else {
4545 		p = curproc;
4546 	}
4547 
4548 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4549 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4550 	    (void *)vmspace_pmap(p->p_vmspace));
4551 
4552 	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4553 }
4554 
4555 #endif /* DDB */
4556