xref: /freebsd/sys/vm/vm_map.c (revision 940cb0e2bb228ca52f2d29c9c990be0634aec7e4)
1 /*-
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60 
61 /*
62  *	Virtual memory mapping module.
63  */
64 
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/kernel.h>
71 #include <sys/ktr.h>
72 #include <sys/lock.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #include <sys/racct.h>
79 #include <sys/resourcevar.h>
80 #include <sys/rwlock.h>
81 #include <sys/file.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysent.h>
84 #include <sys/shm.h>
85 
86 #include <vm/vm.h>
87 #include <vm/vm_param.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_object.h>
92 #include <vm/vm_pager.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_extern.h>
95 #include <vm/vnode_pager.h>
96 #include <vm/swap_pager.h>
97 #include <vm/uma.h>
98 
99 /*
100  *	Virtual memory maps provide for the mapping, protection,
101  *	and sharing of virtual memory objects.  In addition,
102  *	this module provides for an efficient virtual copy of
103  *	memory from one map to another.
104  *
105  *	Synchronization is required prior to most operations.
106  *
107  *	Maps consist of an ordered doubly-linked list of simple
108  *	entries; a self-adjusting binary search tree of these
109  *	entries is used to speed up lookups.
110  *
111  *	Since portions of maps are specified by start/end addresses,
112  *	which may not align with existing map entries, all
113  *	routines merely "clip" entries to these start/end values.
114  *	[That is, an entry is split into two, bordering at a
115  *	start or end value.]  Note that these clippings may not
116  *	always be necessary (as the two resulting entries are then
117  *	not changed); however, the clipping is done for convenience.
118  *
119  *	As mentioned above, virtual copy operations are performed
120  *	by copying VM object references from one map to
121  *	another, and then marking both regions as copy-on-write.
122  */
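/*
 *	For illustration only (addresses made up): clipping an entry
 *	covering [0x1000, 0x4000) at address 0x2000 yields two adjacent
 *	entries, [0x1000, 0x2000) and [0x2000, 0x4000), both referencing
 *	the same backing object with offsets that differ by 0x1000.
 */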
123 
124 static struct mtx map_sleep_mtx;
125 static uma_zone_t mapentzone;
126 static uma_zone_t kmapentzone;
127 static uma_zone_t mapzone;
128 static uma_zone_t vmspace_zone;
129 static int vmspace_zinit(void *mem, int size, int flags);
130 static void vmspace_zfini(void *mem, int size);
131 static int vm_map_zinit(void *mem, int size, int flags);
132 static void vm_map_zfini(void *mem, int size);
133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
134     vm_offset_t max);
135 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
136 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
137 #ifdef INVARIANTS
138 static void vm_map_zdtor(void *mem, int size, void *arg);
139 static void vmspace_zdtor(void *mem, int size, void *arg);
140 #endif
141 
142 #define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
143     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
144      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
145 
146 /*
147  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
148  * stable.
149  */
150 #define PROC_VMSPACE_LOCK(p) do { } while (0)
151 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
152 
153 /*
154  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
155  *
156  *	Asserts that the starting and ending region
157  *	addresses fall within the valid range of the map.
158  */
159 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
160 		{					\
161 		if (start < vm_map_min(map))		\
162 			start = vm_map_min(map);	\
163 		if (end > vm_map_max(map))		\
164 			end = vm_map_max(map);		\
165 		if (start > end)			\
166 			start = end;			\
167 		}
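/*
 * For example, a range that straddles the top of the map is clamped to
 * end at vm_map_max(map), while a range lying entirely outside the map
 * collapses to an empty (start == end) range.
 */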
168 
169 /*
170  *	vm_map_startup:
171  *
172  *	Initialize the vm_map module.  Must be called before
173  *	any other vm_map routines.
174  *
175  *	Map and entry structures are allocated from the general
176  *	purpose memory pool with some exceptions:
177  *
178  *	- The kernel map and kmem submap are allocated statically.
179  *	- Kernel map entries are allocated out of a static pool.
180  *
181  *	These restrictions are necessary since malloc() uses the
182  *	maps and requires map entries.
183  */
184 
185 void
186 vm_map_startup(void)
187 {
188 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
189 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
190 #ifdef INVARIANTS
191 	    vm_map_zdtor,
192 #else
193 	    NULL,
194 #endif
195 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
196 	uma_prealloc(mapzone, MAX_KMAP);
197 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
198 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
199 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
200 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
201 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
202 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
203 #ifdef INVARIANTS
204 	    vmspace_zdtor,
205 #else
206 	    NULL,
207 #endif
208 	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
209 }
210 
211 static void
212 vmspace_zfini(void *mem, int size)
213 {
214 	struct vmspace *vm;
215 
216 	vm = (struct vmspace *)mem;
217 	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
218 }
219 
220 static int
221 vmspace_zinit(void *mem, int size, int flags)
222 {
223 	struct vmspace *vm;
224 
225 	vm = (struct vmspace *)mem;
226 
227 	vm->vm_map.pmap = NULL;
228 	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
229 	return (0);
230 }
231 
232 static void
233 vm_map_zfini(void *mem, int size)
234 {
235 	vm_map_t map;
236 
237 	map = (vm_map_t)mem;
238 	mtx_destroy(&map->system_mtx);
239 	sx_destroy(&map->lock);
240 }
241 
242 static int
243 vm_map_zinit(void *mem, int size, int flags)
244 {
245 	vm_map_t map;
246 
247 	map = (vm_map_t)mem;
248 	memset(map, 0, sizeof(*map));
249 	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
250 	sx_init(&map->lock, "vm map (user)");
251 	return (0);
252 }
253 
254 #ifdef INVARIANTS
255 static void
256 vmspace_zdtor(void *mem, int size, void *arg)
257 {
258 	struct vmspace *vm;
259 
260 	vm = (struct vmspace *)mem;
261 
262 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
263 }
264 static void
265 vm_map_zdtor(void *mem, int size, void *arg)
266 {
267 	vm_map_t map;
268 
269 	map = (vm_map_t)mem;
270 	KASSERT(map->nentries == 0,
271 	    ("map %p nentries == %d on free.",
272 	    map, map->nentries));
273 	KASSERT(map->size == 0,
274 	    ("map %p size == %lu on free.",
275 	    map, (unsigned long)map->size));
276 }
277 #endif	/* INVARIANTS */
278 
279 /*
280  * Allocate a vmspace structure, including a vm_map and pmap,
281  * and initialize those structures.  The refcnt is set to 1.
282  */
283 struct vmspace *
284 vmspace_alloc(vm_offset_t min, vm_offset_t max)
286 {
287 	struct vmspace *vm;
288 
289 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
290 	if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
291 		uma_zfree(vmspace_zone, vm);
292 		return (NULL);
293 	}
294 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
295 	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
296 	vm->vm_refcnt = 1;
297 	vm->vm_shm = NULL;
298 	vm->vm_swrss = 0;
299 	vm->vm_tsize = 0;
300 	vm->vm_dsize = 0;
301 	vm->vm_ssize = 0;
302 	vm->vm_taddr = 0;
303 	vm->vm_daddr = 0;
304 	vm->vm_maxsaddr = 0;
305 	return (vm);
306 }
307 
308 static void
309 vmspace_container_reset(struct proc *p)
310 {
311 
312 #ifdef RACCT
313 	PROC_LOCK(p);
314 	racct_set(p, RACCT_DATA, 0);
315 	racct_set(p, RACCT_STACK, 0);
316 	racct_set(p, RACCT_RSS, 0);
317 	racct_set(p, RACCT_MEMLOCK, 0);
318 	racct_set(p, RACCT_VMEM, 0);
319 	PROC_UNLOCK(p);
320 #endif
321 }
322 
323 static inline void
324 vmspace_dofree(struct vmspace *vm)
325 {
326 
327 	CTR1(KTR_VM, "vmspace_free: %p", vm);
328 
329 	/*
330 	 * Make sure any SysV shm is freed, it might not have been in
331 	 * exit1().
332 	 */
333 	shmexit(vm);
334 
335 	/*
336 	 * Lock the map, to wait out all other references to it.
337 	 * Delete all of the mappings and pages they hold, then call
338 	 * the pmap module to reclaim anything left.
339 	 */
340 	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
341 	    vm->vm_map.max_offset);
342 
343 	pmap_release(vmspace_pmap(vm));
344 	vm->vm_map.pmap = NULL;
345 	uma_zfree(vmspace_zone, vm);
346 }
347 
348 void
349 vmspace_free(struct vmspace *vm)
350 {
351 
352 	if (vm->vm_refcnt == 0)
353 		panic("vmspace_free: attempt to free already freed vmspace");
354 
355 	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
356 		vmspace_dofree(vm);
357 }
358 
359 void
360 vmspace_exitfree(struct proc *p)
361 {
362 	struct vmspace *vm;
363 
364 	PROC_VMSPACE_LOCK(p);
365 	vm = p->p_vmspace;
366 	p->p_vmspace = NULL;
367 	PROC_VMSPACE_UNLOCK(p);
368 	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
369 	vmspace_free(vm);
370 }
371 
372 void
373 vmspace_exit(struct thread *td)
374 {
375 	int refcnt;
376 	struct vmspace *vm;
377 	struct proc *p;
378 
379 	/*
380 	 * Release user portion of address space.
381 	 * This releases references to vnodes,
382 	 * which could cause I/O if the file has been unlinked.
383 	 * Need to do this early enough that we can still sleep.
384 	 *
385 	 * The last exiting process to reach this point releases as
386 	 * much of the environment as it can. vmspace_dofree() is the
387 	 * slower fallback in case another process had a temporary
388 	 * reference to the vmspace.
389 	 */
390 
391 	p = td->td_proc;
392 	vm = p->p_vmspace;
393 	atomic_add_int(&vmspace0.vm_refcnt, 1);
394 	do {
395 		refcnt = vm->vm_refcnt;
396 		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
397 			/* Switch now since other proc might free vmspace */
398 			PROC_VMSPACE_LOCK(p);
399 			p->p_vmspace = &vmspace0;
400 			PROC_VMSPACE_UNLOCK(p);
401 			pmap_activate(td);
402 		}
403 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
404 	if (refcnt == 1) {
405 		if (p->p_vmspace != vm) {
406 			/* vmspace not yet freed, switch back */
407 			PROC_VMSPACE_LOCK(p);
408 			p->p_vmspace = vm;
409 			PROC_VMSPACE_UNLOCK(p);
410 			pmap_activate(td);
411 		}
412 		pmap_remove_pages(vmspace_pmap(vm));
413 		/* Switch now since this proc will free vmspace */
414 		PROC_VMSPACE_LOCK(p);
415 		p->p_vmspace = &vmspace0;
416 		PROC_VMSPACE_UNLOCK(p);
417 		pmap_activate(td);
418 		vmspace_dofree(vm);
419 	}
420 	vmspace_container_reset(p);
421 }
422 
423 /* Acquire reference to vmspace owned by another process. */
424 
425 struct vmspace *
426 vmspace_acquire_ref(struct proc *p)
427 {
428 	struct vmspace *vm;
429 	int refcnt;
430 
431 	PROC_VMSPACE_LOCK(p);
432 	vm = p->p_vmspace;
433 	if (vm == NULL) {
434 		PROC_VMSPACE_UNLOCK(p);
435 		return (NULL);
436 	}
437 	do {
438 		refcnt = vm->vm_refcnt;
439 		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
440 			PROC_VMSPACE_UNLOCK(p);
441 			return (NULL);
442 		}
443 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
444 	if (vm != p->p_vmspace) {
445 		PROC_VMSPACE_UNLOCK(p);
446 		vmspace_free(vm);
447 		return (NULL);
448 	}
449 	PROC_VMSPACE_UNLOCK(p);
450 	return (vm);
451 }
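/*
 * Illustrative caller pattern (a sketch, not part of this file): code
 * that inspects another process's address space is expected to hold a
 * reference across the access and drop it with vmspace_free().
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);
 *	map = &vm->vm_map;
 *	vm_map_lock_read(map);
 *	... walk the map entries ...
 *	vm_map_unlock_read(map);
 *	vmspace_free(vm);
 */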
452 
453 void
454 _vm_map_lock(vm_map_t map, const char *file, int line)
455 {
456 
457 	if (map->system_map)
458 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
459 	else
460 		sx_xlock_(&map->lock, file, line);
461 	map->timestamp++;
462 }
463 
464 static void
465 vm_map_process_deferred(void)
466 {
467 	struct thread *td;
468 	vm_map_entry_t entry, next;
469 	vm_object_t object;
470 
471 	td = curthread;
472 	entry = td->td_map_def_user;
473 	td->td_map_def_user = NULL;
474 	while (entry != NULL) {
475 		next = entry->next;
476 		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
477 			/*
478 			 * Decrement the object's writemappings and
479 			 * possibly the vnode's v_writecount.
480 			 */
481 			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
482 			    ("Submap with writecount"));
483 			object = entry->object.vm_object;
484 			KASSERT(object != NULL, ("No object for writecount"));
485 			vnode_pager_release_writecount(object, entry->start,
486 			    entry->end);
487 		}
488 		vm_map_entry_deallocate(entry, FALSE);
489 		entry = next;
490 	}
491 }
492 
493 void
494 _vm_map_unlock(vm_map_t map, const char *file, int line)
495 {
496 
497 	if (map->system_map)
498 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
499 	else {
500 		sx_xunlock_(&map->lock, file, line);
501 		vm_map_process_deferred();
502 	}
503 }
504 
505 void
506 _vm_map_lock_read(vm_map_t map, const char *file, int line)
507 {
508 
509 	if (map->system_map)
510 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
511 	else
512 		sx_slock_(&map->lock, file, line);
513 }
514 
515 void
516 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
517 {
518 
519 	if (map->system_map)
520 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
521 	else {
522 		sx_sunlock_(&map->lock, file, line);
523 		vm_map_process_deferred();
524 	}
525 }
526 
527 int
528 _vm_map_trylock(vm_map_t map, const char *file, int line)
529 {
530 	int error;
531 
532 	error = map->system_map ?
533 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
534 	    !sx_try_xlock_(&map->lock, file, line);
535 	if (error == 0)
536 		map->timestamp++;
537 	return (error == 0);
538 }
539 
540 int
541 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
542 {
543 	int error;
544 
545 	error = map->system_map ?
546 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
547 	    !sx_try_slock_(&map->lock, file, line);
548 	return (error == 0);
549 }
550 
551 /*
552  *	_vm_map_lock_upgrade:	[ internal use only ]
553  *
554  *	Tries to upgrade a read (shared) lock on the specified map to a write
555  *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
556  *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
557  *	returned without a read or write lock held.
558  *
559  *	Requires that the map be read locked.
560  */
561 int
562 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
563 {
564 	unsigned int last_timestamp;
565 
566 	if (map->system_map) {
567 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
568 	} else {
569 		if (!sx_try_upgrade_(&map->lock, file, line)) {
570 			last_timestamp = map->timestamp;
571 			sx_sunlock_(&map->lock, file, line);
572 			vm_map_process_deferred();
573 			/*
574 			 * If the map's timestamp does not change while the
575 			 * map is unlocked, then the upgrade succeeds.
576 			 */
577 			sx_xlock_(&map->lock, file, line);
578 			if (last_timestamp != map->timestamp) {
579 				sx_xunlock_(&map->lock, file, line);
580 				return (1);
581 			}
582 		}
583 	}
584 	map->timestamp++;
585 	return (0);
586 }
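/*
 * Illustrative caller pattern (sketch only): because a failed upgrade
 * leaves the map unlocked, callers must relock and revalidate any entry
 * pointers they cached while holding the read lock.
 *
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		vm_map_lock(map);
 *		if (!vm_map_lookup_entry(map, addr, &entry))
 *			... the map changed while unlocked; re-examine ...
 *	}
 */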
587 
588 void
589 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
590 {
591 
592 	if (map->system_map) {
593 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
594 	} else
595 		sx_downgrade_(&map->lock, file, line);
596 }
597 
598 /*
599  *	vm_map_locked:
600  *
601  *	Returns a non-zero value if the caller holds a write (exclusive) lock
602  *	on the specified map and the value "0" otherwise.
603  */
604 int
605 vm_map_locked(vm_map_t map)
606 {
607 
608 	if (map->system_map)
609 		return (mtx_owned(&map->system_mtx));
610 	else
611 		return (sx_xlocked(&map->lock));
612 }
613 
614 #ifdef INVARIANTS
615 static void
616 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
617 {
618 
619 	if (map->system_map)
620 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
621 	else
622 		sx_assert_(&map->lock, SA_XLOCKED, file, line);
623 }
624 
625 #define	VM_MAP_ASSERT_LOCKED(map) \
626     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
627 #else
628 #define	VM_MAP_ASSERT_LOCKED(map)
629 #endif
630 
631 /*
632  *	_vm_map_unlock_and_wait:
633  *
634  *	Atomically releases the lock on the specified map and puts the calling
635  *	thread to sleep.  The calling thread will remain asleep until either
636  *	vm_map_wakeup() is performed on the map or the specified timeout is
637  *	exceeded.
638  *
639  *	WARNING!  This function does not perform deferred deallocations of
640  *	objects and map	entries.  Therefore, the calling thread is expected to
641  *	reacquire the map lock after reawakening and later perform an ordinary
642  *	unlock operation, such as vm_map_unlock(), before completing its
643  *	operation on the map.
644  */
645 int
646 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
647 {
648 
649 	mtx_lock(&map_sleep_mtx);
650 	if (map->system_map)
651 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
652 	else
653 		sx_xunlock_(&map->lock, file, line);
654 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
655 	    timo));
656 }
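/*
 * Illustrative caller pattern (sketch only), matching the in-transition
 * handling used elsewhere in this file: sleep until another thread calls
 * vm_map_wakeup(), then relock and re-lookup, since the map may have
 * changed while it was unlocked.
 *
 *	entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 *	(void)vm_map_unlock_and_wait(map, 0);
 *	vm_map_lock(map);
 *	if (!vm_map_lookup_entry(map, saved_start, &entry))
 *		... re-examine the affected range ...
 */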
657 
658 /*
659  *	vm_map_wakeup:
660  *
661  *	Awaken any threads that have slept on the map using
662  *	vm_map_unlock_and_wait().
663  */
664 void
665 vm_map_wakeup(vm_map_t map)
666 {
667 
668 	/*
669 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
670 	 * from being performed (and lost) between the map unlock
671 	 * and the msleep() in _vm_map_unlock_and_wait().
672 	 */
673 	mtx_lock(&map_sleep_mtx);
674 	mtx_unlock(&map_sleep_mtx);
675 	wakeup(&map->root);
676 }
677 
678 void
679 vm_map_busy(vm_map_t map)
680 {
681 
682 	VM_MAP_ASSERT_LOCKED(map);
683 	map->busy++;
684 }
685 
686 void
687 vm_map_unbusy(vm_map_t map)
688 {
689 
690 	VM_MAP_ASSERT_LOCKED(map);
691 	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
692 	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
693 		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
694 		wakeup(&map->busy);
695 	}
696 }
697 
698 void
699 vm_map_wait_busy(vm_map_t map)
700 {
701 
702 	VM_MAP_ASSERT_LOCKED(map);
703 	while (map->busy) {
704 		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
705 		if (map->system_map)
706 			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
707 		else
708 			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
709 	}
710 	map->timestamp++;
711 }
712 
713 long
714 vmspace_resident_count(struct vmspace *vmspace)
715 {
716 	return (pmap_resident_count(vmspace_pmap(vmspace)));
717 }
718 
719 /*
720  *	vm_map_create:
721  *
722  *	Creates and returns a new empty VM map with
723  *	the given physical map structure, and having
724  *	the given lower and upper address bounds.
725  */
726 vm_map_t
727 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
728 {
729 	vm_map_t result;
730 
731 	result = uma_zalloc(mapzone, M_WAITOK);
732 	CTR1(KTR_VM, "vm_map_create: %p", result);
733 	_vm_map_init(result, pmap, min, max);
734 	return (result);
735 }
736 
737 /*
738  * Initialize an existing vm_map structure
739  * such as that in the vmspace structure.
740  */
741 static void
742 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
743 {
744 
745 	map->header.next = map->header.prev = &map->header;
746 	map->needs_wakeup = FALSE;
747 	map->system_map = 0;
748 	map->pmap = pmap;
749 	map->min_offset = min;
750 	map->max_offset = max;
751 	map->flags = 0;
752 	map->root = NULL;
753 	map->timestamp = 0;
754 	map->busy = 0;
755 }
756 
757 void
758 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
759 {
760 
761 	_vm_map_init(map, pmap, min, max);
762 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
763 	sx_init(&map->lock, "user map");
764 }
765 
766 /*
767  *	vm_map_entry_dispose:	[ internal use only ]
768  *
769  *	Inverse of vm_map_entry_create.
770  */
771 static void
772 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
773 {
774 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
775 }
776 
777 /*
778  *	vm_map_entry_create:	[ internal use only ]
779  *
780  *	Allocates a VM map entry for insertion.
781  *	No entry fields are filled in.
782  */
783 static vm_map_entry_t
784 vm_map_entry_create(vm_map_t map)
785 {
786 	vm_map_entry_t new_entry;
787 
788 	if (map->system_map)
789 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
790 	else
791 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
792 	if (new_entry == NULL)
793 		panic("vm_map_entry_create: kernel resources exhausted");
794 	return (new_entry);
795 }
796 
797 /*
798  *	vm_map_entry_set_behavior:
799  *
800  *	Set the expected access behavior, either normal, random, or
801  *	sequential.
802  */
803 static inline void
804 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
805 {
806 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
807 	    (behavior & MAP_ENTRY_BEHAV_MASK);
808 }
809 
810 /*
811  *	vm_map_entry_set_max_free:
812  *
813  *	Set the max_free field in a vm_map_entry.
814  */
815 static inline void
816 vm_map_entry_set_max_free(vm_map_entry_t entry)
817 {
818 
819 	entry->max_free = entry->adj_free;
820 	if (entry->left != NULL && entry->left->max_free > entry->max_free)
821 		entry->max_free = entry->left->max_free;
822 	if (entry->right != NULL && entry->right->max_free > entry->max_free)
823 		entry->max_free = entry->right->max_free;
824 }
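/*
 *	In other words, the subtree invariant maintained here is
 *
 *		max_free(e) = max(adj_free(e),
 *		    max_free(e->left), max_free(e->right)),
 *
 *	with a missing child contributing zero.  vm_map_findspace()
 *	relies on this invariant to prune whole subtrees that cannot
 *	hold a requested length.
 */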
825 
826 /*
827  *	vm_map_entry_splay:
828  *
829  *	The Sleator and Tarjan top-down splay algorithm with the
830  *	following variation.  Max_free must be computed bottom-up, so
831  *	on the downward pass, maintain the left and right spines in
832  *	reverse order.  Then, make a second pass up each side to fix
833  *	the pointers and compute max_free.  The time bound is O(log n)
834  *	amortized.
835  *
836  *	The new root is the vm_map_entry containing "addr", or else an
837  *	adjacent entry (lower or higher) if addr is not in the tree.
838  *
839  *	The map must be locked, and leaves it so.
840  *
841  *	Returns: the new root.
842  */
843 static vm_map_entry_t
844 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
845 {
846 	vm_map_entry_t llist, rlist;
847 	vm_map_entry_t ltree, rtree;
848 	vm_map_entry_t y;
849 
850 	/* Special case of empty tree. */
851 	if (root == NULL)
852 		return (root);
853 
854 	/*
855 	 * Pass One: Splay down the tree until we find addr or a NULL
856 	 * pointer where addr would go.  llist and rlist are the two
857 	 * sides in reverse order (bottom-up), with llist linked by
858 	 * the right pointer and rlist linked by the left pointer in
859 	 * the vm_map_entry.  Wait until Pass Two to set max_free on
860 	 * the two spines.
861 	 */
862 	llist = NULL;
863 	rlist = NULL;
864 	for (;;) {
865 		/* root is never NULL in here. */
866 		if (addr < root->start) {
867 			y = root->left;
868 			if (y == NULL)
869 				break;
870 			if (addr < y->start && y->left != NULL) {
871 				/* Rotate right and put y on rlist. */
872 				root->left = y->right;
873 				y->right = root;
874 				vm_map_entry_set_max_free(root);
875 				root = y->left;
876 				y->left = rlist;
877 				rlist = y;
878 			} else {
879 				/* Put root on rlist. */
880 				root->left = rlist;
881 				rlist = root;
882 				root = y;
883 			}
884 		} else if (addr >= root->end) {
885 			y = root->right;
886 			if (y == NULL)
887 				break;
888 			if (addr >= y->end && y->right != NULL) {
889 				/* Rotate left and put y on llist. */
890 				root->right = y->left;
891 				y->left = root;
892 				vm_map_entry_set_max_free(root);
893 				root = y->right;
894 				y->right = llist;
895 				llist = y;
896 			} else {
897 				/* Put root on llist. */
898 				root->right = llist;
899 				llist = root;
900 				root = y;
901 			}
902 		} else
903 			break;
904 	}
905 
906 	/*
907 	 * Pass Two: Walk back up the two spines, flip the pointers
908 	 * and set max_free.  The subtrees of the root go at the
909 	 * bottom of llist and rlist.
910 	 */
911 	ltree = root->left;
912 	while (llist != NULL) {
913 		y = llist->right;
914 		llist->right = ltree;
915 		vm_map_entry_set_max_free(llist);
916 		ltree = llist;
917 		llist = y;
918 	}
919 	rtree = root->right;
920 	while (rlist != NULL) {
921 		y = rlist->left;
922 		rlist->left = rtree;
923 		vm_map_entry_set_max_free(rlist);
924 		rtree = rlist;
925 		rlist = y;
926 	}
927 
928 	/*
929 	 * Final assembly: add ltree and rtree as subtrees of root.
930 	 */
931 	root->left = ltree;
932 	root->right = rtree;
933 	vm_map_entry_set_max_free(root);
934 
935 	return (root);
936 }
937 
938 /*
939  *	vm_map_entry_{un,}link:
940  *
941  *	Insert/remove entries from maps.
942  */
943 static void
944 vm_map_entry_link(vm_map_t map,
945 		  vm_map_entry_t after_where,
946 		  vm_map_entry_t entry)
947 {
948 
949 	CTR4(KTR_VM,
950 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
951 	    map->nentries, entry, after_where);
952 	VM_MAP_ASSERT_LOCKED(map);
953 	map->nentries++;
954 	entry->prev = after_where;
955 	entry->next = after_where->next;
956 	entry->next->prev = entry;
957 	after_where->next = entry;
958 
959 	if (after_where != &map->header) {
960 		if (after_where != map->root)
961 			vm_map_entry_splay(after_where->start, map->root);
962 		entry->right = after_where->right;
963 		entry->left = after_where;
964 		after_where->right = NULL;
965 		after_where->adj_free = entry->start - after_where->end;
966 		vm_map_entry_set_max_free(after_where);
967 	} else {
968 		entry->right = map->root;
969 		entry->left = NULL;
970 	}
971 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
972 	    entry->next->start) - entry->end;
973 	vm_map_entry_set_max_free(entry);
974 	map->root = entry;
975 }
976 
977 static void
978 vm_map_entry_unlink(vm_map_t map,
979 		    vm_map_entry_t entry)
980 {
981 	vm_map_entry_t next, prev, root;
982 
983 	VM_MAP_ASSERT_LOCKED(map);
984 	if (entry != map->root)
985 		vm_map_entry_splay(entry->start, map->root);
986 	if (entry->left == NULL)
987 		root = entry->right;
988 	else {
989 		root = vm_map_entry_splay(entry->start, entry->left);
990 		root->right = entry->right;
991 		root->adj_free = (entry->next == &map->header ? map->max_offset :
992 		    entry->next->start) - root->end;
993 		vm_map_entry_set_max_free(root);
994 	}
995 	map->root = root;
996 
997 	prev = entry->prev;
998 	next = entry->next;
999 	next->prev = prev;
1000 	prev->next = next;
1001 	map->nentries--;
1002 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1003 	    map->nentries, entry);
1004 }
1005 
1006 /*
1007  *	vm_map_entry_resize_free:
1008  *
1009  *	Recompute the amount of free space following a vm_map_entry
1010  *	and propagate that value up the tree.  Call this function after
1011  *	resizing a map entry in-place, that is, without a call to
1012  *	vm_map_entry_link() or _unlink().
1013  *
1014  *	The map must be locked, and leaves it so.
1015  */
1016 static void
1017 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1018 {
1019 
1020 	/*
1021 	 * Using splay trees without parent pointers, propagating
1022 	 * max_free up the tree is done by moving the entry to the
1023 	 * root and making the change there.
1024 	 */
1025 	if (entry != map->root)
1026 		map->root = vm_map_entry_splay(entry->start, map->root);
1027 
1028 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
1029 	    entry->next->start) - entry->end;
1030 	vm_map_entry_set_max_free(entry);
1031 }
1032 
1033 /*
1034  *	vm_map_lookup_entry:	[ internal use only ]
1035  *
1036  *	Finds the map entry containing (or
1037  *	immediately preceding) the specified address
1038  *	in the given map; the entry is returned
1039  *	in the "entry" parameter.  The boolean
1040  *	result indicates whether the address is
1041  *	actually contained in the map.
1042  */
1043 boolean_t
1044 vm_map_lookup_entry(
1045 	vm_map_t map,
1046 	vm_offset_t address,
1047 	vm_map_entry_t *entry)	/* OUT */
1048 {
1049 	vm_map_entry_t cur;
1050 	boolean_t locked;
1051 
1052 	/*
1053 	 * If the map is empty, then the map entry immediately preceding
1054 	 * "address" is the map's header.
1055 	 */
1056 	cur = map->root;
1057 	if (cur == NULL)
1058 		*entry = &map->header;
1059 	else if (address >= cur->start && cur->end > address) {
1060 		*entry = cur;
1061 		return (TRUE);
1062 	} else if ((locked = vm_map_locked(map)) ||
1063 	    sx_try_upgrade(&map->lock)) {
1064 		/*
1065 		 * Splay requires a write lock on the map.  However, it only
1066 		 * restructures the binary search tree; it does not otherwise
1067 		 * change the map.  Thus, the map's timestamp need not change
1068 		 * on a temporary upgrade.
1069 		 */
1070 		map->root = cur = vm_map_entry_splay(address, cur);
1071 		if (!locked)
1072 			sx_downgrade(&map->lock);
1073 
1074 		/*
1075 		 * If "address" is contained within a map entry, the new root
1076 		 * is that map entry.  Otherwise, the new root is a map entry
1077 		 * immediately before or after "address".
1078 		 */
1079 		if (address >= cur->start) {
1080 			*entry = cur;
1081 			if (cur->end > address)
1082 				return (TRUE);
1083 		} else
1084 			*entry = cur->prev;
1085 	} else
1086 		/*
1087 		 * Since the map is only locked for read access, perform a
1088 		 * standard binary search tree lookup for "address".
1089 		 */
1090 		for (;;) {
1091 			if (address < cur->start) {
1092 				if (cur->left == NULL) {
1093 					*entry = cur->prev;
1094 					break;
1095 				}
1096 				cur = cur->left;
1097 			} else if (cur->end > address) {
1098 				*entry = cur;
1099 				return (TRUE);
1100 			} else {
1101 				if (cur->right == NULL) {
1102 					*entry = cur;
1103 					break;
1104 				}
1105 				cur = cur->right;
1106 			}
1107 		}
1108 	return (FALSE);
1109 }
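/*
 * Illustrative caller pattern, mirroring its use throughout this file:
 * on a miss, the entry after the returned predecessor is the first
 * entry at or above the address.
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 */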
1110 
1111 /*
1112  *	vm_map_insert:
1113  *
1114  *	Inserts the given whole VM object into the target
1115  *	map at the specified address range.  The object's
1116  *	size should match that of the address range.
1117  *
1118  *	Requires that the map be locked, and leaves it so.
1119  *
1120  *	If object is non-NULL, ref count must be bumped by caller
1121  *	prior to making call to account for the new entry.
1122  */
1123 int
1124 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1125 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
1126 	      int cow)
1127 {
1128 	vm_map_entry_t new_entry;
1129 	vm_map_entry_t prev_entry;
1130 	vm_map_entry_t temp_entry;
1131 	vm_eflags_t protoeflags;
1132 	struct ucred *cred;
1133 	vm_inherit_t inheritance;
1134 	boolean_t charge_prev_obj;
1135 
1136 	VM_MAP_ASSERT_LOCKED(map);
1137 
1138 	/*
1139 	 * Check that the start and end points are not bogus.
1140 	 */
1141 	if ((start < map->min_offset) || (end > map->max_offset) ||
1142 	    (start >= end))
1143 		return (KERN_INVALID_ADDRESS);
1144 
1145 	/*
1146 	 * Find the entry prior to the proposed starting address; if it's part
1147 	 * of an existing entry, this range is bogus.
1148 	 */
1149 	if (vm_map_lookup_entry(map, start, &temp_entry))
1150 		return (KERN_NO_SPACE);
1151 
1152 	prev_entry = temp_entry;
1153 
1154 	/*
1155 	 * Assert that the next entry doesn't overlap the end point.
1156 	 */
1157 	if ((prev_entry->next != &map->header) &&
1158 	    (prev_entry->next->start < end))
1159 		return (KERN_NO_SPACE);
1160 
1161 	protoeflags = 0;
1162 	charge_prev_obj = FALSE;
1163 
1164 	if (cow & MAP_COPY_ON_WRITE)
1165 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1166 
1167 	if (cow & MAP_NOFAULT) {
1168 		protoeflags |= MAP_ENTRY_NOFAULT;
1169 
1170 		KASSERT(object == NULL,
1171 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1172 	}
1173 	if (cow & MAP_DISABLE_SYNCER)
1174 		protoeflags |= MAP_ENTRY_NOSYNC;
1175 	if (cow & MAP_DISABLE_COREDUMP)
1176 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1177 	if (cow & MAP_VN_WRITECOUNT)
1178 		protoeflags |= MAP_ENTRY_VN_WRITECNT;
1179 	if (cow & MAP_INHERIT_SHARE)
1180 		inheritance = VM_INHERIT_SHARE;
1181 	else
1182 		inheritance = VM_INHERIT_DEFAULT;
1183 
1184 	cred = NULL;
1185 	KASSERT((object != kmem_object && object != kernel_object) ||
1186 	    ((object == kmem_object || object == kernel_object) &&
1187 		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
1188 	    ("kmem or kernel object and cow"));
1189 	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
1190 		goto charged;
1191 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1192 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1193 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1194 			return (KERN_RESOURCE_SHORTAGE);
1195 		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
1196 		    object->cred == NULL,
1197 		    ("OVERCOMMIT: vm_map_insert o %p", object));
1198 		cred = curthread->td_ucred;
1199 		crhold(cred);
1200 		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
1201 			charge_prev_obj = TRUE;
1202 	}
1203 
1204 charged:
1205 	/* Expand the kernel pmap, if necessary. */
1206 	if (map == kernel_map && end > kernel_vm_end)
1207 		pmap_growkernel(end);
1208 	if (object != NULL) {
1209 		/*
1210 		 * OBJ_ONEMAPPING must be cleared unless this mapping
1211 		 * is trivially proven to be the only mapping for any
1212 		 * of the object's pages.  (Object granularity
1213 		 * reference counting is insufficient to recognize
1214 		 * aliases with precision.)
1215 		 */
1216 		VM_OBJECT_WLOCK(object);
1217 		if (object->ref_count > 1 || object->shadow_count != 0)
1218 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1219 		VM_OBJECT_WUNLOCK(object);
1220 	}
1221 	else if ((prev_entry != &map->header) &&
1222 		 (prev_entry->eflags == protoeflags) &&
1223 		 (prev_entry->end == start) &&
1224 		 (prev_entry->wired_count == 0) &&
1225 		 (prev_entry->cred == cred ||
1226 		  (prev_entry->object.vm_object != NULL &&
1227 		   (prev_entry->object.vm_object->cred == cred))) &&
1228 		   vm_object_coalesce(prev_entry->object.vm_object,
1229 		       prev_entry->offset,
1230 		       (vm_size_t)(prev_entry->end - prev_entry->start),
1231 		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
1232 		/*
1233 		 * We were able to extend the object.  Determine if we
1234 		 * can extend the previous map entry to include the
1235 		 * new range as well.
1236 		 */
1237 		if ((prev_entry->inheritance == inheritance) &&
1238 		    (prev_entry->protection == prot) &&
1239 		    (prev_entry->max_protection == max)) {
1240 			map->size += (end - prev_entry->end);
1241 			prev_entry->end = end;
1242 			vm_map_entry_resize_free(map, prev_entry);
1243 			vm_map_simplify_entry(map, prev_entry);
1244 			if (cred != NULL)
1245 				crfree(cred);
1246 			return (KERN_SUCCESS);
1247 		}
1248 
1249 		/*
1250 		 * If we can extend the object but cannot extend the
1251 		 * map entry, we have to create a new map entry.  We
1252 		 * must bump the ref count on the extended object to
1253 		 * account for it.  object may be NULL.
1254 		 */
1255 		object = prev_entry->object.vm_object;
1256 		offset = prev_entry->offset +
1257 			(prev_entry->end - prev_entry->start);
1258 		vm_object_reference(object);
1259 		if (cred != NULL && object != NULL && object->cred != NULL &&
1260 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1261 			/* Object already accounts for this uid. */
1262 			crfree(cred);
1263 			cred = NULL;
1264 		}
1265 	}
1266 
1267 	/*
1268 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1269 	 * in things like the buffer map where we manage kva but do not manage
1270 	 * backing objects.
1271 	 */
1272 
1273 	/*
1274 	 * Create a new entry
1275 	 */
1276 	new_entry = vm_map_entry_create(map);
1277 	new_entry->start = start;
1278 	new_entry->end = end;
1279 	new_entry->cred = NULL;
1280 
1281 	new_entry->eflags = protoeflags;
1282 	new_entry->object.vm_object = object;
1283 	new_entry->offset = offset;
1284 	new_entry->avail_ssize = 0;
1285 
1286 	new_entry->inheritance = inheritance;
1287 	new_entry->protection = prot;
1288 	new_entry->max_protection = max;
1289 	new_entry->wired_count = 0;
1290 	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1291 	new_entry->next_read = OFF_TO_IDX(offset);
1292 
1293 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1294 	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
1295 	new_entry->cred = cred;
1296 
1297 	/*
1298 	 * Insert the new entry into the list
1299 	 */
1300 	vm_map_entry_link(map, prev_entry, new_entry);
1301 	map->size += new_entry->end - new_entry->start;
1302 
1303 	/*
1304 	 * It may be possible to merge the new entry with the next and/or
1305 	 * previous entries.  However, due to MAP_STACK_* being a hack, a
1306 	 * panic can result from merging such entries.
1307 	 */
1308 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
1309 		vm_map_simplify_entry(map, new_entry);
1310 
1311 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1312 		vm_map_pmap_enter(map, start, prot,
1313 				    object, OFF_TO_IDX(offset), end - start,
1314 				    cow & MAP_PREFAULT_PARTIAL);
1315 	}
1316 
1317 	return (KERN_SUCCESS);
1318 }
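/*
 * Illustrative caller sketch (not part of this file): mapping an object
 * at a fixed, already-reserved range.  Note the reference bump required
 * by the contract above when object is non-NULL; on failure the caller
 * still owns that reference.
 *
 *	vm_object_reference(object);
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, object, offset, start, end,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *	vm_map_unlock(map);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */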
1319 
1320 /*
1321  *	vm_map_findspace:
1322  *
1323  *	Find the first fit (lowest VM address) for "length" free bytes
1324  *	beginning at address >= start in the given map.
1325  *
1326  *	In a vm_map_entry, "adj_free" is the amount of free space
1327  *	adjacent (higher address) to this entry, and "max_free" is the
1328  *	maximum amount of contiguous free space in its subtree.  This
1329  *	allows finding a free region in one path down the tree, so
1330  *	O(log n) amortized with splay trees.
1331  *
1332  *	The map must be locked, and leaves it so.
1333  *
1334  *	Returns: 0 on success, and starting address in *addr,
1335  *		 1 if insufficient space.
1336  */
1337 int
1338 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1339     vm_offset_t *addr)	/* OUT */
1340 {
1341 	vm_map_entry_t entry;
1342 	vm_offset_t st;
1343 
1344 	/*
1345 	 * Request must fit within min/max VM address and must avoid
1346 	 * address wrap.
1347 	 */
1348 	if (start < map->min_offset)
1349 		start = map->min_offset;
1350 	if (start + length > map->max_offset || start + length < start)
1351 		return (1);
1352 
1353 	/* Empty tree means wide open address space. */
1354 	if (map->root == NULL) {
1355 		*addr = start;
1356 		return (0);
1357 	}
1358 
1359 	/*
1360 	 * After splay, if start comes before root node, then there
1361 	 * must be a gap from start to the root.
1362 	 */
1363 	map->root = vm_map_entry_splay(start, map->root);
1364 	if (start + length <= map->root->start) {
1365 		*addr = start;
1366 		return (0);
1367 	}
1368 
1369 	/*
1370 	 * Root is the last node that might begin its gap before
1371 	 * start, and this is the last comparison where address
1372 	 * wrap might be a problem.
1373 	 */
1374 	st = (start > map->root->end) ? start : map->root->end;
1375 	if (length <= map->root->end + map->root->adj_free - st) {
1376 		*addr = st;
1377 		return (0);
1378 	}
1379 
1380 	/* With max_free, can immediately tell if no solution. */
1381 	entry = map->root->right;
1382 	if (entry == NULL || length > entry->max_free)
1383 		return (1);
1384 
1385 	/*
1386 	 * Search the right subtree in the order: left subtree, root,
1387 	 * right subtree (first fit).  The previous splay implies that
1388 	 * all regions in the right subtree have addresses > start.
1389 	 */
1390 	while (entry != NULL) {
1391 		if (entry->left != NULL && entry->left->max_free >= length)
1392 			entry = entry->left;
1393 		else if (entry->adj_free >= length) {
1394 			*addr = entry->end;
1395 			return (0);
1396 		} else
1397 			entry = entry->right;
1398 	}
1399 
1400 	/* Can't get here, so panic if we do. */
1401 	panic("vm_map_findspace: max_free corrupt");
1402 }
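/*
 * Illustrative caller sketch (see vm_map_find() below for the real
 * consumer): the map must be write locked because the lookup may splay
 * the tree.
 *
 *	vm_map_lock(map);
 *	if (vm_map_findspace(map, start, length, &addr) == 0)
 *		rv = vm_map_insert(map, NULL, 0, addr, addr + length,
 *		    prot, max, cow);
 *	else
 *		rv = KERN_NO_SPACE;
 *	vm_map_unlock(map);
 */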
1403 
1404 int
1405 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1406     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1407     vm_prot_t max, int cow)
1408 {
1409 	vm_offset_t end;
1410 	int result;
1411 
1412 	end = start + length;
1413 	vm_map_lock(map);
1414 	VM_MAP_RANGE_CHECK(map, start, end);
1415 	(void) vm_map_delete(map, start, end);
1416 	result = vm_map_insert(map, object, offset, start, end, prot,
1417 	    max, cow);
1418 	vm_map_unlock(map);
1419 	return (result);
1420 }
1421 
1422 /*
1423  *	vm_map_find finds an unallocated region in the target address
1424  *	map with the given length.  The search is defined to be
1425  *	first-fit from the specified address; the region found is
1426  *	returned in the same parameter.
1427  *
1428  *	If object is non-NULL, ref count must be bumped by caller
1429  *	prior to making call to account for the new entry.
1430  */
1431 int
1432 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1433 	    vm_offset_t *addr,	/* IN/OUT */
1434 	    vm_size_t length, int find_space, vm_prot_t prot,
1435 	    vm_prot_t max, int cow)
1436 {
1437 	vm_offset_t alignment, initial_addr, start;
1438 	int result;
1439 
1440 	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1441 	    (object->flags & OBJ_COLORED) == 0))
1442 		find_space = VMFS_ANY_SPACE;
1443 	if (find_space >> 8 != 0) {
1444 		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1445 		alignment = (vm_offset_t)1 << (find_space >> 8);
1446 	} else
1447 		alignment = 0;
1448 	initial_addr = *addr;
1449 again:
1450 	start = initial_addr;
1451 	vm_map_lock(map);
1452 	do {
1453 		if (find_space != VMFS_NO_SPACE) {
1454 			if (vm_map_findspace(map, start, length, addr)) {
1455 				vm_map_unlock(map);
1456 				if (find_space == VMFS_OPTIMAL_SPACE) {
1457 					find_space = VMFS_ANY_SPACE;
1458 					goto again;
1459 				}
1460 				return (KERN_NO_SPACE);
1461 			}
1462 			switch (find_space) {
1463 			case VMFS_SUPER_SPACE:
1464 			case VMFS_OPTIMAL_SPACE:
1465 				pmap_align_superpage(object, offset, addr,
1466 				    length);
1467 				break;
1468 			case VMFS_ANY_SPACE:
1469 				break;
1470 			default:
1471 				if ((*addr & (alignment - 1)) != 0) {
1472 					*addr &= ~(alignment - 1);
1473 					*addr += alignment;
1474 				}
1475 				break;
1476 			}
1477 
1478 			start = *addr;
1479 		}
1480 		result = vm_map_insert(map, object, offset, start, start +
1481 		    length, prot, max, cow);
1482 	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1483 	    find_space != VMFS_ANY_SPACE);
1484 	vm_map_unlock(map);
1485 	return (result);
1486 }
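/*
 * Illustrative caller sketch (not part of this file): allocating an
 * anonymous, pageable range anywhere at or above "addr".
 *
 *	addr = vm_map_min(map);
 *	rv = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv == KERN_SUCCESS)
 *		... addr now holds the start of the new mapping ...
 */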
1487 
1488 /*
1489  *	vm_map_simplify_entry:
1490  *
1491  *	Simplify the given map entry by merging with either neighbor.  This
1492  *	routine also has the ability to merge with both neighbors.
1493  *
1494  *	The map must be locked.
1495  *
1496  *	This routine guarantees that the passed entry remains valid (though
1497  *	possibly extended).  When merging, this routine may delete one or
1498  *	both neighbors.
1499  */
1500 void
1501 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1502 {
1503 	vm_map_entry_t next, prev;
1504 	vm_size_t prevsize, esize;
1505 
1506 	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1507 		return;
1508 
1509 	prev = entry->prev;
1510 	if (prev != &map->header) {
1511 		prevsize = prev->end - prev->start;
1512 		if ( (prev->end == entry->start) &&
1513 		     (prev->object.vm_object == entry->object.vm_object) &&
1514 		     (!prev->object.vm_object ||
1515 			(prev->offset + prevsize == entry->offset)) &&
1516 		     (prev->eflags == entry->eflags) &&
1517 		     (prev->protection == entry->protection) &&
1518 		     (prev->max_protection == entry->max_protection) &&
1519 		     (prev->inheritance == entry->inheritance) &&
1520 		     (prev->wired_count == entry->wired_count) &&
1521 		     (prev->cred == entry->cred)) {
1522 			vm_map_entry_unlink(map, prev);
1523 			entry->start = prev->start;
1524 			entry->offset = prev->offset;
1525 			if (entry->prev != &map->header)
1526 				vm_map_entry_resize_free(map, entry->prev);
1527 
1528 			/*
1529 			 * If the backing object is a vnode object,
1530 			 * vm_object_deallocate() calls vrele().
1531 			 * However, vrele() does not lock the vnode
1532 			 * because the vnode has additional
1533 			 * references.  Thus, the map lock can be kept
1534 			 * without causing a lock-order reversal with
1535 			 * the vnode lock.
1536 			 *
1537 			 * Since we count the number of virtual page
1538 			 * mappings in object->un_pager.vnp.writemappings,
1539 			 * the writemappings value should not be adjusted
1540 			 * when the entry is disposed of.
1541 			 */
1542 			if (prev->object.vm_object)
1543 				vm_object_deallocate(prev->object.vm_object);
1544 			if (prev->cred != NULL)
1545 				crfree(prev->cred);
1546 			vm_map_entry_dispose(map, prev);
1547 		}
1548 	}
1549 
1550 	next = entry->next;
1551 	if (next != &map->header) {
1552 		esize = entry->end - entry->start;
1553 		if ((entry->end == next->start) &&
1554 		    (next->object.vm_object == entry->object.vm_object) &&
1555 		     (!entry->object.vm_object ||
1556 			(entry->offset + esize == next->offset)) &&
1557 		    (next->eflags == entry->eflags) &&
1558 		    (next->protection == entry->protection) &&
1559 		    (next->max_protection == entry->max_protection) &&
1560 		    (next->inheritance == entry->inheritance) &&
1561 		    (next->wired_count == entry->wired_count) &&
1562 		    (next->cred == entry->cred)) {
1563 			vm_map_entry_unlink(map, next);
1564 			entry->end = next->end;
1565 			vm_map_entry_resize_free(map, entry);
1566 
1567 			/*
1568 			 * See comment above.
1569 			 */
1570 			if (next->object.vm_object)
1571 				vm_object_deallocate(next->object.vm_object);
1572 			if (next->cred != NULL)
1573 				crfree(next->cred);
1574 			vm_map_entry_dispose(map, next);
1575 		}
1576 	}
1577 }
1578 /*
1579  *	vm_map_clip_start:	[ internal use only ]
1580  *
1581  *	Asserts that the given entry begins at or after
1582  *	the specified address; if necessary,
1583  *	it splits the entry into two.
1584  */
1585 #define vm_map_clip_start(map, entry, startaddr) \
1586 { \
1587 	if (startaddr > entry->start) \
1588 		_vm_map_clip_start(map, entry, startaddr); \
1589 }
1590 
1591 /*
1592  *	This routine is called only when it is known that
1593  *	the entry must be split.
1594  */
1595 static void
1596 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1597 {
1598 	vm_map_entry_t new_entry;
1599 
1600 	VM_MAP_ASSERT_LOCKED(map);
1601 
1602 	/*
1603 	 * Split off the front portion -- note that we must insert the new
1604 	 * entry BEFORE this one, so that this entry has the specified
1605 	 * starting address.
1606 	 */
1607 	vm_map_simplify_entry(map, entry);
1608 
1609 	/*
1610 	 * If there is no object backing this entry, we might as well create
1611 	 * one now.  If we defer it, an object can get created after the map
1612 	 * is clipped, and individual objects will be created for the split-up
1613 	 * map.  This is a bit of a hack, but is also about the best place to
1614 	 * put this improvement.
1615 	 */
1616 	if (entry->object.vm_object == NULL && !map->system_map) {
1617 		vm_object_t object;
1618 		object = vm_object_allocate(OBJT_DEFAULT,
1619 				atop(entry->end - entry->start));
1620 		entry->object.vm_object = object;
1621 		entry->offset = 0;
1622 		if (entry->cred != NULL) {
1623 			object->cred = entry->cred;
1624 			object->charge = entry->end - entry->start;
1625 			entry->cred = NULL;
1626 		}
1627 	} else if (entry->object.vm_object != NULL &&
1628 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1629 		   entry->cred != NULL) {
1630 		VM_OBJECT_WLOCK(entry->object.vm_object);
1631 		KASSERT(entry->object.vm_object->cred == NULL,
1632 		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1633 		entry->object.vm_object->cred = entry->cred;
1634 		entry->object.vm_object->charge = entry->end - entry->start;
1635 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1636 		entry->cred = NULL;
1637 	}
1638 
1639 	new_entry = vm_map_entry_create(map);
1640 	*new_entry = *entry;
1641 
1642 	new_entry->end = start;
1643 	entry->offset += (start - entry->start);
1644 	entry->start = start;
1645 	if (new_entry->cred != NULL)
1646 		crhold(entry->cred);
1647 
1648 	vm_map_entry_link(map, entry->prev, new_entry);
1649 
1650 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1651 		vm_object_reference(new_entry->object.vm_object);
1652 		/*
1653 		 * The object->un_pager.vnp.writemappings for the
1654 		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1655 		 * kept as is here.  The virtual pages are
1656 		 * re-distributed among the clipped entries, so the sum is
1657 		 * left the same.
1658 		 */
1659 	}
1660 }
1661 
1662 /*
1663  *	vm_map_clip_end:	[ internal use only ]
1664  *
1665  *	Asserts that the given entry ends at or before
1666  *	the specified address; if necessary,
1667  *	it splits the entry into two.
1668  */
1669 #define vm_map_clip_end(map, entry, endaddr) \
1670 { \
1671 	if ((endaddr) < (entry->end)) \
1672 		_vm_map_clip_end((map), (entry), (endaddr)); \
1673 }
1674 
1675 /*
1676  *	This routine is called only when it is known that
1677  *	the entry must be split.
1678  */
1679 static void
1680 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1681 {
1682 	vm_map_entry_t new_entry;
1683 
1684 	VM_MAP_ASSERT_LOCKED(map);
1685 
1686 	/*
1687 	 * If there is no object backing this entry, we might as well create
1688 	 * one now.  If we defer it, an object can get created after the map
1689 	 * is clipped, and individual objects will be created for the split-up
1690 	 * map.  This is a bit of a hack, but is also about the best place to
1691 	 * put this improvement.
1692 	 */
1693 	if (entry->object.vm_object == NULL && !map->system_map) {
1694 		vm_object_t object;
1695 		object = vm_object_allocate(OBJT_DEFAULT,
1696 				atop(entry->end - entry->start));
1697 		entry->object.vm_object = object;
1698 		entry->offset = 0;
1699 		if (entry->cred != NULL) {
1700 			object->cred = entry->cred;
1701 			object->charge = entry->end - entry->start;
1702 			entry->cred = NULL;
1703 		}
1704 	} else if (entry->object.vm_object != NULL &&
1705 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1706 		   entry->cred != NULL) {
1707 		VM_OBJECT_WLOCK(entry->object.vm_object);
1708 		KASSERT(entry->object.vm_object->cred == NULL,
1709 		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1710 		entry->object.vm_object->cred = entry->cred;
1711 		entry->object.vm_object->charge = entry->end - entry->start;
1712 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1713 		entry->cred = NULL;
1714 	}
1715 
1716 	/*
1717 	 * Create a new entry and insert it AFTER the specified entry
1718 	 */
1719 	new_entry = vm_map_entry_create(map);
1720 	*new_entry = *entry;
1721 
1722 	new_entry->start = entry->end = end;
1723 	new_entry->offset += (end - entry->start);
1724 	if (new_entry->cred != NULL)
1725 		crhold(entry->cred);
1726 
1727 	vm_map_entry_link(map, entry, new_entry);
1728 
1729 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1730 		vm_object_reference(new_entry->object.vm_object);
1731 	}
1732 }
1733 
1734 /*
1735  *	vm_map_submap:		[ kernel use only ]
1736  *
1737  *	Mark the given range as handled by a subordinate map.
1738  *
1739  *	This range must have been created with vm_map_find,
1740  *	and no other operations may have been performed on this
1741  *	range prior to calling vm_map_submap.
1742  *
1743  *	Only a limited number of operations can be performed
1744  *	within this range after calling vm_map_submap:
1745  *		vm_fault
1746  *	[Don't try vm_map_copy!]
1747  *
1748  *	To remove a submapping, one must first remove the
1749  *	range from the superior map, and then destroy the
1750  *	submap (if desired).  [Better yet, don't try it.]
1751  */
1752 int
1753 vm_map_submap(
1754 	vm_map_t map,
1755 	vm_offset_t start,
1756 	vm_offset_t end,
1757 	vm_map_t submap)
1758 {
1759 	vm_map_entry_t entry;
1760 	int result = KERN_INVALID_ARGUMENT;
1761 
1762 	vm_map_lock(map);
1763 
1764 	VM_MAP_RANGE_CHECK(map, start, end);
1765 
1766 	if (vm_map_lookup_entry(map, start, &entry)) {
1767 		vm_map_clip_start(map, entry, start);
1768 	} else
1769 		entry = entry->next;
1770 
1771 	vm_map_clip_end(map, entry, end);
1772 
1773 	if ((entry->start == start) && (entry->end == end) &&
1774 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1775 	    (entry->object.vm_object == NULL)) {
1776 		entry->object.sub_map = submap;
1777 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1778 		result = KERN_SUCCESS;
1779 	}
1780 	vm_map_unlock(map);
1781 
1782 	return (result);
1783 }
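/*
 * Illustrative setup sketch (not part of this file): a submap is normally
 * created by reserving a range in the parent with vm_map_find(), building
 * a new map over that range, and installing it here; kmem_suballoc() is
 * the usual wrapper for this sequence.
 *
 *	rv = vm_map_find(parent, NULL, 0, &min, size, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE);
 *	submap = vm_map_create(vm_map_pmap(parent), min, min + size);
 *	rv = vm_map_submap(parent, min, min + size, submap);
 */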
1784 
1785 /*
1786  * The maximum number of pages to map
1787  */
1788 #define	MAX_INIT_PT	96
1789 
1790 /*
1791  *	vm_map_pmap_enter:
1792  *
1793  *	Preload read-only mappings for the specified object's resident pages
1794  *	into the target map.  If "flags" is MAP_PREFAULT_PARTIAL, then only
1795  *	the resident pages within the address range [addr, addr + ulmin(size,
1796  *	ptoa(MAX_INIT_PT))) are mapped.  Otherwise, all resident pages within
1797  *	the specified address range are mapped.  This eliminates many soft
1798  *	faults on process startup and immediately after an mmap(2).  Because
1799  *	these are speculative mappings, cached pages are not reactivated and
1800  *	mapped.
1801  */
1802 void
1803 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1804     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1805 {
1806 	vm_offset_t start;
1807 	vm_page_t p, p_start;
1808 	vm_pindex_t psize, tmpidx;
1809 
1810 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1811 		return;
1812 	VM_OBJECT_RLOCK(object);
1813 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1814 		VM_OBJECT_RUNLOCK(object);
1815 		VM_OBJECT_WLOCK(object);
1816 		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1817 			pmap_object_init_pt(map->pmap, addr, object, pindex,
1818 			    size);
1819 			VM_OBJECT_WUNLOCK(object);
1820 			return;
1821 		}
1822 		VM_OBJECT_LOCK_DOWNGRADE(object);
1823 	}
1824 
1825 	psize = atop(size);
1826 	if (psize > MAX_INIT_PT && (flags & MAP_PREFAULT_PARTIAL) != 0)
1827 		psize = MAX_INIT_PT;
1828 	if (psize + pindex > object->size) {
1829 		if (object->size < pindex) {
1830 			VM_OBJECT_RUNLOCK(object);
1831 			return;
1832 		}
1833 		psize = object->size - pindex;
1834 	}
1835 
1836 	start = 0;
1837 	p_start = NULL;
1838 
1839 	p = vm_page_find_least(object, pindex);
1840 	/*
1841 	 * Assert: the variable p is either (1) the page with the
1842 	 * least pindex greater than or equal to the parameter pindex
1843 	 * or (2) NULL.
1844 	 */
1845 	for (;
1846 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1847 	     p = TAILQ_NEXT(p, listq)) {
1848 		/*
1849 		 * Don't allow a madvise to blow away our really
1850 		 * free pages by allocating pv entries.
1851 		 */
1852 		if ((flags & MAP_PREFAULT_MADVISE) &&
1853 		    cnt.v_free_count < cnt.v_free_reserved) {
1854 			psize = tmpidx;
1855 			break;
1856 		}
1857 		if (p->valid == VM_PAGE_BITS_ALL) {
1858 			if (p_start == NULL) {
1859 				start = addr + ptoa(tmpidx);
1860 				p_start = p;
1861 			}
1862 		} else if (p_start != NULL) {
1863 			pmap_enter_object(map->pmap, start, addr +
1864 			    ptoa(tmpidx), p_start, prot);
1865 			p_start = NULL;
1866 		}
1867 	}
1868 	if (p_start != NULL)
1869 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1870 		    p_start, prot);
1871 	VM_OBJECT_RUNLOCK(object);
1872 }
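/*
 * Editorial example (not part of the original source): one way a caller
 * could preload read-only mappings, with the map read-locked as in the
 * MADV_WILLNEED path of vm_map_madvise() below; MAP_PREFAULT_PARTIAL bounds
 * the work as described above.  Guarded out of the build; the wrapper name
 * is illustrative only.
 */
#if 0
static void
example_prefault(vm_map_t map, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	vm_map_lock_read(map);
	vm_map_pmap_enter(map, addr, VM_PROT_READ, object, pindex, size,
	    MAP_PREFAULT_PARTIAL);
	vm_map_unlock_read(map);
}
#endif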
1873 
1874 /*
1875  *	vm_map_protect:
1876  *
1877  *	Sets the protection of the specified address
1878  *	region in the target map.  If "set_max" is
1879  *	specified, the maximum protection is to be set;
1880  *	otherwise, only the current protection is affected.
1881  */
1882 int
1883 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1884 	       vm_prot_t new_prot, boolean_t set_max)
1885 {
1886 	vm_map_entry_t current, entry;
1887 	vm_object_t obj;
1888 	struct ucred *cred;
1889 	vm_prot_t old_prot;
1890 
1891 	vm_map_lock(map);
1892 
1893 	VM_MAP_RANGE_CHECK(map, start, end);
1894 
1895 	if (vm_map_lookup_entry(map, start, &entry)) {
1896 		vm_map_clip_start(map, entry, start);
1897 	} else {
1898 		entry = entry->next;
1899 	}
1900 
1901 	/*
1902 	 * Make a first pass to check for protection violations.
1903 	 */
1904 	current = entry;
1905 	while ((current != &map->header) && (current->start < end)) {
1906 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1907 			vm_map_unlock(map);
1908 			return (KERN_INVALID_ARGUMENT);
1909 		}
1910 		if ((new_prot & current->max_protection) != new_prot) {
1911 			vm_map_unlock(map);
1912 			return (KERN_PROTECTION_FAILURE);
1913 		}
1914 		current = current->next;
1915 	}
1916 
1917 
1918 	/*
1919 	 * Do an accounting pass for private read-only mappings that
1920 	 * now will do cow due to allowed write (e.g. debugger sets
1921 	 * breakpoint on text segment)
1922 	 */
1923 	for (current = entry; (current != &map->header) &&
1924 	     (current->start < end); current = current->next) {
1925 
1926 		vm_map_clip_end(map, current, end);
1927 
1928 		if (set_max ||
1929 		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1930 		    ENTRY_CHARGED(current)) {
1931 			continue;
1932 		}
1933 
1934 		cred = curthread->td_ucred;
1935 		obj = current->object.vm_object;
1936 
1937 		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1938 			if (!swap_reserve(current->end - current->start)) {
1939 				vm_map_unlock(map);
1940 				return (KERN_RESOURCE_SHORTAGE);
1941 			}
1942 			crhold(cred);
1943 			current->cred = cred;
1944 			continue;
1945 		}
1946 
1947 		VM_OBJECT_WLOCK(obj);
1948 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1949 			VM_OBJECT_WUNLOCK(obj);
1950 			continue;
1951 		}
1952 
1953 		/*
1954 		 * Charge for the whole object allocation now, since
1955 		 * we cannot distinguish between non-charged and
1956 		 * charged clipped mapping of the same object later.
1957 		 */
1958 		KASSERT(obj->charge == 0,
1959 		    ("vm_map_protect: object %p overcharged\n", obj));
1960 		if (!swap_reserve(ptoa(obj->size))) {
1961 			VM_OBJECT_WUNLOCK(obj);
1962 			vm_map_unlock(map);
1963 			return (KERN_RESOURCE_SHORTAGE);
1964 		}
1965 
1966 		crhold(cred);
1967 		obj->cred = cred;
1968 		obj->charge = ptoa(obj->size);
1969 		VM_OBJECT_WUNLOCK(obj);
1970 	}
1971 
1972 	/*
1973 	 * Go back and fix up protections. [Note that clipping is not
1974 	 * necessary the second time.]
1975 	 */
1976 	current = entry;
1977 	while ((current != &map->header) && (current->start < end)) {
1978 		old_prot = current->protection;
1979 
1980 		if (set_max)
1981 			current->protection =
1982 			    (current->max_protection = new_prot) &
1983 			    old_prot;
1984 		else
1985 			current->protection = new_prot;
1986 
1987 		if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED))
1988 		     == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) &&
1989 		    (current->protection & VM_PROT_WRITE) != 0 &&
1990 		    (old_prot & VM_PROT_WRITE) == 0) {
1991 			vm_fault_copy_entry(map, map, current, current, NULL);
1992 		}
1993 
1994 		/*
1995 		 * When restricting access, update the physical map.  Worry
1996 		 * about copy-on-write here.
1997 		 */
1998 		if ((old_prot & ~current->protection) != 0) {
1999 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2000 							VM_PROT_ALL)
2001 			pmap_protect(map->pmap, current->start,
2002 			    current->end,
2003 			    current->protection & MASK(current));
2004 #undef	MASK
2005 		}
2006 		vm_map_simplify_entry(map, current);
2007 		current = current->next;
2008 	}
2009 	vm_map_unlock(map);
2010 	return (KERN_SUCCESS);
2011 }
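/*
 * Editorial example (not part of the original source): an mprotect(2)-style
 * use of vm_map_protect() on the current process map.  The caller rounds the
 * range to page boundaries and passes set_max == FALSE so that only the
 * current protection changes.  Guarded out of the build; the name is
 * illustrative only.
 */
#if 0
static int
example_protect(vm_offset_t addr, vm_size_t len, vm_prot_t prot)
{
	vm_map_t map;

	map = &curproc->p_vmspace->vm_map;
	return (vm_map_protect(map, trunc_page(addr), round_page(addr + len),
	    prot, FALSE));
}
#endif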
2012 
2013 /*
2014  *	vm_map_madvise:
2015  *
2016  *	This routine traverses a process's map, handling the madvise
2017  *	system call.  Advisories are classified as either those affecting
2018  *	the vm_map_entry structure or those affecting the underlying
2019  *	objects.
2020  */
2021 int
2022 vm_map_madvise(
2023 	vm_map_t map,
2024 	vm_offset_t start,
2025 	vm_offset_t end,
2026 	int behav)
2027 {
2028 	vm_map_entry_t current, entry;
2029 	int modify_map = 0;
2030 
2031 	/*
2032 	 * Some madvise calls directly modify the vm_map_entry, in which case
2033 	 * we need to use an exclusive lock on the map and we need to perform
2034 	 * various clipping operations.  Otherwise we only need a read-lock
2035 	 * on the map.
2036 	 */
2037 	switch(behav) {
2038 	case MADV_NORMAL:
2039 	case MADV_SEQUENTIAL:
2040 	case MADV_RANDOM:
2041 	case MADV_NOSYNC:
2042 	case MADV_AUTOSYNC:
2043 	case MADV_NOCORE:
2044 	case MADV_CORE:
2045 		modify_map = 1;
2046 		vm_map_lock(map);
2047 		break;
2048 	case MADV_WILLNEED:
2049 	case MADV_DONTNEED:
2050 	case MADV_FREE:
2051 		vm_map_lock_read(map);
2052 		break;
2053 	default:
2054 		return (KERN_INVALID_ARGUMENT);
2055 	}
2056 
2057 	/*
2058 	 * Locate starting entry and clip if necessary.
2059 	 */
2060 	VM_MAP_RANGE_CHECK(map, start, end);
2061 
2062 	if (vm_map_lookup_entry(map, start, &entry)) {
2063 		if (modify_map)
2064 			vm_map_clip_start(map, entry, start);
2065 	} else {
2066 		entry = entry->next;
2067 	}
2068 
2069 	if (modify_map) {
2070 		/*
2071 		 * madvise behaviors that are implemented in the vm_map_entry.
2072 		 *
2073 		 * We clip the vm_map_entry so that behavioral changes are
2074 		 * limited to the specified address range.
2075 		 */
2076 		for (current = entry;
2077 		     (current != &map->header) && (current->start < end);
2078 		     current = current->next
2079 		) {
2080 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2081 				continue;
2082 
2083 			vm_map_clip_end(map, current, end);
2084 
2085 			switch (behav) {
2086 			case MADV_NORMAL:
2087 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2088 				break;
2089 			case MADV_SEQUENTIAL:
2090 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2091 				break;
2092 			case MADV_RANDOM:
2093 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2094 				break;
2095 			case MADV_NOSYNC:
2096 				current->eflags |= MAP_ENTRY_NOSYNC;
2097 				break;
2098 			case MADV_AUTOSYNC:
2099 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2100 				break;
2101 			case MADV_NOCORE:
2102 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2103 				break;
2104 			case MADV_CORE:
2105 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2106 				break;
2107 			default:
2108 				break;
2109 			}
2110 			vm_map_simplify_entry(map, current);
2111 		}
2112 		vm_map_unlock(map);
2113 	} else {
2114 		vm_pindex_t pstart, pend;
2115 
2116 		/*
2117 		 * madvise behaviors that are implemented in the underlying
2118 		 * vm_object.
2119 		 *
2120 		 * Since we don't clip the vm_map_entry, we have to clip
2121 		 * the vm_object pindex and count.
2122 		 */
2123 		for (current = entry;
2124 		     (current != &map->header) && (current->start < end);
2125 		     current = current->next
2126 		) {
2127 			vm_offset_t useStart;
2128 
2129 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2130 				continue;
2131 
2132 			pstart = OFF_TO_IDX(current->offset);
2133 			pend = pstart + atop(current->end - current->start);
2134 			useStart = current->start;
2135 
2136 			if (current->start < start) {
2137 				pstart += atop(start - current->start);
2138 				useStart = start;
2139 			}
2140 			if (current->end > end)
2141 				pend -= atop(current->end - end);
2142 
2143 			if (pstart >= pend)
2144 				continue;
2145 
2146 			vm_object_madvise(current->object.vm_object, pstart,
2147 			    pend, behav);
2148 			if (behav == MADV_WILLNEED) {
2149 				vm_map_pmap_enter(map,
2150 				    useStart,
2151 				    current->protection,
2152 				    current->object.vm_object,
2153 				    pstart,
2154 				    ptoa(pend - pstart),
2155 				    MAP_PREFAULT_MADVISE
2156 				);
2157 			}
2158 		}
2159 		vm_map_unlock_read(map);
2160 	}
2161 	return (0);
2162 }
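/*
 * Editorial example (not part of the original source): the two classes of
 * advice handled above.  MADV_NOSYNC is applied to the map entries under the
 * exclusive lock, while MADV_WILLNEED goes to the underlying objects and may
 * prefault via vm_map_pmap_enter().  Guarded out of the build; the name is
 * illustrative only.
 */
#if 0
static void
example_advise(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	(void)vm_map_madvise(map, start, end, MADV_NOSYNC);
	(void)vm_map_madvise(map, start, end, MADV_WILLNEED);
}
#endif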
2163 
2164 
2165 /*
2166  *	vm_map_inherit:
2167  *
2168  *	Sets the inheritance of the specified address
2169  *	range in the target map.  Inheritance
2170  *	affects how the map will be shared with
2171  *	child maps at the time of vmspace_fork.
2172  */
2173 int
2174 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2175 	       vm_inherit_t new_inheritance)
2176 {
2177 	vm_map_entry_t entry;
2178 	vm_map_entry_t temp_entry;
2179 
2180 	switch (new_inheritance) {
2181 	case VM_INHERIT_NONE:
2182 	case VM_INHERIT_COPY:
2183 	case VM_INHERIT_SHARE:
2184 		break;
2185 	default:
2186 		return (KERN_INVALID_ARGUMENT);
2187 	}
2188 	vm_map_lock(map);
2189 	VM_MAP_RANGE_CHECK(map, start, end);
2190 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2191 		entry = temp_entry;
2192 		vm_map_clip_start(map, entry, start);
2193 	} else
2194 		entry = temp_entry->next;
2195 	while ((entry != &map->header) && (entry->start < end)) {
2196 		vm_map_clip_end(map, entry, end);
2197 		entry->inheritance = new_inheritance;
2198 		vm_map_simplify_entry(map, entry);
2199 		entry = entry->next;
2200 	}
2201 	vm_map_unlock(map);
2202 	return (KERN_SUCCESS);
2203 }
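/*
 * Editorial example (not part of the original source): marking a range so
 * that it is shared with the child across vmspace_fork(), as a
 * minherit(2)-style caller would.  Guarded out of the build; the name is
 * illustrative only.
 */
#if 0
static int
example_share_on_fork(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_inherit(map, start, end, VM_INHERIT_SHARE));
}
#endif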
2204 
2205 /*
2206  *	vm_map_unwire:
2207  *
2208  *	Implements both kernel and user unwiring.
2209  */
2210 int
2211 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2212     int flags)
2213 {
2214 	vm_map_entry_t entry, first_entry, tmp_entry;
2215 	vm_offset_t saved_start;
2216 	unsigned int last_timestamp;
2217 	int rv;
2218 	boolean_t need_wakeup, result, user_unwire;
2219 
2220 	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2221 	vm_map_lock(map);
2222 	VM_MAP_RANGE_CHECK(map, start, end);
2223 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2224 		if (flags & VM_MAP_WIRE_HOLESOK)
2225 			first_entry = first_entry->next;
2226 		else {
2227 			vm_map_unlock(map);
2228 			return (KERN_INVALID_ADDRESS);
2229 		}
2230 	}
2231 	last_timestamp = map->timestamp;
2232 	entry = first_entry;
2233 	while (entry != &map->header && entry->start < end) {
2234 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2235 			/*
2236 			 * We have not yet clipped the entry.
2237 			 */
2238 			saved_start = (start >= entry->start) ? start :
2239 			    entry->start;
2240 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2241 			if (vm_map_unlock_and_wait(map, 0)) {
2242 				/*
2243 				 * Allow interruption of user unwiring?
2244 				 */
2245 			}
2246 			vm_map_lock(map);
2247 			if (last_timestamp+1 != map->timestamp) {
2248 				/*
2249 				 * Look again for the entry because the map was
2250 				 * modified while it was unlocked.
2251 				 * Specifically, the entry may have been
2252 				 * clipped, merged, or deleted.
2253 				 */
2254 				if (!vm_map_lookup_entry(map, saved_start,
2255 				    &tmp_entry)) {
2256 					if (flags & VM_MAP_WIRE_HOLESOK)
2257 						tmp_entry = tmp_entry->next;
2258 					else {
2259 						if (saved_start == start) {
2260 							/*
2261 							 * first_entry has been deleted.
2262 							 */
2263 							vm_map_unlock(map);
2264 							return (KERN_INVALID_ADDRESS);
2265 						}
2266 						end = saved_start;
2267 						rv = KERN_INVALID_ADDRESS;
2268 						goto done;
2269 					}
2270 				}
2271 				if (entry == first_entry)
2272 					first_entry = tmp_entry;
2273 				else
2274 					first_entry = NULL;
2275 				entry = tmp_entry;
2276 			}
2277 			last_timestamp = map->timestamp;
2278 			continue;
2279 		}
2280 		vm_map_clip_start(map, entry, start);
2281 		vm_map_clip_end(map, entry, end);
2282 		/*
2283 		 * Mark the entry in case the map lock is released.  (See
2284 		 * above.)
2285 		 */
2286 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2287 		entry->wiring_thread = curthread;
2288 		/*
2289 		 * Check the map for holes in the specified region.
2290 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2291 		 */
2292 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2293 		    (entry->end < end && (entry->next == &map->header ||
2294 		    entry->next->start > entry->end))) {
2295 			end = entry->end;
2296 			rv = KERN_INVALID_ADDRESS;
2297 			goto done;
2298 		}
2299 		/*
2300 		 * If system unwiring, require that the entry is system wired.
2301 		 */
2302 		if (!user_unwire &&
2303 		    vm_map_entry_system_wired_count(entry) == 0) {
2304 			end = entry->end;
2305 			rv = KERN_INVALID_ARGUMENT;
2306 			goto done;
2307 		}
2308 		entry = entry->next;
2309 	}
2310 	rv = KERN_SUCCESS;
2311 done:
2312 	need_wakeup = FALSE;
2313 	if (first_entry == NULL) {
2314 		result = vm_map_lookup_entry(map, start, &first_entry);
2315 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2316 			first_entry = first_entry->next;
2317 		else
2318 			KASSERT(result, ("vm_map_unwire: lookup failed"));
2319 	}
2320 	for (entry = first_entry; entry != &map->header && entry->start < end;
2321 	    entry = entry->next) {
2322 		/*
2323 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2324 		 * space in the unwired region could have been mapped
2325 		 * while the map lock was dropped for draining
2326 		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2327 		 * could be simultaneously wiring this new mapping
2328 		 * entry.  Detect these cases and skip any entries
2329 		 * marked as in transition by us.
2330 		 */
2331 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2332 		    entry->wiring_thread != curthread) {
2333 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2334 			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2335 			continue;
2336 		}
2337 
2338 		if (rv == KERN_SUCCESS && (!user_unwire ||
2339 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2340 			if (user_unwire)
2341 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2342 			entry->wired_count--;
2343 			if (entry->wired_count == 0) {
2344 				/*
2345 				 * Retain the map lock.
2346 				 */
2347 				vm_fault_unwire(map, entry->start, entry->end,
2348 				    entry->object.vm_object != NULL &&
2349 				    (entry->object.vm_object->flags &
2350 				    OBJ_FICTITIOUS) != 0);
2351 			}
2352 		}
2353 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2354 		    ("vm_map_unwire: in-transition flag missing"));
2355 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2356 		entry->wiring_thread = NULL;
2357 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2358 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2359 			need_wakeup = TRUE;
2360 		}
2361 		vm_map_simplify_entry(map, entry);
2362 	}
2363 	vm_map_unlock(map);
2364 	if (need_wakeup)
2365 		vm_map_wakeup(map);
2366 	return (rv);
2367 }
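/*
 * Editorial example (not part of the original source): a munlock(2)-style
 * user unwire of a page-aligned range.  VM_MAP_WIRE_USER selects user (as
 * opposed to system) wiring accounting and VM_MAP_WIRE_NOHOLES makes holes
 * in the range an error.  Guarded out of the build; the name is illustrative
 * only.
 */
#if 0
static int
example_unwire_user(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_unwire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES));
}
#endif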
2368 
2369 /*
2370  *	vm_map_wire:
2371  *
2372  *	Implements both kernel and user wiring.
2373  */
2374 int
2375 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2376     int flags)
2377 {
2378 	vm_map_entry_t entry, first_entry, tmp_entry;
2379 	vm_offset_t saved_end, saved_start;
2380 	unsigned int last_timestamp;
2381 	int rv;
2382 	boolean_t fictitious, need_wakeup, result, user_wire;
2383 	vm_prot_t prot;
2384 
2385 	prot = 0;
2386 	if (flags & VM_MAP_WIRE_WRITE)
2387 		prot |= VM_PROT_WRITE;
2388 	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2389 	vm_map_lock(map);
2390 	VM_MAP_RANGE_CHECK(map, start, end);
2391 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2392 		if (flags & VM_MAP_WIRE_HOLESOK)
2393 			first_entry = first_entry->next;
2394 		else {
2395 			vm_map_unlock(map);
2396 			return (KERN_INVALID_ADDRESS);
2397 		}
2398 	}
2399 	last_timestamp = map->timestamp;
2400 	entry = first_entry;
2401 	while (entry != &map->header && entry->start < end) {
2402 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2403 			/*
2404 			 * We have not yet clipped the entry.
2405 			 */
2406 			saved_start = (start >= entry->start) ? start :
2407 			    entry->start;
2408 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2409 			if (vm_map_unlock_and_wait(map, 0)) {
2410 				/*
2411 				 * Allow interruption of user wiring?
2412 				 */
2413 			}
2414 			vm_map_lock(map);
2415 			if (last_timestamp + 1 != map->timestamp) {
2416 				/*
2417 				 * Look again for the entry because the map was
2418 				 * modified while it was unlocked.
2419 				 * Specifically, the entry may have been
2420 				 * clipped, merged, or deleted.
2421 				 */
2422 				if (!vm_map_lookup_entry(map, saved_start,
2423 				    &tmp_entry)) {
2424 					if (flags & VM_MAP_WIRE_HOLESOK)
2425 						tmp_entry = tmp_entry->next;
2426 					else {
2427 						if (saved_start == start) {
2428 							/*
2429 							 * first_entry has been deleted.
2430 							 */
2431 							vm_map_unlock(map);
2432 							return (KERN_INVALID_ADDRESS);
2433 						}
2434 						end = saved_start;
2435 						rv = KERN_INVALID_ADDRESS;
2436 						goto done;
2437 					}
2438 				}
2439 				if (entry == first_entry)
2440 					first_entry = tmp_entry;
2441 				else
2442 					first_entry = NULL;
2443 				entry = tmp_entry;
2444 			}
2445 			last_timestamp = map->timestamp;
2446 			continue;
2447 		}
2448 		vm_map_clip_start(map, entry, start);
2449 		vm_map_clip_end(map, entry, end);
2450 		/*
2451 		 * Mark the entry in case the map lock is released.  (See
2452 		 * above.)
2453 		 */
2454 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2455 		entry->wiring_thread = curthread;
2456 		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2457 		    || (entry->protection & prot) != prot) {
2458 			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2459 			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2460 				end = entry->end;
2461 				rv = KERN_INVALID_ADDRESS;
2462 				goto done;
2463 			}
2464 			goto next_entry;
2465 		}
2466 		if (entry->wired_count == 0) {
2467 			entry->wired_count++;
2468 			saved_start = entry->start;
2469 			saved_end = entry->end;
2470 			fictitious = entry->object.vm_object != NULL &&
2471 			    (entry->object.vm_object->flags &
2472 			    OBJ_FICTITIOUS) != 0;
2473 			/*
2474 			 * Release the map lock, relying on the in-transition
2475 			 * mark.  Mark the map busy for fork.
2476 			 */
2477 			vm_map_busy(map);
2478 			vm_map_unlock(map);
2479 			rv = vm_fault_wire(map, saved_start, saved_end,
2480 			    fictitious);
2481 			vm_map_lock(map);
2482 			vm_map_unbusy(map);
2483 			if (last_timestamp + 1 != map->timestamp) {
2484 				/*
2485 				 * Look again for the entry because the map was
2486 				 * modified while it was unlocked.  The entry
2487 				 * may have been clipped, but NOT merged or
2488 				 * deleted.
2489 				 */
2490 				result = vm_map_lookup_entry(map, saved_start,
2491 				    &tmp_entry);
2492 				KASSERT(result, ("vm_map_wire: lookup failed"));
2493 				if (entry == first_entry)
2494 					first_entry = tmp_entry;
2495 				else
2496 					first_entry = NULL;
2497 				entry = tmp_entry;
2498 				while (entry->end < saved_end) {
2499 					if (rv != KERN_SUCCESS) {
2500 						KASSERT(entry->wired_count == 1,
2501 						    ("vm_map_wire: bad count"));
2502 						entry->wired_count = -1;
2503 					}
2504 					entry = entry->next;
2505 				}
2506 			}
2507 			last_timestamp = map->timestamp;
2508 			if (rv != KERN_SUCCESS) {
2509 				KASSERT(entry->wired_count == 1,
2510 				    ("vm_map_wire: bad count"));
2511 				/*
2512 				 * Assign an out-of-range value to represent
2513 				 * the failure to wire this entry.
2514 				 */
2515 				entry->wired_count = -1;
2516 				end = entry->end;
2517 				goto done;
2518 			}
2519 		} else if (!user_wire ||
2520 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2521 			entry->wired_count++;
2522 		}
2523 		/*
2524 		 * Check the map for holes in the specified region.
2525 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2526 		 */
2527 	next_entry:
2528 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2529 		    (entry->end < end && (entry->next == &map->header ||
2530 		    entry->next->start > entry->end))) {
2531 			end = entry->end;
2532 			rv = KERN_INVALID_ADDRESS;
2533 			goto done;
2534 		}
2535 		entry = entry->next;
2536 	}
2537 	rv = KERN_SUCCESS;
2538 done:
2539 	need_wakeup = FALSE;
2540 	if (first_entry == NULL) {
2541 		result = vm_map_lookup_entry(map, start, &first_entry);
2542 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2543 			first_entry = first_entry->next;
2544 		else
2545 			KASSERT(result, ("vm_map_wire: lookup failed"));
2546 	}
2547 	for (entry = first_entry; entry != &map->header && entry->start < end;
2548 	    entry = entry->next) {
2549 		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2550 			goto next_entry_done;
2551 
2552 		/*
2553 		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2554 		 * space in the unwired region could have been mapped
2555 		 * while the map lock was dropped for faulting in the
2556 		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2557 		 * Moreover, another thread could be simultaneously
2558 		 * wiring this new mapping entry.  Detect these cases
2559 		 * and skip any entries marked as in transition by us.
2560 		 */
2561 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2562 		    entry->wiring_thread != curthread) {
2563 			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2564 			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2565 			continue;
2566 		}
2567 
2568 		if (rv == KERN_SUCCESS) {
2569 			if (user_wire)
2570 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2571 		} else if (entry->wired_count == -1) {
2572 			/*
2573 			 * Wiring failed on this entry.  Thus, unwiring is
2574 			 * unnecessary.
2575 			 */
2576 			entry->wired_count = 0;
2577 		} else {
2578 			if (!user_wire ||
2579 			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2580 				entry->wired_count--;
2581 			if (entry->wired_count == 0) {
2582 				/*
2583 				 * Retain the map lock.
2584 				 */
2585 				vm_fault_unwire(map, entry->start, entry->end,
2586 				    entry->object.vm_object != NULL &&
2587 				    (entry->object.vm_object->flags &
2588 				    OBJ_FICTITIOUS) != 0);
2589 			}
2590 		}
2591 	next_entry_done:
2592 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2593 		    ("vm_map_wire: in-transition flag missing %p", entry));
2594 		KASSERT(entry->wiring_thread == curthread,
2595 		    ("vm_map_wire: alien wire %p", entry));
2596 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2597 		    MAP_ENTRY_WIRE_SKIPPED);
2598 		entry->wiring_thread = NULL;
2599 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2600 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2601 			need_wakeup = TRUE;
2602 		}
2603 		vm_map_simplify_entry(map, entry);
2604 	}
2605 	vm_map_unlock(map);
2606 	if (need_wakeup)
2607 		vm_map_wakeup(map);
2608 	return (rv);
2609 }
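/*
 * Editorial example (not part of the original source): the mlock(2)-style
 * counterpart of the unwire example above, requesting user wiring of a
 * page-aligned range and treating holes as an error.  Guarded out of the
 * build; the name is illustrative only.
 */
#if 0
static int
example_wire_user(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES));
}
#endif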
2610 
2611 /*
2612  * vm_map_sync
2613  *
2614  * Push any dirty cached pages in the address range to their pager.
2615  * If syncio is TRUE, dirty pages are written synchronously.
2616  * If invalidate is TRUE, any cached pages are freed as well.
2617  *
2618  * If the size of the region from start to end is zero, we are
2619  * supposed to flush all modified pages within the region containing
2620  * start.  Unfortunately, a region can be split or coalesced with
2621  * neighboring regions, making it difficult to determine what the
2622  * original region was.  Therefore, we approximate this requirement by
2623  * flushing the current region containing start.
2624  *
2625  * Returns an error if any part of the specified range is not mapped.
2626  */
2627 int
2628 vm_map_sync(
2629 	vm_map_t map,
2630 	vm_offset_t start,
2631 	vm_offset_t end,
2632 	boolean_t syncio,
2633 	boolean_t invalidate)
2634 {
2635 	vm_map_entry_t current;
2636 	vm_map_entry_t entry;
2637 	vm_size_t size;
2638 	vm_object_t object;
2639 	vm_ooffset_t offset;
2640 	unsigned int last_timestamp;
2641 	boolean_t failed;
2642 
2643 	vm_map_lock_read(map);
2644 	VM_MAP_RANGE_CHECK(map, start, end);
2645 	if (!vm_map_lookup_entry(map, start, &entry)) {
2646 		vm_map_unlock_read(map);
2647 		return (KERN_INVALID_ADDRESS);
2648 	} else if (start == end) {
2649 		start = entry->start;
2650 		end = entry->end;
2651 	}
2652 	/*
2653 	 * Make a first pass to check for user-wired memory and holes.
2654 	 */
2655 	for (current = entry; current != &map->header && current->start < end;
2656 	    current = current->next) {
2657 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2658 			vm_map_unlock_read(map);
2659 			return (KERN_INVALID_ARGUMENT);
2660 		}
2661 		if (end > current->end &&
2662 		    (current->next == &map->header ||
2663 			current->end != current->next->start)) {
2664 			vm_map_unlock_read(map);
2665 			return (KERN_INVALID_ADDRESS);
2666 		}
2667 	}
2668 
2669 	if (invalidate)
2670 		pmap_remove(map->pmap, start, end);
2671 	failed = FALSE;
2672 
2673 	/*
2674 	 * Make a second pass, cleaning/uncaching pages from the indicated
2675 	 * objects as we go.
2676 	 */
2677 	for (current = entry; current != &map->header && current->start < end;) {
2678 		offset = current->offset + (start - current->start);
2679 		size = (end <= current->end ? end : current->end) - start;
2680 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2681 			vm_map_t smap;
2682 			vm_map_entry_t tentry;
2683 			vm_size_t tsize;
2684 
2685 			smap = current->object.sub_map;
2686 			vm_map_lock_read(smap);
2687 			(void) vm_map_lookup_entry(smap, offset, &tentry);
2688 			tsize = tentry->end - offset;
2689 			if (tsize < size)
2690 				size = tsize;
2691 			object = tentry->object.vm_object;
2692 			offset = tentry->offset + (offset - tentry->start);
2693 			vm_map_unlock_read(smap);
2694 		} else {
2695 			object = current->object.vm_object;
2696 		}
2697 		vm_object_reference(object);
2698 		last_timestamp = map->timestamp;
2699 		vm_map_unlock_read(map);
2700 		if (!vm_object_sync(object, offset, size, syncio, invalidate))
2701 			failed = TRUE;
2702 		start += size;
2703 		vm_object_deallocate(object);
2704 		vm_map_lock_read(map);
2705 		if (last_timestamp == map->timestamp ||
2706 		    !vm_map_lookup_entry(map, start, &current))
2707 			current = current->next;
2708 	}
2709 
2710 	vm_map_unlock_read(map);
2711 	return (failed ? KERN_FAILURE : KERN_SUCCESS);
2712 }
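/*
 * Editorial example (not part of the original source): an msync(2)-style
 * flush of a range, writing dirty pages synchronously (syncio == TRUE)
 * without freeing cached pages (invalidate == FALSE).  Guarded out of the
 * build; the name is illustrative only.
 */
#if 0
static int
example_flush(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_sync(map, start, end, TRUE, FALSE));
}
#endif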
2713 
2714 /*
2715  *	vm_map_entry_unwire:	[ internal use only ]
2716  *
2717  *	Make the region specified by this entry pageable.
2718  *
2719  *	The map in question should be locked.
2720  *	[This is the reason for this routine's existence.]
2721  */
2722 static void
2723 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2724 {
2725 	vm_fault_unwire(map, entry->start, entry->end,
2726 	    entry->object.vm_object != NULL &&
2727 	    (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
2728 	entry->wired_count = 0;
2729 }
2730 
2731 static void
2732 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2733 {
2734 
2735 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2736 		vm_object_deallocate(entry->object.vm_object);
2737 	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2738 }
2739 
2740 /*
2741  *	vm_map_entry_delete:	[ internal use only ]
2742  *
2743  *	Deallocate the given entry from the target map.
2744  */
2745 static void
2746 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2747 {
2748 	vm_object_t object;
2749 	vm_pindex_t offidxstart, offidxend, count, size1;
2750 	vm_ooffset_t size;
2751 
2752 	vm_map_entry_unlink(map, entry);
2753 	object = entry->object.vm_object;
2754 	size = entry->end - entry->start;
2755 	map->size -= size;
2756 
2757 	if (entry->cred != NULL) {
2758 		swap_release_by_cred(size, entry->cred);
2759 		crfree(entry->cred);
2760 	}
2761 
2762 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2763 	    (object != NULL)) {
2764 		KASSERT(entry->cred == NULL || object->cred == NULL ||
2765 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2766 		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2767 		count = OFF_TO_IDX(size);
2768 		offidxstart = OFF_TO_IDX(entry->offset);
2769 		offidxend = offidxstart + count;
2770 		VM_OBJECT_WLOCK(object);
2771 		if (object->ref_count != 1 &&
2772 		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2773 		    object == kernel_object || object == kmem_object)) {
2774 			vm_object_collapse(object);
2775 
2776 			/*
2777 			 * The option OBJPR_NOTMAPPED can be passed here
2778 			 * because vm_map_delete() already performed
2779 			 * pmap_remove() on the only mapping to this range
2780 			 * of pages.
2781 			 */
2782 			vm_object_page_remove(object, offidxstart, offidxend,
2783 			    OBJPR_NOTMAPPED);
2784 			if (object->type == OBJT_SWAP)
2785 				swap_pager_freespace(object, offidxstart, count);
2786 			if (offidxend >= object->size &&
2787 			    offidxstart < object->size) {
2788 				size1 = object->size;
2789 				object->size = offidxstart;
2790 				if (object->cred != NULL) {
2791 					size1 -= object->size;
2792 					KASSERT(object->charge >= ptoa(size1),
2793 					    ("vm_map_entry_delete: object->charge < 0"));
2794 					swap_release_by_cred(ptoa(size1), object->cred);
2795 					object->charge -= ptoa(size1);
2796 				}
2797 			}
2798 		}
2799 		VM_OBJECT_WUNLOCK(object);
2800 	} else
2801 		entry->object.vm_object = NULL;
2802 	if (map->system_map)
2803 		vm_map_entry_deallocate(entry, TRUE);
2804 	else {
2805 		entry->next = curthread->td_map_def_user;
2806 		curthread->td_map_def_user = entry;
2807 	}
2808 }
2809 
2810 /*
2811  *	vm_map_delete:	[ internal use only ]
2812  *
2813  *	Deallocates the given address range from the target
2814  *	map.
2815  */
2816 int
2817 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2818 {
2819 	vm_map_entry_t entry;
2820 	vm_map_entry_t first_entry;
2821 
2822 	VM_MAP_ASSERT_LOCKED(map);
2823 
2824 	/*
2825 	 * Find the start of the region, and clip it
2826 	 */
2827 	if (!vm_map_lookup_entry(map, start, &first_entry))
2828 		entry = first_entry->next;
2829 	else {
2830 		entry = first_entry;
2831 		vm_map_clip_start(map, entry, start);
2832 	}
2833 
2834 	/*
2835 	 * Step through all entries in this region
2836 	 */
2837 	while ((entry != &map->header) && (entry->start < end)) {
2838 		vm_map_entry_t next;
2839 
2840 		/*
2841 		 * Wait for wiring or unwiring of an entry to complete.
2842 		 * Also wait for any system wirings to disappear on
2843 		 * user maps.
2844 		 */
2845 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2846 		    (vm_map_pmap(map) != kernel_pmap &&
2847 		    vm_map_entry_system_wired_count(entry) != 0)) {
2848 			unsigned int last_timestamp;
2849 			vm_offset_t saved_start;
2850 			vm_map_entry_t tmp_entry;
2851 
2852 			saved_start = entry->start;
2853 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2854 			last_timestamp = map->timestamp;
2855 			(void) vm_map_unlock_and_wait(map, 0);
2856 			vm_map_lock(map);
2857 			if (last_timestamp + 1 != map->timestamp) {
2858 				/*
2859 				 * Look again for the entry because the map was
2860 				 * modified while it was unlocked.
2861 				 * Specifically, the entry may have been
2862 				 * clipped, merged, or deleted.
2863 				 */
2864 				if (!vm_map_lookup_entry(map, saved_start,
2865 							 &tmp_entry))
2866 					entry = tmp_entry->next;
2867 				else {
2868 					entry = tmp_entry;
2869 					vm_map_clip_start(map, entry,
2870 							  saved_start);
2871 				}
2872 			}
2873 			continue;
2874 		}
2875 		vm_map_clip_end(map, entry, end);
2876 
2877 		next = entry->next;
2878 
2879 		/*
2880 		 * Unwire before removing addresses from the pmap; otherwise,
2881 		 * unwiring will put the entries back in the pmap.
2882 		 */
2883 		if (entry->wired_count != 0) {
2884 			vm_map_entry_unwire(map, entry);
2885 		}
2886 
2887 		pmap_remove(map->pmap, entry->start, entry->end);
2888 
2889 		/*
2890 		 * Delete the entry only after removing all pmap
2891 		 * entries pointing to its pages.  (Otherwise, its
2892 		 * page frames may be reallocated, and any modify bits
2893 		 * will be set in the wrong object!)
2894 		 */
2895 		vm_map_entry_delete(map, entry);
2896 		entry = next;
2897 	}
2898 	return (KERN_SUCCESS);
2899 }
2900 
2901 /*
2902  *	vm_map_remove:
2903  *
2904  *	Remove the given address range from the target map.
2905  *	This is the exported form of vm_map_delete.
2906  */
2907 int
2908 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2909 {
2910 	int result;
2911 
2912 	vm_map_lock(map);
2913 	VM_MAP_RANGE_CHECK(map, start, end);
2914 	result = vm_map_delete(map, start, end);
2915 	vm_map_unlock(map);
2916 	return (result);
2917 }
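/*
 * Editorial example (not part of the original source): a munmap(2)-style
 * removal of a page-aligned range from the current process map, using the
 * exported vm_map_remove() rather than the internal vm_map_delete().
 * Guarded out of the build; the name is illustrative only.
 */
#if 0
static int
example_unmap(vm_offset_t addr, vm_size_t len)
{
	vm_map_t map;

	map = &curproc->p_vmspace->vm_map;
	return (vm_map_remove(map, trunc_page(addr), round_page(addr + len)));
}
#endif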
2918 
2919 /*
2920  *	vm_map_check_protection:
2921  *
2922  *	Assert that the target map allows the specified privilege on the
2923  *	entire address region given.  The entire region must be allocated.
2924  *
2925  *	WARNING!  This code does not and should not check whether the
2926  *	contents of the region are accessible.  For example, a smaller file
2927  *	might be mapped into a larger address space.
2928  *
2929  *	NOTE!  This code is also called by munmap().
2930  *
2931  *	The map must be locked.  A read lock is sufficient.
2932  */
2933 boolean_t
2934 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2935 			vm_prot_t protection)
2936 {
2937 	vm_map_entry_t entry;
2938 	vm_map_entry_t tmp_entry;
2939 
2940 	if (!vm_map_lookup_entry(map, start, &tmp_entry))
2941 		return (FALSE);
2942 	entry = tmp_entry;
2943 
2944 	while (start < end) {
2945 		if (entry == &map->header)
2946 			return (FALSE);
2947 		/*
2948 		 * No holes allowed!
2949 		 */
2950 		if (start < entry->start)
2951 			return (FALSE);
2952 		/*
2953 		 * Check protection associated with entry.
2954 		 */
2955 		if ((entry->protection & protection) != protection)
2956 			return (FALSE);
2957 		/* go to next entry */
2958 		start = entry->end;
2959 		entry = entry->next;
2960 	}
2961 	return (TRUE);
2962 }
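/*
 * Editorial example (not part of the original source): verifying, under a
 * read lock as required above, that an entire range is mapped with at least
 * read access.  Guarded out of the build; the name is illustrative only.
 */
#if 0
static boolean_t
example_range_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	boolean_t ok;

	vm_map_lock_read(map);
	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
	vm_map_unlock_read(map);
	return (ok);
}
#endif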
2963 
2964 /*
2965  *	vm_map_copy_entry:
2966  *
2967  *	Copies the contents of the source entry to the destination
2968  *	entry.  The entries *must* be aligned properly.
2969  */
2970 static void
2971 vm_map_copy_entry(
2972 	vm_map_t src_map,
2973 	vm_map_t dst_map,
2974 	vm_map_entry_t src_entry,
2975 	vm_map_entry_t dst_entry,
2976 	vm_ooffset_t *fork_charge)
2977 {
2978 	vm_object_t src_object;
2979 	vm_map_entry_t fake_entry;
2980 	vm_offset_t size;
2981 	struct ucred *cred;
2982 	int charged;
2983 
2984 	VM_MAP_ASSERT_LOCKED(dst_map);
2985 
2986 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2987 		return;
2988 
2989 	if (src_entry->wired_count == 0) {
2990 
2991 		/*
2992 		 * If the source entry is marked needs_copy, it is already
2993 		 * write-protected.
2994 		 */
2995 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2996 			pmap_protect(src_map->pmap,
2997 			    src_entry->start,
2998 			    src_entry->end,
2999 			    src_entry->protection & ~VM_PROT_WRITE);
3000 		}
3001 
3002 		/*
3003 		 * Make a copy of the object.
3004 		 */
3005 		size = src_entry->end - src_entry->start;
3006 		if ((src_object = src_entry->object.vm_object) != NULL) {
3007 			VM_OBJECT_WLOCK(src_object);
3008 			charged = ENTRY_CHARGED(src_entry);
3009 			if ((src_object->handle == NULL) &&
3010 				(src_object->type == OBJT_DEFAULT ||
3011 				 src_object->type == OBJT_SWAP)) {
3012 				vm_object_collapse(src_object);
3013 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3014 					vm_object_split(src_entry);
3015 					src_object = src_entry->object.vm_object;
3016 				}
3017 			}
3018 			vm_object_reference_locked(src_object);
3019 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3020 			if (src_entry->cred != NULL &&
3021 			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3022 				KASSERT(src_object->cred == NULL,
3023 				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3024 				     src_object));
3025 				src_object->cred = src_entry->cred;
3026 				src_object->charge = size;
3027 			}
3028 			VM_OBJECT_WUNLOCK(src_object);
3029 			dst_entry->object.vm_object = src_object;
3030 			if (charged) {
3031 				cred = curthread->td_ucred;
3032 				crhold(cred);
3033 				dst_entry->cred = cred;
3034 				*fork_charge += size;
3035 				if (!(src_entry->eflags &
3036 				      MAP_ENTRY_NEEDS_COPY)) {
3037 					crhold(cred);
3038 					src_entry->cred = cred;
3039 					*fork_charge += size;
3040 				}
3041 			}
3042 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3043 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3044 			dst_entry->offset = src_entry->offset;
3045 			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3046 				/*
3047 				 * MAP_ENTRY_VN_WRITECNT cannot
3048 				 * indicate write reference from
3049 				 * src_entry, since the entry is
3050 				 * marked as needs copy.  Allocate a
3051 				 * fake entry that is used to
3052 				 * decrement object->un_pager.vnp.writecount
3053 				 * at the appropriate time.  Attach
3054 				 * fake_entry to the deferred list.
3055 				 */
3056 				fake_entry = vm_map_entry_create(dst_map);
3057 				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3058 				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3059 				vm_object_reference(src_object);
3060 				fake_entry->object.vm_object = src_object;
3061 				fake_entry->start = src_entry->start;
3062 				fake_entry->end = src_entry->end;
3063 				fake_entry->next = curthread->td_map_def_user;
3064 				curthread->td_map_def_user = fake_entry;
3065 			}
3066 		} else {
3067 			dst_entry->object.vm_object = NULL;
3068 			dst_entry->offset = 0;
3069 			if (src_entry->cred != NULL) {
3070 				dst_entry->cred = curthread->td_ucred;
3071 				crhold(dst_entry->cred);
3072 				*fork_charge += size;
3073 			}
3074 		}
3075 
3076 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3077 		    dst_entry->end - dst_entry->start, src_entry->start);
3078 	} else {
3079 		/*
3080 		 * Of course, wired down pages can't be set copy-on-write.
3081 		 * Cause wired pages to be copied into the new map by
3082 		 * simulating faults (the new pages are pageable)
3083 		 */
3084 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3085 		    fork_charge);
3086 	}
3087 }
3088 
3089 /*
3090  * vmspace_map_entry_forked:
3091  * Update the newly-forked vmspace each time a map entry is inherited
3092  * or copied.  The values for vm_dsize and vm_tsize are approximate
3093  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3094  */
3095 static void
3096 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3097     vm_map_entry_t entry)
3098 {
3099 	vm_size_t entrysize;
3100 	vm_offset_t newend;
3101 
3102 	entrysize = entry->end - entry->start;
3103 	vm2->vm_map.size += entrysize;
3104 	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3105 		vm2->vm_ssize += btoc(entrysize);
3106 	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3107 	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3108 		newend = MIN(entry->end,
3109 		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3110 		vm2->vm_dsize += btoc(newend - entry->start);
3111 	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3112 	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3113 		newend = MIN(entry->end,
3114 		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3115 		vm2->vm_tsize += btoc(newend - entry->start);
3116 	}
3117 }
3118 
3119 /*
3120  * vmspace_fork:
3121  * Create a new process vmspace structure and vm_map
3122  * based on those of an existing process.  The new map
3123  * is based on the old map, according to the inheritance
3124  * values on the regions in that map.
3125  *
3126  * XXX It might be worth coalescing the entries added to the new vmspace.
3127  *
3128  * The source map must not be locked.
3129  */
3130 struct vmspace *
3131 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3132 {
3133 	struct vmspace *vm2;
3134 	vm_map_t new_map, old_map;
3135 	vm_map_entry_t new_entry, old_entry;
3136 	vm_object_t object;
3137 	int locked;
3138 
3139 	old_map = &vm1->vm_map;
3140 	/* Copy immutable fields of vm1 to vm2. */
3141 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3142 	if (vm2 == NULL)
3143 		return (NULL);
3144 	vm2->vm_taddr = vm1->vm_taddr;
3145 	vm2->vm_daddr = vm1->vm_daddr;
3146 	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3147 	vm_map_lock(old_map);
3148 	if (old_map->busy)
3149 		vm_map_wait_busy(old_map);
3150 	new_map = &vm2->vm_map;
3151 	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3152 	KASSERT(locked, ("vmspace_fork: lock failed"));
3153 
3154 	old_entry = old_map->header.next;
3155 
3156 	while (old_entry != &old_map->header) {
3157 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3158 			panic("vm_map_fork: encountered a submap");
3159 
3160 		switch (old_entry->inheritance) {
3161 		case VM_INHERIT_NONE:
3162 			break;
3163 
3164 		case VM_INHERIT_SHARE:
3165 			/*
3166 			 * Clone the entry, creating the shared object if necessary.
3167 			 */
3168 			object = old_entry->object.vm_object;
3169 			if (object == NULL) {
3170 				object = vm_object_allocate(OBJT_DEFAULT,
3171 					atop(old_entry->end - old_entry->start));
3172 				old_entry->object.vm_object = object;
3173 				old_entry->offset = 0;
3174 				if (old_entry->cred != NULL) {
3175 					object->cred = old_entry->cred;
3176 					object->charge = old_entry->end -
3177 					    old_entry->start;
3178 					old_entry->cred = NULL;
3179 				}
3180 			}
3181 
3182 			/*
3183 			 * Add the reference before calling vm_object_shadow
3184 			 * to ensure that a shadow object is created.
3185 			 */
3186 			vm_object_reference(object);
3187 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3188 				vm_object_shadow(&old_entry->object.vm_object,
3189 				    &old_entry->offset,
3190 				    old_entry->end - old_entry->start);
3191 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3192 				/* Transfer the second reference too. */
3193 				vm_object_reference(
3194 				    old_entry->object.vm_object);
3195 
3196 				/*
3197 				 * As in vm_map_simplify_entry(), the
3198 				 * vnode lock will not be acquired in
3199 				 * this call to vm_object_deallocate().
3200 				 */
3201 				vm_object_deallocate(object);
3202 				object = old_entry->object.vm_object;
3203 			}
3204 			VM_OBJECT_WLOCK(object);
3205 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3206 			if (old_entry->cred != NULL) {
3207 				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3208 				object->cred = old_entry->cred;
3209 				object->charge = old_entry->end - old_entry->start;
3210 				old_entry->cred = NULL;
3211 			}
3212 
3213 			/*
3214 			 * Assert the correct state of the vnode
3215 			 * v_writecount while the object is locked, so
3216 			 * that it need not be relocked later just for
3217 			 * the assertion.
3218 			 */
3219 			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3220 			    object->type == OBJT_VNODE) {
3221 				KASSERT(((struct vnode *)object->handle)->
3222 				    v_writecount > 0,
3223 				    ("vmspace_fork: v_writecount %p", object));
3224 				KASSERT(object->un_pager.vnp.writemappings > 0,
3225 				    ("vmspace_fork: vnp.writecount %p",
3226 				    object));
3227 			}
3228 			VM_OBJECT_WUNLOCK(object);
3229 
3230 			/*
3231 			 * Clone the entry, referencing the shared object.
3232 			 */
3233 			new_entry = vm_map_entry_create(new_map);
3234 			*new_entry = *old_entry;
3235 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3236 			    MAP_ENTRY_IN_TRANSITION);
3237 			new_entry->wiring_thread = NULL;
3238 			new_entry->wired_count = 0;
3239 			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3240 				vnode_pager_update_writecount(object,
3241 				    new_entry->start, new_entry->end);
3242 			}
3243 
3244 			/*
3245 			 * Insert the entry into the new map -- we know we're
3246 			 * inserting at the end of the new map.
3247 			 */
3248 			vm_map_entry_link(new_map, new_map->header.prev,
3249 			    new_entry);
3250 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3251 
3252 			/*
3253 			 * Update the physical map
3254 			 */
3255 			pmap_copy(new_map->pmap, old_map->pmap,
3256 			    new_entry->start,
3257 			    (old_entry->end - old_entry->start),
3258 			    old_entry->start);
3259 			break;
3260 
3261 		case VM_INHERIT_COPY:
3262 			/*
3263 			 * Clone the entry and link into the map.
3264 			 */
3265 			new_entry = vm_map_entry_create(new_map);
3266 			*new_entry = *old_entry;
3267 			/*
3268 			 * Copied entry is COW over the old object.
3269 			 */
3270 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3271 			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3272 			new_entry->wiring_thread = NULL;
3273 			new_entry->wired_count = 0;
3274 			new_entry->object.vm_object = NULL;
3275 			new_entry->cred = NULL;
3276 			vm_map_entry_link(new_map, new_map->header.prev,
3277 			    new_entry);
3278 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3279 			vm_map_copy_entry(old_map, new_map, old_entry,
3280 			    new_entry, fork_charge);
3281 			break;
3282 		}
3283 		old_entry = old_entry->next;
3284 	}
3285 	/*
3286 	 * Use inlined vm_map_unlock() to postpone handling the deferred
3287 	 * map entries, which cannot be done until both old_map and
3288 	 * new_map locks are released.
3289 	 */
3290 	sx_xunlock(&old_map->lock);
3291 	sx_xunlock(&new_map->lock);
3292 	vm_map_process_deferred();
3293 
3294 	return (vm2);
3295 }
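/*
 * Editorial example (not part of the original source): the shape of a
 * fork-time caller.  The swap charge accumulated for copied entries is
 * returned through "fork_charge"; a real caller would account that charge
 * against the child's credentials.  Guarded out of the build; the name is
 * illustrative only.
 */
#if 0
static struct vmspace *
example_fork_vmspace(struct vmspace *vm1)
{
	struct vmspace *vm2;
	vm_ooffset_t fork_charge;

	fork_charge = 0;
	vm2 = vmspace_fork(vm1, &fork_charge);
	return (vm2);
}
#endif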
3296 
3297 int
3298 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3299     vm_prot_t prot, vm_prot_t max, int cow)
3300 {
3301 	vm_map_entry_t new_entry, prev_entry;
3302 	vm_offset_t bot, top;
3303 	vm_size_t growsize, init_ssize;
3304 	int orient, rv;
3305 	rlim_t lmemlim, vmemlim;
3306 
3307 	/*
3308 	 * The stack orientation is piggybacked with the cow argument.
3309 	 * Extract it into orient and mask the cow argument so that we
3310 	 * don't pass it around further.
3311 	 * NOTE: We explicitly allow bi-directional stacks.
3312 	 */
3313 	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3314 	cow &= ~orient;
3315 	KASSERT(orient != 0, ("No stack grow direction"));
3316 
3317 	if (addrbos < vm_map_min(map) ||
3318 	    addrbos > vm_map_max(map) ||
3319 	    addrbos + max_ssize < addrbos)
3320 		return (KERN_NO_SPACE);
3321 
3322 	growsize = sgrowsiz;
3323 	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3324 
3325 	PROC_LOCK(curproc);
3326 	lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3327 	vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3328 	PROC_UNLOCK(curproc);
3329 
3330 	vm_map_lock(map);
3331 
3332 	/* If addr is already mapped, no go */
3333 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3334 		vm_map_unlock(map);
3335 		return (KERN_NO_SPACE);
3336 	}
3337 
3338 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3339 		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3340 			vm_map_unlock(map);
3341 			return (KERN_NO_SPACE);
3342 		}
3343 	}
3344 
3345 	/* If we would blow our VMEM resource limit, no go */
3346 	if (map->size + init_ssize > vmemlim) {
3347 		vm_map_unlock(map);
3348 		return (KERN_NO_SPACE);
3349 	}
3350 
3351 	/*
3352 	 * If we can't accommodate max_ssize in the current mapping, no go.
3353 	 * However, we need to be aware that subsequent user mappings might
3354 	 * map into the space we have reserved for stack, and currently this
3355 	 * space is not protected.
3356 	 *
3357 	 * Hopefully we will at least detect this condition when we try to
3358 	 * grow the stack.
3359 	 */
3360 	if ((prev_entry->next != &map->header) &&
3361 	    (prev_entry->next->start < addrbos + max_ssize)) {
3362 		vm_map_unlock(map);
3363 		return (KERN_NO_SPACE);
3364 	}
3365 
3366 	/*
3367 	 * We initially map a stack of only init_ssize.  We will grow as
3368 	 * needed later.  Depending on the orientation of the stack (i.e.
3369 	 * the grow direction) we either map at the top of the range, the
3370 	 * bottom of the range or in the middle.
3371 	 *
3372 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3373 	 * and cow to be 0.  Possibly we should eliminate these as input
3374 	 * parameters, and just pass these values here in the insert call.
3375 	 */
3376 	if (orient == MAP_STACK_GROWS_DOWN)
3377 		bot = addrbos + max_ssize - init_ssize;
3378 	else if (orient == MAP_STACK_GROWS_UP)
3379 		bot = addrbos;
3380 	else
3381 		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3382 	top = bot + init_ssize;
3383 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3384 
3385 	/* Now set the avail_ssize amount. */
3386 	if (rv == KERN_SUCCESS) {
3387 		if (prev_entry != &map->header)
3388 			vm_map_clip_end(map, prev_entry, bot);
3389 		new_entry = prev_entry->next;
3390 		if (new_entry->end != top || new_entry->start != bot)
3391 			panic("Bad entry start/end for new stack entry");
3392 
3393 		new_entry->avail_ssize = max_ssize - init_ssize;
3394 		if (orient & MAP_STACK_GROWS_DOWN)
3395 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3396 		if (orient & MAP_STACK_GROWS_UP)
3397 			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3398 	}
3399 
3400 	vm_map_unlock(map);
3401 	return (rv);
3402 }
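/*
 * Editorial example (not part of the original source): creating a
 * conventional downward-growing stack of at most "max_ssize" bytes whose
 * reserved range ends at "top".  The grow direction rides in the cow
 * argument, as described above.  Guarded out of the build; the name is
 * illustrative only.
 */
#if 0
static int
example_create_stack(vm_map_t map, vm_offset_t top, vm_size_t max_ssize)
{

	return (vm_map_stack(map, top - max_ssize, max_ssize, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_STACK_GROWS_DOWN));
}
#endif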
3403 
3404 static int stack_guard_page = 0;
3405 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3406 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3407     &stack_guard_page, 0,
3408     "Insert stack guard page ahead of the growable segments.");
3409 
3410 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3411  * desired address is already mapped, or if we successfully grow
3412  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3413  * stack range (this is strange, but preserves compatibility with
3414  * the grow function in vm_machdep.c).
3415  */
3416 int
3417 vm_map_growstack(struct proc *p, vm_offset_t addr)
3418 {
3419 	vm_map_entry_t next_entry, prev_entry;
3420 	vm_map_entry_t new_entry, stack_entry;
3421 	struct vmspace *vm = p->p_vmspace;
3422 	vm_map_t map = &vm->vm_map;
3423 	vm_offset_t end;
3424 	vm_size_t growsize;
3425 	size_t grow_amount, max_grow;
3426 	rlim_t lmemlim, stacklim, vmemlim;
3427 	int is_procstack, rv;
3428 	struct ucred *cred;
3429 #ifdef notyet
3430 	uint64_t limit;
3431 #endif
3432 #ifdef RACCT
3433 	int error;
3434 #endif
3435 
3436 Retry:
3437 	PROC_LOCK(p);
3438 	lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3439 	stacklim = lim_cur(p, RLIMIT_STACK);
3440 	vmemlim = lim_cur(p, RLIMIT_VMEM);
3441 	PROC_UNLOCK(p);
3442 
3443 	vm_map_lock_read(map);
3444 
3445 	/* If addr is already in the entry range, no need to grow. */
3446 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3447 		vm_map_unlock_read(map);
3448 		return (KERN_SUCCESS);
3449 	}
3450 
3451 	next_entry = prev_entry->next;
3452 	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3453 		/*
3454 		 * This entry does not grow upwards. Since the address lies
3455 		 * beyond this entry, the next entry (if one exists) has to
3456 		 * be a downward growable entry. The entry list header is
3457 		 * never a growable entry, so it suffices to check the flags.
3458 		 */
3459 		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3460 			vm_map_unlock_read(map);
3461 			return (KERN_SUCCESS);
3462 		}
3463 		stack_entry = next_entry;
3464 	} else {
3465 		/*
3466 		 * This entry grows upward. If the next entry does not at
3467 		 * least grow downwards, this is the entry we need to grow.
3468 		 * Otherwise we have two possible choices and we have to
3469 		 * select one.
3470 		 */
3471 		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3472 			/*
3473 			 * We have two choices; grow the entry closest to
3474 			 * the address to minimize the amount of growth.
3475 			 */
3476 			if (addr - prev_entry->end <= next_entry->start - addr)
3477 				stack_entry = prev_entry;
3478 			else
3479 				stack_entry = next_entry;
3480 		} else
3481 			stack_entry = prev_entry;
3482 	}
3483 
3484 	if (stack_entry == next_entry) {
3485 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3486 		KASSERT(addr < stack_entry->start, ("foo"));
3487 		end = (prev_entry != &map->header) ? prev_entry->end :
3488 		    stack_entry->start - stack_entry->avail_ssize;
3489 		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3490 		max_grow = stack_entry->start - end;
3491 	} else {
3492 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3493 		KASSERT(addr >= stack_entry->end, ("foo"));
3494 		end = (next_entry != &map->header) ? next_entry->start :
3495 		    stack_entry->end + stack_entry->avail_ssize;
3496 		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3497 		max_grow = end - stack_entry->end;
3498 	}
3499 
3500 	if (grow_amount > stack_entry->avail_ssize) {
3501 		vm_map_unlock_read(map);
3502 		return (KERN_NO_SPACE);
3503 	}
3504 
3505 	/*
3506 	 * If there is no longer enough space between the entries, fail and
3507 	 * adjust the available space.  Note: this should only happen if the
3508 	 * user has mapped into the stack area after the stack was created,
3509 	 * and is probably an error.
3510 	 *
3511 	 * This also effectively destroys any guard page the user might have
3512 	 * intended by limiting the stack size.
3513 	 */
3514 	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3515 		if (vm_map_lock_upgrade(map))
3516 			goto Retry;
3517 
3518 		stack_entry->avail_ssize = max_grow;
3519 
3520 		vm_map_unlock(map);
3521 		return (KERN_NO_SPACE);
3522 	}
3523 
3524 	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3525 
3526 	/*
3527 	 * If this is the main process stack, see if we're over the stack
3528 	 * limit.
3529 	 */
3530 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3531 		vm_map_unlock_read(map);
3532 		return (KERN_NO_SPACE);
3533 	}
3534 #ifdef RACCT
3535 	PROC_LOCK(p);
3536 	if (is_procstack &&
3537 	    racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
3538 		PROC_UNLOCK(p);
3539 		vm_map_unlock_read(map);
3540 		return (KERN_NO_SPACE);
3541 	}
3542 	PROC_UNLOCK(p);
3543 #endif
3544 
3545 	/* Round the grow amount up to a multiple of growsize (sgrowsiz). */
3546 	growsize = sgrowsiz;
3547 	grow_amount = roundup(grow_amount, growsize);
3548 	if (grow_amount > stack_entry->avail_ssize)
3549 		grow_amount = stack_entry->avail_ssize;
3550 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3551 		grow_amount = trunc_page((vm_size_t)stacklim) -
3552 		    ctob(vm->vm_ssize);
3553 	}
3554 #ifdef notyet
3555 	PROC_LOCK(p);
3556 	limit = racct_get_available(p, RACCT_STACK);
3557 	PROC_UNLOCK(p);
3558 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3559 		grow_amount = limit - ctob(vm->vm_ssize);
3560 #endif
3561 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3562 		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3563 			vm_map_unlock_read(map);
3564 			rv = KERN_NO_SPACE;
3565 			goto out;
3566 		}
3567 #ifdef RACCT
3568 		PROC_LOCK(p);
3569 		if (racct_set(p, RACCT_MEMLOCK,
3570 		    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3571 			PROC_UNLOCK(p);
3572 			vm_map_unlock_read(map);
3573 			rv = KERN_NO_SPACE;
3574 			goto out;
3575 		}
3576 		PROC_UNLOCK(p);
3577 #endif
3578 	}
3579 	/* If we would exceed our VMEM resource limit, fail. */
3580 	if (map->size + grow_amount > vmemlim) {
3581 		vm_map_unlock_read(map);
3582 		rv = KERN_NO_SPACE;
3583 		goto out;
3584 	}
3585 #ifdef RACCT
3586 	PROC_LOCK(p);
3587 	if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3588 		PROC_UNLOCK(p);
3589 		vm_map_unlock_read(map);
3590 		rv = KERN_NO_SPACE;
3591 		goto out;
3592 	}
3593 	PROC_UNLOCK(p);
3594 #endif
3595 
3596 	if (vm_map_lock_upgrade(map))
3597 		goto Retry;
3598 
3599 	if (stack_entry == next_entry) {
3600 		/*
3601 		 * Growing downward.
3602 		 */
3603 		/* Get the preliminary new entry start value */
3604 		addr = stack_entry->start - grow_amount;
3605 
3606 		/*
3607 		 * If this puts us into the previous entry, cut back our
3608 		 * growth to the available space. Also, see the note above.
3609 		 */
3610 		if (addr < end) {
3611 			stack_entry->avail_ssize = max_grow;
3612 			addr = end;
3613 			if (stack_guard_page)
3614 				addr += PAGE_SIZE;
3615 		}
3616 
3617 		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3618 		    next_entry->protection, next_entry->max_protection, 0);
3619 
3620 		/* Adjust the available stack space by the amount we grew. */
3621 		if (rv == KERN_SUCCESS) {
3622 			if (prev_entry != &map->header)
3623 				vm_map_clip_end(map, prev_entry, addr);
3624 			new_entry = prev_entry->next;
3625 			KASSERT(new_entry == stack_entry->prev, ("foo"));
3626 			KASSERT(new_entry->end == stack_entry->start, ("foo"));
3627 			KASSERT(new_entry->start == addr, ("foo"));
3628 			grow_amount = new_entry->end - new_entry->start;
3629 			new_entry->avail_ssize = stack_entry->avail_ssize -
3630 			    grow_amount;
3631 			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3632 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3633 		}
3634 	} else {
3635 		/*
3636 		 * Growing upward.
3637 		 */
3638 		addr = stack_entry->end + grow_amount;
3639 
3640 		/*
3641 		 * If this puts us into the next entry, cut back our growth
3642 		 * to the available space. Also, see the note above.
3643 		 */
3644 		if (addr > end) {
3645 			stack_entry->avail_ssize = end - stack_entry->end;
3646 			addr = end;
3647 			if (stack_guard_page)
3648 				addr -= PAGE_SIZE;
3649 		}
3650 
3651 		grow_amount = addr - stack_entry->end;
3652 		cred = stack_entry->cred;
3653 		if (cred == NULL && stack_entry->object.vm_object != NULL)
3654 			cred = stack_entry->object.vm_object->cred;
3655 		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3656 			rv = KERN_NO_SPACE;
3657 		/* Grow the underlying object if applicable. */
3658 		else if (stack_entry->object.vm_object == NULL ||
3659 			 vm_object_coalesce(stack_entry->object.vm_object,
3660 			 stack_entry->offset,
3661 			 (vm_size_t)(stack_entry->end - stack_entry->start),
3662 			 (vm_size_t)grow_amount, cred != NULL)) {
3663 			map->size += (addr - stack_entry->end);
3664 			/* Update the current entry. */
3665 			stack_entry->end = addr;
3666 			stack_entry->avail_ssize -= grow_amount;
3667 			vm_map_entry_resize_free(map, stack_entry);
3668 			rv = KERN_SUCCESS;
3669 
3670 			if (next_entry != &map->header)
3671 				vm_map_clip_start(map, next_entry, addr);
3672 		} else
3673 			rv = KERN_FAILURE;
3674 	}
3675 
3676 	if (rv == KERN_SUCCESS && is_procstack)
3677 		vm->vm_ssize += btoc(grow_amount);
3678 
3679 	vm_map_unlock(map);
3680 
3681 	/*
3682 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3683 	 */
3684 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3685 		vm_map_wire(map,
3686 		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3687 		    (stack_entry == next_entry) ? stack_entry->start : addr,
3688 		    (p->p_flag & P_SYSTEM)
3689 		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3690 		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3691 	}
3692 
3693 out:
3694 #ifdef RACCT
3695 	if (rv != KERN_SUCCESS) {
3696 		PROC_LOCK(p);
3697 		error = racct_set(p, RACCT_VMEM, map->size);
3698 		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3699 		if (!old_mlock) {
3700 			error = racct_set(p, RACCT_MEMLOCK,
3701 			    ptoa(pmap_wired_count(map->pmap)));
3702 			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3703 		}
3704 		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3705 		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3706 		PROC_UNLOCK(p);
3707 	}
3708 #endif
3709 
3710 	return (rv);
3711 }
3712 
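/*
 * Illustrative sizing example (not part of the original source), assuming
 * 4KB pages, an sgrowsiz of, say, 128KB, a 512KB avail_ssize, and a fault
 * one page below stack_entry->start:
 *
 *	grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
 *							= 4KB
 *	grow_amount = roundup(grow_amount, sgrowsiz);
 *							= 128KB
 *
 * 128KB fits within avail_ssize (512KB); if it would push the stack past
 * RLIMIT_STACK, it is further truncated to the remaining headroom,
 * trunc_page(stacklim) - ctob(vm->vm_ssize).
 */
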
3713 /*
3714  * Unshare the specified VM space for exec.  If other processes are
3715  * sharing it, then create a new one.  The new vmspace starts out empty.
3716  */
3717 int
3718 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3719 {
3720 	struct vmspace *oldvmspace = p->p_vmspace;
3721 	struct vmspace *newvmspace;
3722 
3723 	newvmspace = vmspace_alloc(minuser, maxuser);
3724 	if (newvmspace == NULL)
3725 		return (ENOMEM);
3726 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3727 	/*
3728 	 * This code is written like this for prototype purposes.  The
3729 	 * goal is to avoid running down the vmspace here, but to let the
3730 	 * other processes that are still using the vmspace finally
3731 	 * run it down.  Even though there is little or no chance of blocking
3732 	 * here, it is a good idea to keep this form for future mods.
3733 	 */
3734 	PROC_VMSPACE_LOCK(p);
3735 	p->p_vmspace = newvmspace;
3736 	PROC_VMSPACE_UNLOCK(p);
3737 	if (p == curthread->td_proc)
3738 		pmap_activate(curthread);
3739 	vmspace_free(oldvmspace);
3740 	return (0);
3741 }
3742 
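/*
 * Illustrative sketch (not part of the original source): an exec path
 * that cannot keep the current vmspace might switch to a fresh one and
 * then populate it.  The variable names and address bounds here are
 * only for illustration:
 *
 *	error = vmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *	if (error)
 *		return (error);		ENOMEM on allocation failure
 *	map = &p->p_vmspace->vm_map;	new, empty map to populate
 */
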
3743 /*
3744  * Unshare the specified VM space for forcing COW.  This
3745  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3746  */
3747 int
3748 vmspace_unshare(struct proc *p)
3749 {
3750 	struct vmspace *oldvmspace = p->p_vmspace;
3751 	struct vmspace *newvmspace;
3752 	vm_ooffset_t fork_charge;
3753 
3754 	if (oldvmspace->vm_refcnt == 1)
3755 		return (0);
3756 	fork_charge = 0;
3757 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3758 	if (newvmspace == NULL)
3759 		return (ENOMEM);
3760 	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3761 		vmspace_free(newvmspace);
3762 		return (ENOMEM);
3763 	}
3764 	PROC_VMSPACE_LOCK(p);
3765 	p->p_vmspace = newvmspace;
3766 	PROC_VMSPACE_UNLOCK(p);
3767 	if (p == curthread->td_proc)
3768 		pmap_activate(curthread);
3769 	vmspace_free(oldvmspace);
3770 	return (0);
3771 }
3772 
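/*
 * Illustrative sketch (not part of the original source): for the rfork(2)
 * case noted above, (RFMEM|RFPROC) == 0, a caller would request a private
 * copy of the current address space roughly as follows:
 *
 *	if ((flags & (RFMEM | RFPROC)) == 0) {
 *		error = vmspace_unshare(p);
 *		if (error)
 *			return (error);
 *	}
 */
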
3773 /*
3774  *	vm_map_lookup:
3775  *
3776  *	Finds the VM object, offset, and
3777  *	protection for a given virtual address in the
3778  *	specified map, assuming a page fault of the
3779  *	type specified.
3780  *
3781  *	Leaves the map in question locked for read; return
3782  *	values are guaranteed until a vm_map_lookup_done
3783  *	call is performed.  Note that the map argument
3784  *	is in/out; the returned map must be used in
3785  *	the call to vm_map_lookup_done.
3786  *
3787  *	A handle (out_entry) is returned for use in
3788  *	vm_map_lookup_done, to make that fast.
3789  *
3790  *	If a lookup is requested with "write protection"
3791  *	specified, the map may be changed to perform virtual
3792  *	copying operations, although the data referenced will
3793  *	remain the same.
3794  */
3795 int
3796 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3797 	      vm_offset_t vaddr,
3798 	      vm_prot_t fault_typea,
3799 	      vm_map_entry_t *out_entry,	/* OUT */
3800 	      vm_object_t *object,		/* OUT */
3801 	      vm_pindex_t *pindex,		/* OUT */
3802 	      vm_prot_t *out_prot,		/* OUT */
3803 	      boolean_t *wired)			/* OUT */
3804 {
3805 	vm_map_entry_t entry;
3806 	vm_map_t map = *var_map;
3807 	vm_prot_t prot;
3808 	vm_prot_t fault_type = fault_typea;
3809 	vm_object_t eobject;
3810 	vm_size_t size;
3811 	struct ucred *cred;
3812 
3813 RetryLookup:;
3814 
3815 	vm_map_lock_read(map);
3816 
3817 	/*
3818 	 * Lookup the faulting address.
3819 	 */
3820 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3821 		vm_map_unlock_read(map);
3822 		return (KERN_INVALID_ADDRESS);
3823 	}
3824 
3825 	entry = *out_entry;
3826 
3827 	/*
3828 	 * Handle submaps.
3829 	 */
3830 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3831 		vm_map_t old_map = map;
3832 
3833 		*var_map = map = entry->object.sub_map;
3834 		vm_map_unlock_read(old_map);
3835 		goto RetryLookup;
3836 	}
3837 
3838 	/*
3839 	 * Check whether this task is allowed to have this page.
3840 	 */
3841 	prot = entry->protection;
3842 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3843 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3844 		vm_map_unlock_read(map);
3845 		return (KERN_PROTECTION_FAILURE);
3846 	}
3847 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3848 	    (entry->eflags & MAP_ENTRY_COW) &&
3849 	    (fault_type & VM_PROT_WRITE)) {
3850 		vm_map_unlock_read(map);
3851 		return (KERN_PROTECTION_FAILURE);
3852 	}
3853 	if ((fault_typea & VM_PROT_COPY) != 0 &&
3854 	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
3855 	    (entry->eflags & MAP_ENTRY_COW) == 0) {
3856 		vm_map_unlock_read(map);
3857 		return (KERN_PROTECTION_FAILURE);
3858 	}
3859 
3860 	/*
3861 	 * If this page is not pageable, we have to get it for all possible
3862 	 * accesses.
3863 	 */
3864 	*wired = (entry->wired_count != 0);
3865 	if (*wired)
3866 		fault_type = entry->protection;
3867 	size = entry->end - entry->start;
3868 	/*
3869 	 * If the entry is copy-on-write, shadow it now or demote the access.
3870 	 */
3871 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3872 		/*
3873 		 * If we want to write the page, we may as well handle that
3874 		 * now since we've got the map locked.
3875 		 *
3876 		 * If we don't need to write the page, we just demote the
3877 		 * permissions allowed.
3878 		 */
3879 		if ((fault_type & VM_PROT_WRITE) != 0 ||
3880 		    (fault_typea & VM_PROT_COPY) != 0) {
3881 			/*
3882 			 * Make a new object, and place it in the object
3883 			 * chain.  Note that no new references have appeared
3884 			 * -- one just moved from the map to the new
3885 			 * object.
3886 			 */
3887 			if (vm_map_lock_upgrade(map))
3888 				goto RetryLookup;
3889 
3890 			if (entry->cred == NULL) {
3891 				/*
3892 				 * The debugger owner is charged for
3893 				 * the memory.
3894 				 */
3895 				cred = curthread->td_ucred;
3896 				crhold(cred);
3897 				if (!swap_reserve_by_cred(size, cred)) {
3898 					crfree(cred);
3899 					vm_map_unlock(map);
3900 					return (KERN_RESOURCE_SHORTAGE);
3901 				}
3902 				entry->cred = cred;
3903 			}
3904 			vm_object_shadow(&entry->object.vm_object,
3905 			    &entry->offset, size);
3906 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3907 			eobject = entry->object.vm_object;
3908 			if (eobject->cred != NULL) {
3909 				/*
3910 				 * The object was not shadowed.
3911 				 */
3912 				swap_release_by_cred(size, entry->cred);
3913 				crfree(entry->cred);
3914 				entry->cred = NULL;
3915 			} else if (entry->cred != NULL) {
3916 				VM_OBJECT_WLOCK(eobject);
3917 				eobject->cred = entry->cred;
3918 				eobject->charge = size;
3919 				VM_OBJECT_WUNLOCK(eobject);
3920 				entry->cred = NULL;
3921 			}
3922 
3923 			vm_map_lock_downgrade(map);
3924 		} else {
3925 			/*
3926 			 * We're attempting to read a copy-on-write page --
3927 			 * don't allow writes.
3928 			 */
3929 			prot &= ~VM_PROT_WRITE;
3930 		}
3931 	}
3932 
3933 	/*
3934 	 * Create an object if necessary.
3935 	 */
3936 	if (entry->object.vm_object == NULL &&
3937 	    !map->system_map) {
3938 		if (vm_map_lock_upgrade(map))
3939 			goto RetryLookup;
3940 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3941 		    atop(size));
3942 		entry->offset = 0;
3943 		if (entry->cred != NULL) {
3944 			VM_OBJECT_WLOCK(entry->object.vm_object);
3945 			entry->object.vm_object->cred = entry->cred;
3946 			entry->object.vm_object->charge = size;
3947 			VM_OBJECT_WUNLOCK(entry->object.vm_object);
3948 			entry->cred = NULL;
3949 		}
3950 		vm_map_lock_downgrade(map);
3951 	}
3952 
3953 	/*
3954 	 * Return the object/offset from this entry.  If the entry was
3955 	 * copy-on-write or empty, it has been fixed up.
3956 	 */
3957 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3958 	*object = entry->object.vm_object;
3959 
3960 	*out_prot = prot;
3961 	return (KERN_SUCCESS);
3962 }
3963 
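/*
 * Illustrative sketch (not part of the original source): a typical caller
 * pairs vm_map_lookup() with vm_map_lookup_done() and keeps using the
 * possibly-updated map pointer (e.g. after a submap was followed).  Here
 * "vaddr" stands for the faulting address:
 *
 *	vm_map_t map = &curproc->p_vmspace->vm_map;
 *	vm_map_entry_t entry;
 *	vm_object_t object;
 *	vm_pindex_t pindex;
 *	vm_prot_t prot;
 *	boolean_t wired;
 *	int rv;
 *
 *	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv == KERN_SUCCESS) {
 *		... fault in the page at "pindex" from "object" ...
 *		vm_map_lookup_done(map, entry);
 *	}
 */
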
3964 /*
3965  *	vm_map_lookup_locked:
3966  *
3967  *	Lookup the faulting address.  A version of vm_map_lookup that returns
3968  *	KERN_FAILURE instead of blocking on map lock or memory allocation.
3969  */
3970 int
3971 vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
3972 		     vm_offset_t vaddr,
3973 		     vm_prot_t fault_typea,
3974 		     vm_map_entry_t *out_entry,	/* OUT */
3975 		     vm_object_t *object,	/* OUT */
3976 		     vm_pindex_t *pindex,	/* OUT */
3977 		     vm_prot_t *out_prot,	/* OUT */
3978 		     boolean_t *wired)		/* OUT */
3979 {
3980 	vm_map_entry_t entry;
3981 	vm_map_t map = *var_map;
3982 	vm_prot_t prot;
3983 	vm_prot_t fault_type = fault_typea;
3984 
3985 	/*
3986 	 * Lookup the faulting address.
3987 	 */
3988 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
3989 		return (KERN_INVALID_ADDRESS);
3990 
3991 	entry = *out_entry;
3992 
3993 	/*
3994 	 * Fail if the entry refers to a submap.
3995 	 */
3996 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3997 		return (KERN_FAILURE);
3998 
3999 	/*
4000 	 * Check whether this task is allowed to have this page.
4001 	 */
4002 	prot = entry->protection;
4003 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4004 	if ((fault_type & prot) != fault_type)
4005 		return (KERN_PROTECTION_FAILURE);
4006 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4007 	    (entry->eflags & MAP_ENTRY_COW) &&
4008 	    (fault_type & VM_PROT_WRITE))
4009 		return (KERN_PROTECTION_FAILURE);
4010 
4011 	/*
4012 	 * If this page is not pageable, we have to get it for all possible
4013 	 * accesses.
4014 	 */
4015 	*wired = (entry->wired_count != 0);
4016 	if (*wired)
4017 		fault_type = entry->protection;
4018 
4019 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4020 		/*
4021 		 * Fail if the entry was copy-on-write for a write fault.
4022 		 */
4023 		if (fault_type & VM_PROT_WRITE)
4024 			return (KERN_FAILURE);
4025 		/*
4026 		 * We're attempting to read a copy-on-write page --
4027 		 * don't allow writes.
4028 		 */
4029 		prot &= ~VM_PROT_WRITE;
4030 	}
4031 
4032 	/*
4033 	 * Fail if an object should be created.
4034 	 */
4035 	if (entry->object.vm_object == NULL && !map->system_map)
4036 		return (KERN_FAILURE);
4037 
4038 	/*
4039 	 * Return the object/offset from this entry.  If the entry was
4040 	 * copy-on-write or empty, it has been fixed up.
4041 	 */
4042 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4043 	*object = entry->object.vm_object;
4044 
4045 	*out_prot = prot;
4046 	return (KERN_SUCCESS);
4047 }
4048 
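/*
 * Illustrative sketch (not part of the original source), with the same
 * declarations as in the previous sketch: a caller that already holds the
 * map lock and must not sleep can use the non-blocking variant and fall
 * back to the slow path on failure:
 *
 *	rv = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 */
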
4049 /*
4050  *	vm_map_lookup_done:
4051  *
4052  *	Releases locks acquired by a vm_map_lookup
4053  *	(according to the handle returned by that lookup).
4054  */
4055 void
4056 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4057 {
4058 	/*
4059 	 * Unlock the main-level map
4060 	 */
4061 	vm_map_unlock_read(map);
4062 }
4063 
4064 #include "opt_ddb.h"
4065 #ifdef DDB
4066 #include <sys/kernel.h>
4067 
4068 #include <ddb/ddb.h>
4069 
4070 static void
4071 vm_map_print(vm_map_t map)
4072 {
4073 	vm_map_entry_t entry;
4074 
4075 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4076 	    (void *)map,
4077 	    (void *)map->pmap, map->nentries, map->timestamp);
4078 
4079 	db_indent += 2;
4080 	for (entry = map->header.next; entry != &map->header;
4081 	    entry = entry->next) {
4082 		db_iprintf("map entry %p: start=%p, end=%p\n",
4083 		    (void *)entry, (void *)entry->start, (void *)entry->end);
4084 		{
4085 			static char *inheritance_name[4] =
4086 			{"share", "copy", "none", "donate_copy"};
4087 
4088 			db_iprintf(" prot=%x/%x/%s",
4089 			    entry->protection,
4090 			    entry->max_protection,
4091 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4092 			if (entry->wired_count != 0)
4093 				db_printf(", wired");
4094 		}
4095 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4096 			db_printf(", share=%p, offset=0x%jx\n",
4097 			    (void *)entry->object.sub_map,
4098 			    (uintmax_t)entry->offset);
4099 			if ((entry->prev == &map->header) ||
4100 			    (entry->prev->object.sub_map !=
4101 				entry->object.sub_map)) {
4102 				db_indent += 2;
4103 				vm_map_print((vm_map_t)entry->object.sub_map);
4104 				db_indent -= 2;
4105 			}
4106 		} else {
4107 			if (entry->cred != NULL)
4108 				db_printf(", ruid %d", entry->cred->cr_ruid);
4109 			db_printf(", object=%p, offset=0x%jx",
4110 			    (void *)entry->object.vm_object,
4111 			    (uintmax_t)entry->offset);
4112 			if (entry->object.vm_object && entry->object.vm_object->cred)
4113 				db_printf(", obj ruid %d charge %jx",
4114 				    entry->object.vm_object->cred->cr_ruid,
4115 				    (uintmax_t)entry->object.vm_object->charge);
4116 			if (entry->eflags & MAP_ENTRY_COW)
4117 				db_printf(", copy (%s)",
4118 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4119 			db_printf("\n");
4120 
4121 			if ((entry->prev == &map->header) ||
4122 			    (entry->prev->object.vm_object !=
4123 				entry->object.vm_object)) {
4124 				db_indent += 2;
4125 				vm_object_print((db_expr_t)(intptr_t)
4126 						entry->object.vm_object,
4127 						1, 0, (char *)0);
4128 				db_indent -= 2;
4129 			}
4130 		}
4131 	}
4132 	db_indent -= 2;
4133 }
4134 
4135 DB_SHOW_COMMAND(map, map)
4136 {
4137 
4138 	if (!have_addr) {
4139 		db_printf("usage: show map <addr>\n");
4140 		return;
4141 	}
4142 	vm_map_print((vm_map_t)addr);
4143 }
4144 
4145 DB_SHOW_COMMAND(procvm, procvm)
4146 {
4147 	struct proc *p;
4148 
4149 	if (have_addr) {
4150 		p = (struct proc *) addr;
4151 	} else {
4152 		p = curproc;
4153 	}
4154 
4155 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4156 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4157 	    (void *)vmspace_pmap(p->p_vmspace));
4158 
4159 	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4160 }
4161 
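/*
 * Illustrative usage (not part of the original source): from the ddb
 * prompt, "show procvm" defaults to curproc when no address is given,
 * and the map address it prints can be handed to "show map":
 *
 *	db> show procvm
 *	db> show map 0xfffff80002a1c000
 *
 * The address in the second command is, of course, only an example.
 */
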
4162 #endif /* DDB */
4163