xref: /freebsd/sys/vm/vm_map.c (revision dd48af360fdbbb9552f9fc6de7abe50d68ad5331)
1 /*-
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60 
61 /*
62  *	Virtual memory mapping module.
63  */
64 
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/ktr.h>
71 #include <sys/lock.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/vmmeter.h>
75 #include <sys/mman.h>
76 #include <sys/vnode.h>
77 #include <sys/resourcevar.h>
78 #include <sys/file.h>
79 #include <sys/sysent.h>
80 #include <sys/shm.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
91 #include <vm/swap_pager.h>
92 #include <vm/uma.h>
93 
94 /*
95  *	Virtual memory maps provide for the mapping, protection,
96  *	and sharing of virtual memory objects.  In addition,
97  *	this module provides for an efficient virtual copy of
98  *	memory from one map to another.
99  *
100  *	Synchronization is required prior to most operations.
101  *
102  *	Maps consist of an ordered doubly-linked list of simple
103  *	entries; a self-adjusting binary search tree of these
104  *	entries is used to speed up lookups.
105  *
106  *	Since portions of maps are specified by start/end addresses,
107  *	which may not align with existing map entries, all
108  *	routines merely "clip" entries to these start/end values.
109  *	[That is, an entry is split into two, bordering at a
110  *	start or end value.]  Note that these clippings may not
111  *	always be necessary (as the two resulting entries are then
112  *	not changed); however, the clipping is done for convenience.
113  *
114  *	As mentioned above, virtual copy operations are performed
115  *	by copying VM object references from one map to
116  *	another, and then marking both regions as copy-on-write.
117  */
118 
119 static struct mtx map_sleep_mtx;
120 static uma_zone_t mapentzone;
121 static uma_zone_t kmapentzone;
122 static uma_zone_t mapzone;
123 static uma_zone_t vmspace_zone;
124 static struct vm_object kmapentobj;
125 static int vmspace_zinit(void *mem, int size, int flags);
126 static void vmspace_zfini(void *mem, int size);
127 static int vm_map_zinit(void *mem, int ize, int flags);
128 static void vm_map_zfini(void *mem, int size);
129 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
130     vm_offset_t max);
131 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
132 #ifdef INVARIANTS
133 static void vm_map_zdtor(void *mem, int size, void *arg);
134 static void vmspace_zdtor(void *mem, int size, void *arg);
135 #endif
136 
137 #define	ENTRY_CHARGED(e) ((e)->uip != NULL || \
138     ((e)->object.vm_object != NULL && (e)->object.vm_object->uip != NULL && \
139      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
140 
141 /*
142  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
143  * stable.
144  */
145 #define PROC_VMSPACE_LOCK(p) do { } while (0)
146 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
147 
148 /*
149  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
150  *
151  *	Asserts that the starting and ending region
152  *	addresses fall within the valid range of the map.
153  */
154 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
155 		{					\
156 		if (start < vm_map_min(map))		\
157 			start = vm_map_min(map);	\
158 		if (end > vm_map_max(map))		\
159 			end = vm_map_max(map);		\
160 		if (start > end)			\
161 			start = end;			\
162 		}
163 
164 /*
165  *	vm_map_startup:
166  *
167  *	Initialize the vm_map module.  Must be called before
168  *	any other vm_map routines.
169  *
170  *	Map and entry structures are allocated from the general
171  *	purpose memory pool with some exceptions:
172  *
173  *	- The kernel map and kmem submap are allocated statically.
174  *	- Kernel map entries are allocated out of a static pool.
175  *
176  *	These restrictions are necessary since malloc() uses the
177  *	maps and requires map entries.
178  */
179 
180 void
181 vm_map_startup(void)
182 {
183 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
184 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
185 #ifdef INVARIANTS
186 	    vm_map_zdtor,
187 #else
188 	    NULL,
189 #endif
190 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
191 	uma_prealloc(mapzone, MAX_KMAP);
192 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
193 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
194 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
195 	uma_prealloc(kmapentzone, MAX_KMAPENT);
196 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
197 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
198 }
199 
200 static void
201 vmspace_zfini(void *mem, int size)
202 {
203 	struct vmspace *vm;
204 
205 	vm = (struct vmspace *)mem;
206 	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
207 }
208 
209 static int
210 vmspace_zinit(void *mem, int size, int flags)
211 {
212 	struct vmspace *vm;
213 
214 	vm = (struct vmspace *)mem;
215 
216 	vm->vm_map.pmap = NULL;
217 	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
218 	return (0);
219 }
220 
221 static void
222 vm_map_zfini(void *mem, int size)
223 {
224 	vm_map_t map;
225 
226 	map = (vm_map_t)mem;
227 	mtx_destroy(&map->system_mtx);
228 	sx_destroy(&map->lock);
229 }
230 
231 static int
232 vm_map_zinit(void *mem, int size, int flags)
233 {
234 	vm_map_t map;
235 
236 	map = (vm_map_t)mem;
237 	map->nentries = 0;
238 	map->size = 0;
239 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
240 	sx_init(&map->lock, "user map");
241 	return (0);
242 }
243 
244 #ifdef INVARIANTS
245 static void
246 vmspace_zdtor(void *mem, int size, void *arg)
247 {
248 	struct vmspace *vm;
249 
250 	vm = (struct vmspace *)mem;
251 
252 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
253 }
254 static void
255 vm_map_zdtor(void *mem, int size, void *arg)
256 {
257 	vm_map_t map;
258 
259 	map = (vm_map_t)mem;
260 	KASSERT(map->nentries == 0,
261 	    ("map %p nentries == %d on free.",
262 	    map, map->nentries));
263 	KASSERT(map->size == 0,
264 	    ("map %p size == %lu on free.",
265 	    map, (unsigned long)map->size));
266 }
267 #endif	/* INVARIANTS */
268 
269 /*
270  * Allocate a vmspace structure, including a vm_map and pmap,
271  * and initialize those structures.  The refcnt is set to 1.
272  */
273 struct vmspace *
274 vmspace_alloc(min, max)
275 	vm_offset_t min, max;
276 {
277 	struct vmspace *vm;
278 
279 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
280 	if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
281 		uma_zfree(vmspace_zone, vm);
282 		return (NULL);
283 	}
284 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
285 	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
286 	vm->vm_refcnt = 1;
287 	vm->vm_shm = NULL;
288 	vm->vm_swrss = 0;
289 	vm->vm_tsize = 0;
290 	vm->vm_dsize = 0;
291 	vm->vm_ssize = 0;
292 	vm->vm_taddr = 0;
293 	vm->vm_daddr = 0;
294 	vm->vm_maxsaddr = 0;
295 	return (vm);
296 }
297 
298 void
299 vm_init2(void)
300 {
301 	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
302 	    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
303 	     maxproc * 2 + maxfiles);
304 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
305 #ifdef INVARIANTS
306 	    vmspace_zdtor,
307 #else
308 	    NULL,
309 #endif
310 	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
311 }
312 
313 static inline void
314 vmspace_dofree(struct vmspace *vm)
315 {
316 
317 	CTR1(KTR_VM, "vmspace_free: %p", vm);
318 
319 	/*
320 	 * Make sure any SysV shm is freed, it might not have been in
321 	 * exit1().
322 	 */
323 	shmexit(vm);
324 
325 	/*
326 	 * Lock the map, to wait out all other references to it.
327 	 * Delete all of the mappings and pages they hold, then call
328 	 * the pmap module to reclaim anything left.
329 	 */
330 	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
331 	    vm->vm_map.max_offset);
332 
333 	pmap_release(vmspace_pmap(vm));
334 	vm->vm_map.pmap = NULL;
335 	uma_zfree(vmspace_zone, vm);
336 }
337 
338 void
339 vmspace_free(struct vmspace *vm)
340 {
341 	int refcnt;
342 
343 	if (vm->vm_refcnt == 0)
344 		panic("vmspace_free: attempt to free already freed vmspace");
345 
346 	do
347 		refcnt = vm->vm_refcnt;
348 	while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
349 	if (refcnt == 1)
350 		vmspace_dofree(vm);
351 }
352 
353 void
354 vmspace_exitfree(struct proc *p)
355 {
356 	struct vmspace *vm;
357 
358 	PROC_VMSPACE_LOCK(p);
359 	vm = p->p_vmspace;
360 	p->p_vmspace = NULL;
361 	PROC_VMSPACE_UNLOCK(p);
362 	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
363 	vmspace_free(vm);
364 }
365 
366 void
367 vmspace_exit(struct thread *td)
368 {
369 	int refcnt;
370 	struct vmspace *vm;
371 	struct proc *p;
372 
373 	/*
374 	 * Release user portion of address space.
375 	 * This releases references to vnodes,
376 	 * which could cause I/O if the file has been unlinked.
377 	 * Need to do this early enough that we can still sleep.
378 	 *
379 	 * The last exiting process to reach this point releases as
380 	 * much of the environment as it can. vmspace_dofree() is the
381 	 * slower fallback in case another process had a temporary
382 	 * reference to the vmspace.
383 	 */
384 
385 	p = td->td_proc;
386 	vm = p->p_vmspace;
387 	atomic_add_int(&vmspace0.vm_refcnt, 1);
388 	do {
389 		refcnt = vm->vm_refcnt;
390 		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
391 			/* Switch now since other proc might free vmspace */
392 			PROC_VMSPACE_LOCK(p);
393 			p->p_vmspace = &vmspace0;
394 			PROC_VMSPACE_UNLOCK(p);
395 			pmap_activate(td);
396 		}
397 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
398 	if (refcnt == 1) {
399 		if (p->p_vmspace != vm) {
400 			/* vmspace not yet freed, switch back */
401 			PROC_VMSPACE_LOCK(p);
402 			p->p_vmspace = vm;
403 			PROC_VMSPACE_UNLOCK(p);
404 			pmap_activate(td);
405 		}
406 		pmap_remove_pages(vmspace_pmap(vm));
407 		/* Switch now since this proc will free vmspace */
408 		PROC_VMSPACE_LOCK(p);
409 		p->p_vmspace = &vmspace0;
410 		PROC_VMSPACE_UNLOCK(p);
411 		pmap_activate(td);
412 		vmspace_dofree(vm);
413 	}
414 }
415 
416 /* Acquire reference to vmspace owned by another process. */
417 
418 struct vmspace *
419 vmspace_acquire_ref(struct proc *p)
420 {
421 	struct vmspace *vm;
422 	int refcnt;
423 
424 	PROC_VMSPACE_LOCK(p);
425 	vm = p->p_vmspace;
426 	if (vm == NULL) {
427 		PROC_VMSPACE_UNLOCK(p);
428 		return (NULL);
429 	}
430 	do {
431 		refcnt = vm->vm_refcnt;
432 		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
433 			PROC_VMSPACE_UNLOCK(p);
434 			return (NULL);
435 		}
436 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
437 	if (vm != p->p_vmspace) {
438 		PROC_VMSPACE_UNLOCK(p);
439 		vmspace_free(vm);
440 		return (NULL);
441 	}
442 	PROC_VMSPACE_UNLOCK(p);
443 	return (vm);
444 }
445 
446 void
447 _vm_map_lock(vm_map_t map, const char *file, int line)
448 {
449 
450 	if (map->system_map)
451 		_mtx_lock_flags(&map->system_mtx, 0, file, line);
452 	else
453 		(void)_sx_xlock(&map->lock, 0, file, line);
454 	map->timestamp++;
455 }
456 
457 void
458 _vm_map_unlock(vm_map_t map, const char *file, int line)
459 {
460 	vm_map_entry_t free_entry, entry;
461 	vm_object_t object;
462 
463 	free_entry = map->deferred_freelist;
464 	map->deferred_freelist = NULL;
465 
466 	if (map->system_map)
467 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
468 	else
469 		_sx_xunlock(&map->lock, file, line);
470 
471 	while (free_entry != NULL) {
472 		entry = free_entry;
473 		free_entry = free_entry->next;
474 
475 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
476 			object = entry->object.vm_object;
477 			vm_object_deallocate(object);
478 		}
479 
480 		vm_map_entry_dispose(map, entry);
481 	}
482 }
483 
484 void
485 _vm_map_lock_read(vm_map_t map, const char *file, int line)
486 {
487 
488 	if (map->system_map)
489 		_mtx_lock_flags(&map->system_mtx, 0, file, line);
490 	else
491 		(void)_sx_slock(&map->lock, 0, file, line);
492 }
493 
494 void
495 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
496 {
497 
498 	if (map->system_map)
499 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
500 	else
501 		_sx_sunlock(&map->lock, file, line);
502 }
503 
504 int
505 _vm_map_trylock(vm_map_t map, const char *file, int line)
506 {
507 	int error;
508 
509 	error = map->system_map ?
510 	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
511 	    !_sx_try_xlock(&map->lock, file, line);
512 	if (error == 0)
513 		map->timestamp++;
514 	return (error == 0);
515 }
516 
517 int
518 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
519 {
520 	int error;
521 
522 	error = map->system_map ?
523 	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
524 	    !_sx_try_slock(&map->lock, file, line);
525 	return (error == 0);
526 }
527 
528 /*
529  *	_vm_map_lock_upgrade:	[ internal use only ]
530  *
531  *	Tries to upgrade a read (shared) lock on the specified map to a write
532  *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
533  *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
534  *	returned without a read or write lock held.
535  *
536  *	Requires that the map be read locked.
537  */
538 int
539 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
540 {
541 	unsigned int last_timestamp;
542 
543 	if (map->system_map) {
544 #ifdef INVARIANTS
545 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
546 #endif
547 	} else {
548 		if (!_sx_try_upgrade(&map->lock, file, line)) {
549 			last_timestamp = map->timestamp;
550 			_sx_sunlock(&map->lock, file, line);
551 			/*
552 			 * If the map's timestamp does not change while the
553 			 * map is unlocked, then the upgrade succeeds.
554 			 */
555 			(void)_sx_xlock(&map->lock, 0, file, line);
556 			if (last_timestamp != map->timestamp) {
557 				_sx_xunlock(&map->lock, file, line);
558 				return (1);
559 			}
560 		}
561 	}
562 	map->timestamp++;
563 	return (0);
564 }
565 
566 void
567 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
568 {
569 
570 	if (map->system_map) {
571 #ifdef INVARIANTS
572 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
573 #endif
574 	} else
575 		_sx_downgrade(&map->lock, file, line);
576 }
577 
578 /*
579  *	vm_map_locked:
580  *
581  *	Returns a non-zero value if the caller holds a write (exclusive) lock
582  *	on the specified map and the value "0" otherwise.
583  */
584 int
585 vm_map_locked(vm_map_t map)
586 {
587 
588 	if (map->system_map)
589 		return (mtx_owned(&map->system_mtx));
590 	else
591 		return (sx_xlocked(&map->lock));
592 }
593 
594 #ifdef INVARIANTS
595 static void
596 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
597 {
598 
599 	if (map->system_map)
600 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
601 	else
602 		_sx_assert(&map->lock, SA_XLOCKED, file, line);
603 }
604 
605 #if 0
606 static void
607 _vm_map_assert_locked_read(vm_map_t map, const char *file, int line)
608 {
609 
610 	if (map->system_map)
611 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
612 	else
613 		_sx_assert(&map->lock, SA_SLOCKED, file, line);
614 }
615 #endif
616 
617 #define	VM_MAP_ASSERT_LOCKED(map) \
618     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
619 #define	VM_MAP_ASSERT_LOCKED_READ(map) \
620     _vm_map_assert_locked_read(map, LOCK_FILE, LOCK_LINE)
621 #else
622 #define	VM_MAP_ASSERT_LOCKED(map)
623 #define	VM_MAP_ASSERT_LOCKED_READ(map)
624 #endif
625 
626 /*
627  *	vm_map_unlock_and_wait:
628  */
629 int
630 vm_map_unlock_and_wait(vm_map_t map, int timo)
631 {
632 
633 	mtx_lock(&map_sleep_mtx);
634 	vm_map_unlock(map);
635 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", timo));
636 }
637 
638 /*
639  *	vm_map_wakeup:
640  */
641 void
642 vm_map_wakeup(vm_map_t map)
643 {
644 
645 	/*
646 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
647 	 * from being performed (and lost) between the vm_map_unlock()
648 	 * and the msleep() in vm_map_unlock_and_wait().
649 	 */
650 	mtx_lock(&map_sleep_mtx);
651 	mtx_unlock(&map_sleep_mtx);
652 	wakeup(&map->root);
653 }
654 
655 long
656 vmspace_resident_count(struct vmspace *vmspace)
657 {
658 	return pmap_resident_count(vmspace_pmap(vmspace));
659 }
660 
661 long
662 vmspace_wired_count(struct vmspace *vmspace)
663 {
664 	return pmap_wired_count(vmspace_pmap(vmspace));
665 }
666 
667 /*
668  *	vm_map_create:
669  *
670  *	Creates and returns a new empty VM map with
671  *	the given physical map structure, and having
672  *	the given lower and upper address bounds.
673  */
674 vm_map_t
675 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
676 {
677 	vm_map_t result;
678 
679 	result = uma_zalloc(mapzone, M_WAITOK);
680 	CTR1(KTR_VM, "vm_map_create: %p", result);
681 	_vm_map_init(result, pmap, min, max);
682 	return (result);
683 }
684 
685 /*
686  * Initialize an existing vm_map structure
687  * such as that in the vmspace structure.
688  */
689 static void
690 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
691 {
692 
693 	map->header.next = map->header.prev = &map->header;
694 	map->needs_wakeup = FALSE;
695 	map->system_map = 0;
696 	map->pmap = pmap;
697 	map->min_offset = min;
698 	map->max_offset = max;
699 	map->flags = 0;
700 	map->root = NULL;
701 	map->timestamp = 0;
702 	map->deferred_freelist = NULL;
703 }
704 
705 void
706 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
707 {
708 
709 	_vm_map_init(map, pmap, min, max);
710 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
711 	sx_init(&map->lock, "user map");
712 }
713 
714 /*
715  *	vm_map_entry_dispose:	[ internal use only ]
716  *
717  *	Inverse of vm_map_entry_create.
718  */
719 static void
720 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
721 {
722 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
723 }
724 
725 /*
726  *	vm_map_entry_create:	[ internal use only ]
727  *
728  *	Allocates a VM map entry for insertion.
729  *	No entry fields are filled in.
730  */
731 static vm_map_entry_t
732 vm_map_entry_create(vm_map_t map)
733 {
734 	vm_map_entry_t new_entry;
735 
736 	if (map->system_map)
737 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
738 	else
739 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
740 	if (new_entry == NULL)
741 		panic("vm_map_entry_create: kernel resources exhausted");
742 	return (new_entry);
743 }
744 
745 /*
746  *	vm_map_entry_set_behavior:
747  *
748  *	Set the expected access behavior, either normal, random, or
749  *	sequential.
750  */
751 static inline void
752 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
753 {
754 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
755 	    (behavior & MAP_ENTRY_BEHAV_MASK);
756 }
757 
758 /*
759  *	vm_map_entry_set_max_free:
760  *
761  *	Set the max_free field in a vm_map_entry.
762  */
763 static inline void
764 vm_map_entry_set_max_free(vm_map_entry_t entry)
765 {
766 
767 	entry->max_free = entry->adj_free;
768 	if (entry->left != NULL && entry->left->max_free > entry->max_free)
769 		entry->max_free = entry->left->max_free;
770 	if (entry->right != NULL && entry->right->max_free > entry->max_free)
771 		entry->max_free = entry->right->max_free;
772 }
773 
774 /*
775  *	vm_map_entry_splay:
776  *
777  *	The Sleator and Tarjan top-down splay algorithm with the
778  *	following variation.  Max_free must be computed bottom-up, so
779  *	on the downward pass, maintain the left and right spines in
780  *	reverse order.  Then, make a second pass up each side to fix
781  *	the pointers and compute max_free.  The time bound is O(log n)
782  *	amortized.
783  *
784  *	The new root is the vm_map_entry containing "addr", or else an
785  *	adjacent entry (lower or higher) if addr is not in the tree.
786  *
787  *	The map must be locked, and leaves it so.
788  *
789  *	Returns: the new root.
790  */
791 static vm_map_entry_t
792 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
793 {
794 	vm_map_entry_t llist, rlist;
795 	vm_map_entry_t ltree, rtree;
796 	vm_map_entry_t y;
797 
798 	/* Special case of empty tree. */
799 	if (root == NULL)
800 		return (root);
801 
802 	/*
803 	 * Pass One: Splay down the tree until we find addr or a NULL
804 	 * pointer where addr would go.  llist and rlist are the two
805 	 * sides in reverse order (bottom-up), with llist linked by
806 	 * the right pointer and rlist linked by the left pointer in
807 	 * the vm_map_entry.  Wait until Pass Two to set max_free on
808 	 * the two spines.
809 	 */
810 	llist = NULL;
811 	rlist = NULL;
812 	for (;;) {
813 		/* root is never NULL in here. */
814 		if (addr < root->start) {
815 			y = root->left;
816 			if (y == NULL)
817 				break;
818 			if (addr < y->start && y->left != NULL) {
819 				/* Rotate right and put y on rlist. */
820 				root->left = y->right;
821 				y->right = root;
822 				vm_map_entry_set_max_free(root);
823 				root = y->left;
824 				y->left = rlist;
825 				rlist = y;
826 			} else {
827 				/* Put root on rlist. */
828 				root->left = rlist;
829 				rlist = root;
830 				root = y;
831 			}
832 		} else if (addr >= root->end) {
833 			y = root->right;
834 			if (y == NULL)
835 				break;
836 			if (addr >= y->end && y->right != NULL) {
837 				/* Rotate left and put y on llist. */
838 				root->right = y->left;
839 				y->left = root;
840 				vm_map_entry_set_max_free(root);
841 				root = y->right;
842 				y->right = llist;
843 				llist = y;
844 			} else {
845 				/* Put root on llist. */
846 				root->right = llist;
847 				llist = root;
848 				root = y;
849 			}
850 		} else
851 			break;
852 	}
853 
854 	/*
855 	 * Pass Two: Walk back up the two spines, flip the pointers
856 	 * and set max_free.  The subtrees of the root go at the
857 	 * bottom of llist and rlist.
858 	 */
859 	ltree = root->left;
860 	while (llist != NULL) {
861 		y = llist->right;
862 		llist->right = ltree;
863 		vm_map_entry_set_max_free(llist);
864 		ltree = llist;
865 		llist = y;
866 	}
867 	rtree = root->right;
868 	while (rlist != NULL) {
869 		y = rlist->left;
870 		rlist->left = rtree;
871 		vm_map_entry_set_max_free(rlist);
872 		rtree = rlist;
873 		rlist = y;
874 	}
875 
876 	/*
877 	 * Final assembly: add ltree and rtree as subtrees of root.
878 	 */
879 	root->left = ltree;
880 	root->right = rtree;
881 	vm_map_entry_set_max_free(root);
882 
883 	return (root);
884 }
885 
886 /*
887  *	vm_map_entry_{un,}link:
888  *
889  *	Insert/remove entries from maps.
890  */
891 static void
892 vm_map_entry_link(vm_map_t map,
893 		  vm_map_entry_t after_where,
894 		  vm_map_entry_t entry)
895 {
896 
897 	CTR4(KTR_VM,
898 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
899 	    map->nentries, entry, after_where);
900 	VM_MAP_ASSERT_LOCKED(map);
901 	map->nentries++;
902 	entry->prev = after_where;
903 	entry->next = after_where->next;
904 	entry->next->prev = entry;
905 	after_where->next = entry;
906 
907 	if (after_where != &map->header) {
908 		if (after_where != map->root)
909 			vm_map_entry_splay(after_where->start, map->root);
910 		entry->right = after_where->right;
911 		entry->left = after_where;
912 		after_where->right = NULL;
913 		after_where->adj_free = entry->start - after_where->end;
914 		vm_map_entry_set_max_free(after_where);
915 	} else {
916 		entry->right = map->root;
917 		entry->left = NULL;
918 	}
919 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
920 	    entry->next->start) - entry->end;
921 	vm_map_entry_set_max_free(entry);
922 	map->root = entry;
923 }
924 
925 static void
926 vm_map_entry_unlink(vm_map_t map,
927 		    vm_map_entry_t entry)
928 {
929 	vm_map_entry_t next, prev, root;
930 
931 	VM_MAP_ASSERT_LOCKED(map);
932 	if (entry != map->root)
933 		vm_map_entry_splay(entry->start, map->root);
934 	if (entry->left == NULL)
935 		root = entry->right;
936 	else {
937 		root = vm_map_entry_splay(entry->start, entry->left);
938 		root->right = entry->right;
939 		root->adj_free = (entry->next == &map->header ? map->max_offset :
940 		    entry->next->start) - root->end;
941 		vm_map_entry_set_max_free(root);
942 	}
943 	map->root = root;
944 
945 	prev = entry->prev;
946 	next = entry->next;
947 	next->prev = prev;
948 	prev->next = next;
949 	map->nentries--;
950 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
951 	    map->nentries, entry);
952 }
953 
954 /*
955  *	vm_map_entry_resize_free:
956  *
957  *	Recompute the amount of free space following a vm_map_entry
958  *	and propagate that value up the tree.  Call this function after
959  *	resizing a map entry in-place, that is, without a call to
960  *	vm_map_entry_link() or _unlink().
961  *
962  *	The map must be locked, and leaves it so.
963  */
964 static void
965 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
966 {
967 
968 	/*
969 	 * Using splay trees without parent pointers, propagating
970 	 * max_free up the tree is done by moving the entry to the
971 	 * root and making the change there.
972 	 */
973 	if (entry != map->root)
974 		map->root = vm_map_entry_splay(entry->start, map->root);
975 
976 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
977 	    entry->next->start) - entry->end;
978 	vm_map_entry_set_max_free(entry);
979 }
980 
981 /*
982  *	vm_map_lookup_entry:	[ internal use only ]
983  *
984  *	Finds the map entry containing (or
985  *	immediately preceding) the specified address
986  *	in the given map; the entry is returned
987  *	in the "entry" parameter.  The boolean
988  *	result indicates whether the address is
989  *	actually contained in the map.
990  */
991 boolean_t
992 vm_map_lookup_entry(
993 	vm_map_t map,
994 	vm_offset_t address,
995 	vm_map_entry_t *entry)	/* OUT */
996 {
997 	vm_map_entry_t cur;
998 	boolean_t locked;
999 
1000 	/*
1001 	 * If the map is empty, then the map entry immediately preceding
1002 	 * "address" is the map's header.
1003 	 */
1004 	cur = map->root;
1005 	if (cur == NULL)
1006 		*entry = &map->header;
1007 	else if (address >= cur->start && cur->end > address) {
1008 		*entry = cur;
1009 		return (TRUE);
1010 	} else if ((locked = vm_map_locked(map)) ||
1011 	    sx_try_upgrade(&map->lock)) {
1012 		/*
1013 		 * Splay requires a write lock on the map.  However, it only
1014 		 * restructures the binary search tree; it does not otherwise
1015 		 * change the map.  Thus, the map's timestamp need not change
1016 		 * on a temporary upgrade.
1017 		 */
1018 		map->root = cur = vm_map_entry_splay(address, cur);
1019 		if (!locked)
1020 			sx_downgrade(&map->lock);
1021 
1022 		/*
1023 		 * If "address" is contained within a map entry, the new root
1024 		 * is that map entry.  Otherwise, the new root is a map entry
1025 		 * immediately before or after "address".
1026 		 */
1027 		if (address >= cur->start) {
1028 			*entry = cur;
1029 			if (cur->end > address)
1030 				return (TRUE);
1031 		} else
1032 			*entry = cur->prev;
1033 	} else
1034 		/*
1035 		 * Since the map is only locked for read access, perform a
1036 		 * standard binary search tree lookup for "address".
1037 		 */
1038 		for (;;) {
1039 			if (address < cur->start) {
1040 				if (cur->left == NULL) {
1041 					*entry = cur->prev;
1042 					break;
1043 				}
1044 				cur = cur->left;
1045 			} else if (cur->end > address) {
1046 				*entry = cur;
1047 				return (TRUE);
1048 			} else {
1049 				if (cur->right == NULL) {
1050 					*entry = cur;
1051 					break;
1052 				}
1053 				cur = cur->right;
1054 			}
1055 		}
1056 	return (FALSE);
1057 }
1058 
1059 /*
1060  *	vm_map_insert:
1061  *
1062  *	Inserts the given whole VM object into the target
1063  *	map at the specified address range.  The object's
1064  *	size should match that of the address range.
1065  *
1066  *	Requires that the map be locked, and leaves it so.
1067  *
1068  *	If object is non-NULL, ref count must be bumped by caller
1069  *	prior to making call to account for the new entry.
1070  */
1071 int
1072 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1073 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
1074 	      int cow)
1075 {
1076 	vm_map_entry_t new_entry;
1077 	vm_map_entry_t prev_entry;
1078 	vm_map_entry_t temp_entry;
1079 	vm_eflags_t protoeflags;
1080 	struct uidinfo *uip;
1081 	boolean_t charge_prev_obj;
1082 
1083 	VM_MAP_ASSERT_LOCKED(map);
1084 
1085 	/*
1086 	 * Check that the start and end points are not bogus.
1087 	 */
1088 	if ((start < map->min_offset) || (end > map->max_offset) ||
1089 	    (start >= end))
1090 		return (KERN_INVALID_ADDRESS);
1091 
1092 	/*
1093 	 * Find the entry prior to the proposed starting address; if it's part
1094 	 * of an existing entry, this range is bogus.
1095 	 */
1096 	if (vm_map_lookup_entry(map, start, &temp_entry))
1097 		return (KERN_NO_SPACE);
1098 
1099 	prev_entry = temp_entry;
1100 
1101 	/*
1102 	 * Assert that the next entry doesn't overlap the end point.
1103 	 */
1104 	if ((prev_entry->next != &map->header) &&
1105 	    (prev_entry->next->start < end))
1106 		return (KERN_NO_SPACE);
1107 
1108 	protoeflags = 0;
1109 	charge_prev_obj = FALSE;
1110 
1111 	if (cow & MAP_COPY_ON_WRITE)
1112 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1113 
1114 	if (cow & MAP_NOFAULT) {
1115 		protoeflags |= MAP_ENTRY_NOFAULT;
1116 
1117 		KASSERT(object == NULL,
1118 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1119 	}
1120 	if (cow & MAP_DISABLE_SYNCER)
1121 		protoeflags |= MAP_ENTRY_NOSYNC;
1122 	if (cow & MAP_DISABLE_COREDUMP)
1123 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1124 
1125 	uip = NULL;
1126 	KASSERT((object != kmem_object && object != kernel_object) ||
1127 	    ((object == kmem_object || object == kernel_object) &&
1128 		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
1129 	    ("kmem or kernel object and cow"));
1130 	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
1131 		goto charged;
1132 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1133 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1134 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1135 			return (KERN_RESOURCE_SHORTAGE);
1136 		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
1137 		    object->uip == NULL,
1138 		    ("OVERCOMMIT: vm_map_insert o %p", object));
1139 		uip = curthread->td_ucred->cr_ruidinfo;
1140 		uihold(uip);
1141 		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
1142 			charge_prev_obj = TRUE;
1143 	}
1144 
1145 charged:
1146 	if (object != NULL) {
1147 		/*
1148 		 * OBJ_ONEMAPPING must be cleared unless this mapping
1149 		 * is trivially proven to be the only mapping for any
1150 		 * of the object's pages.  (Object granularity
1151 		 * reference counting is insufficient to recognize
1152 		 * aliases with precision.)
1153 		 */
1154 		VM_OBJECT_LOCK(object);
1155 		if (object->ref_count > 1 || object->shadow_count != 0)
1156 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1157 		VM_OBJECT_UNLOCK(object);
1158 	}
1159 	else if ((prev_entry != &map->header) &&
1160 		 (prev_entry->eflags == protoeflags) &&
1161 		 (prev_entry->end == start) &&
1162 		 (prev_entry->wired_count == 0) &&
1163 		 (prev_entry->uip == uip ||
1164 		  (prev_entry->object.vm_object != NULL &&
1165 		   (prev_entry->object.vm_object->uip == uip))) &&
1166 		   vm_object_coalesce(prev_entry->object.vm_object,
1167 		       prev_entry->offset,
1168 		       (vm_size_t)(prev_entry->end - prev_entry->start),
1169 		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
1170 		/*
1171 		 * We were able to extend the object.  Determine if we
1172 		 * can extend the previous map entry to include the
1173 		 * new range as well.
1174 		 */
1175 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1176 		    (prev_entry->protection == prot) &&
1177 		    (prev_entry->max_protection == max)) {
1178 			map->size += (end - prev_entry->end);
1179 			prev_entry->end = end;
1180 			vm_map_entry_resize_free(map, prev_entry);
1181 			vm_map_simplify_entry(map, prev_entry);
1182 			if (uip != NULL)
1183 				uifree(uip);
1184 			return (KERN_SUCCESS);
1185 		}
1186 
1187 		/*
1188 		 * If we can extend the object but cannot extend the
1189 		 * map entry, we have to create a new map entry.  We
1190 		 * must bump the ref count on the extended object to
1191 		 * account for it.  object may be NULL.
1192 		 */
1193 		object = prev_entry->object.vm_object;
1194 		offset = prev_entry->offset +
1195 			(prev_entry->end - prev_entry->start);
1196 		vm_object_reference(object);
1197 		if (uip != NULL && object != NULL && object->uip != NULL &&
1198 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1199 			/* Object already accounts for this uid. */
1200 			uifree(uip);
1201 			uip = NULL;
1202 		}
1203 	}
1204 
1205 	/*
1206 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1207 	 * in things like the buffer map where we manage kva but do not manage
1208 	 * backing objects.
1209 	 */
1210 
1211 	/*
1212 	 * Create a new entry
1213 	 */
1214 	new_entry = vm_map_entry_create(map);
1215 	new_entry->start = start;
1216 	new_entry->end = end;
1217 	new_entry->uip = NULL;
1218 
1219 	new_entry->eflags = protoeflags;
1220 	new_entry->object.vm_object = object;
1221 	new_entry->offset = offset;
1222 	new_entry->avail_ssize = 0;
1223 
1224 	new_entry->inheritance = VM_INHERIT_DEFAULT;
1225 	new_entry->protection = prot;
1226 	new_entry->max_protection = max;
1227 	new_entry->wired_count = 0;
1228 
1229 	KASSERT(uip == NULL || !ENTRY_CHARGED(new_entry),
1230 	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
1231 	new_entry->uip = uip;
1232 
1233 	/*
1234 	 * Insert the new entry into the list
1235 	 */
1236 	vm_map_entry_link(map, prev_entry, new_entry);
1237 	map->size += new_entry->end - new_entry->start;
1238 
1239 #if 0
1240 	/*
1241 	 * Temporarily removed to avoid MAP_STACK panic, due to
1242 	 * MAP_STACK being a huge hack.  Will be added back in
1243 	 * when MAP_STACK (and the user stack mapping) is fixed.
1244 	 */
1245 	/*
1246 	 * It may be possible to simplify the entry
1247 	 */
1248 	vm_map_simplify_entry(map, new_entry);
1249 #endif
1250 
1251 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1252 		vm_map_pmap_enter(map, start, prot,
1253 				    object, OFF_TO_IDX(offset), end - start,
1254 				    cow & MAP_PREFAULT_PARTIAL);
1255 	}
1256 
1257 	return (KERN_SUCCESS);
1258 }
1259 
1260 /*
1261  *	vm_map_findspace:
1262  *
1263  *	Find the first fit (lowest VM address) for "length" free bytes
1264  *	beginning at address >= start in the given map.
1265  *
1266  *	In a vm_map_entry, "adj_free" is the amount of free space
1267  *	adjacent (higher address) to this entry, and "max_free" is the
1268  *	maximum amount of contiguous free space in its subtree.  This
1269  *	allows finding a free region in one path down the tree, so
1270  *	O(log n) amortized with splay trees.
1271  *
1272  *	The map must be locked, and leaves it so.
1273  *
1274  *	Returns: 0 on success, and starting address in *addr,
1275  *		 1 if insufficient space.
1276  */
1277 int
1278 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1279     vm_offset_t *addr)	/* OUT */
1280 {
1281 	vm_map_entry_t entry;
1282 	vm_offset_t end, st;
1283 
1284 	/*
1285 	 * Request must fit within min/max VM address and must avoid
1286 	 * address wrap.
1287 	 */
1288 	if (start < map->min_offset)
1289 		start = map->min_offset;
1290 	if (start + length > map->max_offset || start + length < start)
1291 		return (1);
1292 
1293 	/* Empty tree means wide open address space. */
1294 	if (map->root == NULL) {
1295 		*addr = start;
1296 		goto found;
1297 	}
1298 
1299 	/*
1300 	 * After splay, if start comes before root node, then there
1301 	 * must be a gap from start to the root.
1302 	 */
1303 	map->root = vm_map_entry_splay(start, map->root);
1304 	if (start + length <= map->root->start) {
1305 		*addr = start;
1306 		goto found;
1307 	}
1308 
1309 	/*
1310 	 * Root is the last node that might begin its gap before
1311 	 * start, and this is the last comparison where address
1312 	 * wrap might be a problem.
1313 	 */
1314 	st = (start > map->root->end) ? start : map->root->end;
1315 	if (length <= map->root->end + map->root->adj_free - st) {
1316 		*addr = st;
1317 		goto found;
1318 	}
1319 
1320 	/* With max_free, can immediately tell if no solution. */
1321 	entry = map->root->right;
1322 	if (entry == NULL || length > entry->max_free)
1323 		return (1);
1324 
1325 	/*
1326 	 * Search the right subtree in the order: left subtree, root,
1327 	 * right subtree (first fit).  The previous splay implies that
1328 	 * all regions in the right subtree have addresses > start.
1329 	 */
1330 	while (entry != NULL) {
1331 		if (entry->left != NULL && entry->left->max_free >= length)
1332 			entry = entry->left;
1333 		else if (entry->adj_free >= length) {
1334 			*addr = entry->end;
1335 			goto found;
1336 		} else
1337 			entry = entry->right;
1338 	}
1339 
1340 	/* Can't get here, so panic if we do. */
1341 	panic("vm_map_findspace: max_free corrupt");
1342 
1343 found:
1344 	/* Expand the kernel pmap, if necessary. */
1345 	if (map == kernel_map) {
1346 		end = round_page(*addr + length);
1347 		if (end > kernel_vm_end)
1348 			pmap_growkernel(end);
1349 	}
1350 	return (0);
1351 }
1352 
1353 int
1354 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1355     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1356     vm_prot_t max, int cow)
1357 {
1358 	vm_offset_t end;
1359 	int result;
1360 
1361 	end = start + length;
1362 	vm_map_lock(map);
1363 	VM_MAP_RANGE_CHECK(map, start, end);
1364 	(void) vm_map_delete(map, start, end);
1365 	result = vm_map_insert(map, object, offset, start, end, prot,
1366 	    max, cow);
1367 	vm_map_unlock(map);
1368 	return (result);
1369 }
1370 
1371 /*
1372  *	vm_map_find finds an unallocated region in the target address
1373  *	map with the given length.  The search is defined to be
1374  *	first-fit from the specified address; the region found is
1375  *	returned in the same parameter.
1376  *
1377  *	If object is non-NULL, ref count must be bumped by caller
1378  *	prior to making call to account for the new entry.
1379  */
1380 int
1381 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1382 	    vm_offset_t *addr,	/* IN/OUT */
1383 	    vm_size_t length, int find_space, vm_prot_t prot,
1384 	    vm_prot_t max, int cow)
1385 {
1386 	vm_offset_t start;
1387 	int result;
1388 
1389 	start = *addr;
1390 	vm_map_lock(map);
1391 	do {
1392 		if (find_space != VMFS_NO_SPACE) {
1393 			if (vm_map_findspace(map, start, length, addr)) {
1394 				vm_map_unlock(map);
1395 				return (KERN_NO_SPACE);
1396 			}
1397 			switch (find_space) {
1398 			case VMFS_ALIGNED_SPACE:
1399 				pmap_align_superpage(object, offset, addr,
1400 				    length);
1401 				break;
1402 #ifdef VMFS_TLB_ALIGNED_SPACE
1403 			case VMFS_TLB_ALIGNED_SPACE:
1404 				pmap_align_tlb(addr);
1405 				break;
1406 #endif
1407 			default:
1408 				break;
1409 			}
1410 
1411 			start = *addr;
1412 		}
1413 		result = vm_map_insert(map, object, offset, start, start +
1414 		    length, prot, max, cow);
1415 	} while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE
1416 #ifdef VMFS_TLB_ALIGNED_SPACE
1417 	    || find_space == VMFS_TLB_ALIGNED_SPACE
1418 #endif
1419 	    ));
1420 	vm_map_unlock(map);
1421 	return (result);
1422 }
1423 
1424 /*
1425  *	vm_map_simplify_entry:
1426  *
1427  *	Simplify the given map entry by merging with either neighbor.  This
1428  *	routine also has the ability to merge with both neighbors.
1429  *
1430  *	The map must be locked.
1431  *
1432  *	This routine guarentees that the passed entry remains valid (though
1433  *	possibly extended).  When merging, this routine may delete one or
1434  *	both neighbors.
1435  */
1436 void
1437 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1438 {
1439 	vm_map_entry_t next, prev;
1440 	vm_size_t prevsize, esize;
1441 
1442 	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1443 		return;
1444 
1445 	prev = entry->prev;
1446 	if (prev != &map->header) {
1447 		prevsize = prev->end - prev->start;
1448 		if ( (prev->end == entry->start) &&
1449 		     (prev->object.vm_object == entry->object.vm_object) &&
1450 		     (!prev->object.vm_object ||
1451 			(prev->offset + prevsize == entry->offset)) &&
1452 		     (prev->eflags == entry->eflags) &&
1453 		     (prev->protection == entry->protection) &&
1454 		     (prev->max_protection == entry->max_protection) &&
1455 		     (prev->inheritance == entry->inheritance) &&
1456 		     (prev->wired_count == entry->wired_count) &&
1457 		     (prev->uip == entry->uip)) {
1458 			vm_map_entry_unlink(map, prev);
1459 			entry->start = prev->start;
1460 			entry->offset = prev->offset;
1461 			if (entry->prev != &map->header)
1462 				vm_map_entry_resize_free(map, entry->prev);
1463 
1464 			/*
1465 			 * If the backing object is a vnode object,
1466 			 * vm_object_deallocate() calls vrele().
1467 			 * However, vrele() does not lock the vnode
1468 			 * because the vnode has additional
1469 			 * references.  Thus, the map lock can be kept
1470 			 * without causing a lock-order reversal with
1471 			 * the vnode lock.
1472 			 */
1473 			if (prev->object.vm_object)
1474 				vm_object_deallocate(prev->object.vm_object);
1475 			if (prev->uip != NULL)
1476 				uifree(prev->uip);
1477 			vm_map_entry_dispose(map, prev);
1478 		}
1479 	}
1480 
1481 	next = entry->next;
1482 	if (next != &map->header) {
1483 		esize = entry->end - entry->start;
1484 		if ((entry->end == next->start) &&
1485 		    (next->object.vm_object == entry->object.vm_object) &&
1486 		     (!entry->object.vm_object ||
1487 			(entry->offset + esize == next->offset)) &&
1488 		    (next->eflags == entry->eflags) &&
1489 		    (next->protection == entry->protection) &&
1490 		    (next->max_protection == entry->max_protection) &&
1491 		    (next->inheritance == entry->inheritance) &&
1492 		    (next->wired_count == entry->wired_count) &&
1493 		    (next->uip == entry->uip)) {
1494 			vm_map_entry_unlink(map, next);
1495 			entry->end = next->end;
1496 			vm_map_entry_resize_free(map, entry);
1497 
1498 			/*
1499 			 * See comment above.
1500 			 */
1501 			if (next->object.vm_object)
1502 				vm_object_deallocate(next->object.vm_object);
1503 			if (next->uip != NULL)
1504 				uifree(next->uip);
1505 			vm_map_entry_dispose(map, next);
1506 		}
1507 	}
1508 }
1509 /*
1510  *	vm_map_clip_start:	[ internal use only ]
1511  *
1512  *	Asserts that the given entry begins at or after
1513  *	the specified address; if necessary,
1514  *	it splits the entry into two.
1515  */
1516 #define vm_map_clip_start(map, entry, startaddr) \
1517 { \
1518 	if (startaddr > entry->start) \
1519 		_vm_map_clip_start(map, entry, startaddr); \
1520 }
1521 
1522 /*
1523  *	This routine is called only when it is known that
1524  *	the entry must be split.
1525  */
1526 static void
1527 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1528 {
1529 	vm_map_entry_t new_entry;
1530 
1531 	VM_MAP_ASSERT_LOCKED(map);
1532 
1533 	/*
1534 	 * Split off the front portion -- note that we must insert the new
1535 	 * entry BEFORE this one, so that this entry has the specified
1536 	 * starting address.
1537 	 */
1538 	vm_map_simplify_entry(map, entry);
1539 
1540 	/*
1541 	 * If there is no object backing this entry, we might as well create
1542 	 * one now.  If we defer it, an object can get created after the map
1543 	 * is clipped, and individual objects will be created for the split-up
1544 	 * map.  This is a bit of a hack, but is also about the best place to
1545 	 * put this improvement.
1546 	 */
1547 	if (entry->object.vm_object == NULL && !map->system_map) {
1548 		vm_object_t object;
1549 		object = vm_object_allocate(OBJT_DEFAULT,
1550 				atop(entry->end - entry->start));
1551 		entry->object.vm_object = object;
1552 		entry->offset = 0;
1553 		if (entry->uip != NULL) {
1554 			object->uip = entry->uip;
1555 			object->charge = entry->end - entry->start;
1556 			entry->uip = NULL;
1557 		}
1558 	} else if (entry->object.vm_object != NULL &&
1559 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1560 		   entry->uip != NULL) {
1561 		VM_OBJECT_LOCK(entry->object.vm_object);
1562 		KASSERT(entry->object.vm_object->uip == NULL,
1563 		    ("OVERCOMMIT: vm_entry_clip_start: both uip e %p", entry));
1564 		entry->object.vm_object->uip = entry->uip;
1565 		entry->object.vm_object->charge = entry->end - entry->start;
1566 		VM_OBJECT_UNLOCK(entry->object.vm_object);
1567 		entry->uip = NULL;
1568 	}
1569 
1570 	new_entry = vm_map_entry_create(map);
1571 	*new_entry = *entry;
1572 
1573 	new_entry->end = start;
1574 	entry->offset += (start - entry->start);
1575 	entry->start = start;
1576 	if (new_entry->uip != NULL)
1577 		uihold(entry->uip);
1578 
1579 	vm_map_entry_link(map, entry->prev, new_entry);
1580 
1581 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1582 		vm_object_reference(new_entry->object.vm_object);
1583 	}
1584 }
1585 
1586 /*
1587  *	vm_map_clip_end:	[ internal use only ]
1588  *
1589  *	Asserts that the given entry ends at or before
1590  *	the specified address; if necessary,
1591  *	it splits the entry into two.
1592  */
1593 #define vm_map_clip_end(map, entry, endaddr) \
1594 { \
1595 	if ((endaddr) < (entry->end)) \
1596 		_vm_map_clip_end((map), (entry), (endaddr)); \
1597 }
1598 
1599 /*
1600  *	This routine is called only when it is known that
1601  *	the entry must be split.
1602  */
1603 static void
1604 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1605 {
1606 	vm_map_entry_t new_entry;
1607 
1608 	VM_MAP_ASSERT_LOCKED(map);
1609 
1610 	/*
1611 	 * If there is no object backing this entry, we might as well create
1612 	 * one now.  If we defer it, an object can get created after the map
1613 	 * is clipped, and individual objects will be created for the split-up
1614 	 * map.  This is a bit of a hack, but is also about the best place to
1615 	 * put this improvement.
1616 	 */
1617 	if (entry->object.vm_object == NULL && !map->system_map) {
1618 		vm_object_t object;
1619 		object = vm_object_allocate(OBJT_DEFAULT,
1620 				atop(entry->end - entry->start));
1621 		entry->object.vm_object = object;
1622 		entry->offset = 0;
1623 		if (entry->uip != NULL) {
1624 			object->uip = entry->uip;
1625 			object->charge = entry->end - entry->start;
1626 			entry->uip = NULL;
1627 		}
1628 	} else if (entry->object.vm_object != NULL &&
1629 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1630 		   entry->uip != NULL) {
1631 		VM_OBJECT_LOCK(entry->object.vm_object);
1632 		KASSERT(entry->object.vm_object->uip == NULL,
1633 		    ("OVERCOMMIT: vm_entry_clip_end: both uip e %p", entry));
1634 		entry->object.vm_object->uip = entry->uip;
1635 		entry->object.vm_object->charge = entry->end - entry->start;
1636 		VM_OBJECT_UNLOCK(entry->object.vm_object);
1637 		entry->uip = NULL;
1638 	}
1639 
1640 	/*
1641 	 * Create a new entry and insert it AFTER the specified entry
1642 	 */
1643 	new_entry = vm_map_entry_create(map);
1644 	*new_entry = *entry;
1645 
1646 	new_entry->start = entry->end = end;
1647 	new_entry->offset += (end - entry->start);
1648 	if (new_entry->uip != NULL)
1649 		uihold(entry->uip);
1650 
1651 	vm_map_entry_link(map, entry, new_entry);
1652 
1653 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1654 		vm_object_reference(new_entry->object.vm_object);
1655 	}
1656 }
1657 
1658 /*
1659  *	vm_map_submap:		[ kernel use only ]
1660  *
1661  *	Mark the given range as handled by a subordinate map.
1662  *
1663  *	This range must have been created with vm_map_find,
1664  *	and no other operations may have been performed on this
1665  *	range prior to calling vm_map_submap.
1666  *
1667  *	Only a limited number of operations can be performed
1668  *	within this rage after calling vm_map_submap:
1669  *		vm_fault
1670  *	[Don't try vm_map_copy!]
1671  *
1672  *	To remove a submapping, one must first remove the
1673  *	range from the superior map, and then destroy the
1674  *	submap (if desired).  [Better yet, don't try it.]
1675  */
1676 int
1677 vm_map_submap(
1678 	vm_map_t map,
1679 	vm_offset_t start,
1680 	vm_offset_t end,
1681 	vm_map_t submap)
1682 {
1683 	vm_map_entry_t entry;
1684 	int result = KERN_INVALID_ARGUMENT;
1685 
1686 	vm_map_lock(map);
1687 
1688 	VM_MAP_RANGE_CHECK(map, start, end);
1689 
1690 	if (vm_map_lookup_entry(map, start, &entry)) {
1691 		vm_map_clip_start(map, entry, start);
1692 	} else
1693 		entry = entry->next;
1694 
1695 	vm_map_clip_end(map, entry, end);
1696 
1697 	if ((entry->start == start) && (entry->end == end) &&
1698 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1699 	    (entry->object.vm_object == NULL)) {
1700 		entry->object.sub_map = submap;
1701 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1702 		result = KERN_SUCCESS;
1703 	}
1704 	vm_map_unlock(map);
1705 
1706 	return (result);
1707 }
1708 
1709 /*
1710  * The maximum number of pages to map
1711  */
1712 #define	MAX_INIT_PT	96
1713 
1714 /*
1715  *	vm_map_pmap_enter:
1716  *
1717  *	Preload read-only mappings for the given object's resident pages into
1718  *	the given map.  This eliminates the soft faults on process startup and
1719  *	immediately after an mmap(2).  Because these are speculative mappings,
1720  *	cached pages are not reactivated and mapped.
1721  */
1722 void
1723 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1724     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1725 {
1726 	vm_offset_t start;
1727 	vm_page_t p, p_start;
1728 	vm_pindex_t psize, tmpidx;
1729 	boolean_t are_queues_locked;
1730 
1731 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1732 		return;
1733 	VM_OBJECT_LOCK(object);
1734 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1735 		pmap_object_init_pt(map->pmap, addr, object, pindex, size);
1736 		goto unlock_return;
1737 	}
1738 
1739 	psize = atop(size);
1740 
1741 	if ((flags & MAP_PREFAULT_PARTIAL) && psize > MAX_INIT_PT &&
1742 	    object->resident_page_count > MAX_INIT_PT)
1743 		goto unlock_return;
1744 
1745 	if (psize + pindex > object->size) {
1746 		if (object->size < pindex)
1747 			goto unlock_return;
1748 		psize = object->size - pindex;
1749 	}
1750 
1751 	are_queues_locked = FALSE;
1752 	start = 0;
1753 	p_start = NULL;
1754 
1755 	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
1756 		if (p->pindex < pindex) {
1757 			p = vm_page_splay(pindex, object->root);
1758 			if ((object->root = p)->pindex < pindex)
1759 				p = TAILQ_NEXT(p, listq);
1760 		}
1761 	}
1762 	/*
1763 	 * Assert: the variable p is either (1) the page with the
1764 	 * least pindex greater than or equal to the parameter pindex
1765 	 * or (2) NULL.
1766 	 */
1767 	for (;
1768 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1769 	     p = TAILQ_NEXT(p, listq)) {
1770 		/*
1771 		 * don't allow an madvise to blow away our really
1772 		 * free pages allocating pv entries.
1773 		 */
1774 		if ((flags & MAP_PREFAULT_MADVISE) &&
1775 		    cnt.v_free_count < cnt.v_free_reserved) {
1776 			psize = tmpidx;
1777 			break;
1778 		}
1779 		if (p->valid == VM_PAGE_BITS_ALL) {
1780 			if (p_start == NULL) {
1781 				start = addr + ptoa(tmpidx);
1782 				p_start = p;
1783 			}
1784 		} else if (p_start != NULL) {
1785 			if (!are_queues_locked) {
1786 				are_queues_locked = TRUE;
1787 				vm_page_lock_queues();
1788 			}
1789 			pmap_enter_object(map->pmap, start, addr +
1790 			    ptoa(tmpidx), p_start, prot);
1791 			p_start = NULL;
1792 		}
1793 	}
1794 	if (p_start != NULL) {
1795 		if (!are_queues_locked) {
1796 			are_queues_locked = TRUE;
1797 			vm_page_lock_queues();
1798 		}
1799 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1800 		    p_start, prot);
1801 	}
1802 	if (are_queues_locked)
1803 		vm_page_unlock_queues();
1804 unlock_return:
1805 	VM_OBJECT_UNLOCK(object);
1806 }
1807 
1808 /*
1809  *	vm_map_protect:
1810  *
1811  *	Sets the protection of the specified address
1812  *	region in the target map.  If "set_max" is
1813  *	specified, the maximum protection is to be set;
1814  *	otherwise, only the current protection is affected.
1815  */
1816 int
1817 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1818 	       vm_prot_t new_prot, boolean_t set_max)
1819 {
1820 	vm_map_entry_t current, entry;
1821 	vm_object_t obj;
1822 	struct uidinfo *uip;
1823 	vm_prot_t old_prot;
1824 
1825 	vm_map_lock(map);
1826 
1827 	VM_MAP_RANGE_CHECK(map, start, end);
1828 
1829 	if (vm_map_lookup_entry(map, start, &entry)) {
1830 		vm_map_clip_start(map, entry, start);
1831 	} else {
1832 		entry = entry->next;
1833 	}
1834 
1835 	/*
1836 	 * Make a first pass to check for protection violations.
1837 	 */
1838 	current = entry;
1839 	while ((current != &map->header) && (current->start < end)) {
1840 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1841 			vm_map_unlock(map);
1842 			return (KERN_INVALID_ARGUMENT);
1843 		}
1844 		if ((new_prot & current->max_protection) != new_prot) {
1845 			vm_map_unlock(map);
1846 			return (KERN_PROTECTION_FAILURE);
1847 		}
1848 		current = current->next;
1849 	}
1850 
1851 
1852 	/*
1853 	 * Do an accounting pass for private read-only mappings that
1854 	 * now will do cow due to allowed write (e.g. debugger sets
1855 	 * breakpoint on text segment)
1856 	 */
1857 	for (current = entry; (current != &map->header) &&
1858 	     (current->start < end); current = current->next) {
1859 
1860 		vm_map_clip_end(map, current, end);
1861 
1862 		if (set_max ||
1863 		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1864 		    ENTRY_CHARGED(current)) {
1865 			continue;
1866 		}
1867 
1868 		uip = curthread->td_ucred->cr_ruidinfo;
1869 		obj = current->object.vm_object;
1870 
1871 		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1872 			if (!swap_reserve(current->end - current->start)) {
1873 				vm_map_unlock(map);
1874 				return (KERN_RESOURCE_SHORTAGE);
1875 			}
1876 			uihold(uip);
1877 			current->uip = uip;
1878 			continue;
1879 		}
1880 
1881 		VM_OBJECT_LOCK(obj);
1882 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1883 			VM_OBJECT_UNLOCK(obj);
1884 			continue;
1885 		}
1886 
1887 		/*
1888 		 * Charge for the whole object allocation now, since
1889 		 * we cannot distinguish between non-charged and
1890 		 * charged clipped mapping of the same object later.
1891 		 */
1892 		KASSERT(obj->charge == 0,
1893 		    ("vm_map_protect: object %p overcharged\n", obj));
1894 		if (!swap_reserve(ptoa(obj->size))) {
1895 			VM_OBJECT_UNLOCK(obj);
1896 			vm_map_unlock(map);
1897 			return (KERN_RESOURCE_SHORTAGE);
1898 		}
1899 
1900 		uihold(uip);
1901 		obj->uip = uip;
1902 		obj->charge = ptoa(obj->size);
1903 		VM_OBJECT_UNLOCK(obj);
1904 	}
1905 
1906 	/*
1907 	 * Go back and fix up protections. [Note that clipping is not
1908 	 * necessary the second time.]
1909 	 */
1910 	current = entry;
1911 	while ((current != &map->header) && (current->start < end)) {
1912 		old_prot = current->protection;
1913 
1914 		if (set_max)
1915 			current->protection =
1916 			    (current->max_protection = new_prot) &
1917 			    old_prot;
1918 		else
1919 			current->protection = new_prot;
1920 
1921 		if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED))
1922 		     == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) &&
1923 		    (current->protection & VM_PROT_WRITE) != 0 &&
1924 		    (old_prot & VM_PROT_WRITE) == 0) {
1925 			vm_fault_copy_entry(map, map, current, current, NULL);
1926 		}
1927 
1928 		/*
1929 		 * When restricting access, update the physical map.  Worry
1930 		 * about copy-on-write here.
1931 		 */
1932 		if ((old_prot & ~current->protection) != 0) {
1933 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1934 							VM_PROT_ALL)
1935 			pmap_protect(map->pmap, current->start,
1936 			    current->end,
1937 			    current->protection & MASK(current));
1938 #undef	MASK
1939 		}
1940 		vm_map_simplify_entry(map, current);
1941 		current = current->next;
1942 	}
1943 	vm_map_unlock(map);
1944 	return (KERN_SUCCESS);
1945 }
1946 
1947 /*
1948  *	vm_map_madvise:
1949  *
1950  *	This routine traverses a processes map handling the madvise
1951  *	system call.  Advisories are classified as either those effecting
1952  *	the vm_map_entry structure, or those effecting the underlying
1953  *	objects.
1954  */
1955 int
1956 vm_map_madvise(
1957 	vm_map_t map,
1958 	vm_offset_t start,
1959 	vm_offset_t end,
1960 	int behav)
1961 {
1962 	vm_map_entry_t current, entry;
1963 	int modify_map = 0;
1964 
1965 	/*
1966 	 * Some madvise calls directly modify the vm_map_entry, in which case
1967 	 * we need to use an exclusive lock on the map and we need to perform
1968 	 * various clipping operations.  Otherwise we only need a read-lock
1969 	 * on the map.
1970 	 */
1971 	switch(behav) {
1972 	case MADV_NORMAL:
1973 	case MADV_SEQUENTIAL:
1974 	case MADV_RANDOM:
1975 	case MADV_NOSYNC:
1976 	case MADV_AUTOSYNC:
1977 	case MADV_NOCORE:
1978 	case MADV_CORE:
1979 		modify_map = 1;
1980 		vm_map_lock(map);
1981 		break;
1982 	case MADV_WILLNEED:
1983 	case MADV_DONTNEED:
1984 	case MADV_FREE:
1985 		vm_map_lock_read(map);
1986 		break;
1987 	default:
1988 		return (KERN_INVALID_ARGUMENT);
1989 	}
1990 
1991 	/*
1992 	 * Locate starting entry and clip if necessary.
1993 	 */
1994 	VM_MAP_RANGE_CHECK(map, start, end);
1995 
1996 	if (vm_map_lookup_entry(map, start, &entry)) {
1997 		if (modify_map)
1998 			vm_map_clip_start(map, entry, start);
1999 	} else {
2000 		entry = entry->next;
2001 	}
2002 
2003 	if (modify_map) {
2004 		/*
2005 		 * madvise behaviors that are implemented in the vm_map_entry.
2006 		 *
2007 		 * We clip the vm_map_entry so that behavioral changes are
2008 		 * limited to the specified address range.
2009 		 */
2010 		for (current = entry;
2011 		     (current != &map->header) && (current->start < end);
2012 		     current = current->next
2013 		) {
2014 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2015 				continue;
2016 
2017 			vm_map_clip_end(map, current, end);
2018 
2019 			switch (behav) {
2020 			case MADV_NORMAL:
2021 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2022 				break;
2023 			case MADV_SEQUENTIAL:
2024 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2025 				break;
2026 			case MADV_RANDOM:
2027 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2028 				break;
2029 			case MADV_NOSYNC:
2030 				current->eflags |= MAP_ENTRY_NOSYNC;
2031 				break;
2032 			case MADV_AUTOSYNC:
2033 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2034 				break;
2035 			case MADV_NOCORE:
2036 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2037 				break;
2038 			case MADV_CORE:
2039 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2040 				break;
2041 			default:
2042 				break;
2043 			}
2044 			vm_map_simplify_entry(map, current);
2045 		}
2046 		vm_map_unlock(map);
2047 	} else {
2048 		vm_pindex_t pindex;
2049 		int count;
2050 
2051 		/*
2052 		 * madvise behaviors that are implemented in the underlying
2053 		 * vm_object.
2054 		 *
2055 		 * Since we don't clip the vm_map_entry, we have to clip
2056 		 * the vm_object pindex and count.
2057 		 */
2058 		for (current = entry;
2059 		     (current != &map->header) && (current->start < end);
2060 		     current = current->next
2061 		) {
2062 			vm_offset_t useStart;
2063 
2064 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2065 				continue;
2066 
2067 			pindex = OFF_TO_IDX(current->offset);
2068 			count = atop(current->end - current->start);
2069 			useStart = current->start;
2070 
2071 			if (current->start < start) {
2072 				pindex += atop(start - current->start);
2073 				count -= atop(start - current->start);
2074 				useStart = start;
2075 			}
2076 			if (current->end > end)
2077 				count -= atop(current->end - end);
2078 
2079 			if (count <= 0)
2080 				continue;
2081 
2082 			vm_object_madvise(current->object.vm_object,
2083 					  pindex, count, behav);
2084 			if (behav == MADV_WILLNEED) {
2085 				vm_map_pmap_enter(map,
2086 				    useStart,
2087 				    current->protection,
2088 				    current->object.vm_object,
2089 				    pindex,
2090 				    (count << PAGE_SHIFT),
2091 				    MAP_PREFAULT_MADVISE
2092 				);
2093 			}
2094 		}
2095 		vm_map_unlock_read(map);
2096 	}
2097 	return (0);
2098 }
2099 
2100 
2101 /*
2102  *	vm_map_inherit:
2103  *
2104  *	Sets the inheritance of the specified address
2105  *	range in the target map.  Inheritance
2106  *	affects how the map will be shared with
2107  *	child maps at the time of vmspace_fork.
2108  */
2109 int
2110 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2111 	       vm_inherit_t new_inheritance)
2112 {
2113 	vm_map_entry_t entry;
2114 	vm_map_entry_t temp_entry;
2115 
2116 	switch (new_inheritance) {
2117 	case VM_INHERIT_NONE:
2118 	case VM_INHERIT_COPY:
2119 	case VM_INHERIT_SHARE:
2120 		break;
2121 	default:
2122 		return (KERN_INVALID_ARGUMENT);
2123 	}
2124 	vm_map_lock(map);
2125 	VM_MAP_RANGE_CHECK(map, start, end);
2126 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2127 		entry = temp_entry;
2128 		vm_map_clip_start(map, entry, start);
2129 	} else
2130 		entry = temp_entry->next;
2131 	while ((entry != &map->header) && (entry->start < end)) {
2132 		vm_map_clip_end(map, entry, end);
2133 		entry->inheritance = new_inheritance;
2134 		vm_map_simplify_entry(map, entry);
2135 		entry = entry->next;
2136 	}
2137 	vm_map_unlock(map);
2138 	return (KERN_SUCCESS);
2139 }
2140 
2141 /*
2142  *	vm_map_unwire:
2143  *
2144  *	Implements both kernel and user unwiring.
2145  */
2146 int
2147 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2148     int flags)
2149 {
2150 	vm_map_entry_t entry, first_entry, tmp_entry;
2151 	vm_offset_t saved_start;
2152 	unsigned int last_timestamp;
2153 	int rv;
2154 	boolean_t need_wakeup, result, user_unwire;
2155 
2156 	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2157 	vm_map_lock(map);
2158 	VM_MAP_RANGE_CHECK(map, start, end);
2159 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2160 		if (flags & VM_MAP_WIRE_HOLESOK)
2161 			first_entry = first_entry->next;
2162 		else {
2163 			vm_map_unlock(map);
2164 			return (KERN_INVALID_ADDRESS);
2165 		}
2166 	}
2167 	last_timestamp = map->timestamp;
2168 	entry = first_entry;
2169 	while (entry != &map->header && entry->start < end) {
2170 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2171 			/*
2172 			 * We have not yet clipped the entry.
2173 			 */
2174 			saved_start = (start >= entry->start) ? start :
2175 			    entry->start;
2176 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2177 			if (vm_map_unlock_and_wait(map, 0)) {
2178 				/*
2179 				 * Allow interruption of user unwiring?
2180 				 */
2181 			}
2182 			vm_map_lock(map);
2183 			if (last_timestamp+1 != map->timestamp) {
2184 				/*
2185 				 * Look again for the entry because the map was
2186 				 * modified while it was unlocked.
2187 				 * Specifically, the entry may have been
2188 				 * clipped, merged, or deleted.
2189 				 */
2190 				if (!vm_map_lookup_entry(map, saved_start,
2191 				    &tmp_entry)) {
2192 					if (flags & VM_MAP_WIRE_HOLESOK)
2193 						tmp_entry = tmp_entry->next;
2194 					else {
2195 						if (saved_start == start) {
2196 							/*
2197 							 * First_entry has been deleted.
2198 							 */
2199 							vm_map_unlock(map);
2200 							return (KERN_INVALID_ADDRESS);
2201 						}
2202 						end = saved_start;
2203 						rv = KERN_INVALID_ADDRESS;
2204 						goto done;
2205 					}
2206 				}
2207 				if (entry == first_entry)
2208 					first_entry = tmp_entry;
2209 				else
2210 					first_entry = NULL;
2211 				entry = tmp_entry;
2212 			}
2213 			last_timestamp = map->timestamp;
2214 			continue;
2215 		}
2216 		vm_map_clip_start(map, entry, start);
2217 		vm_map_clip_end(map, entry, end);
2218 		/*
2219 		 * Mark the entry in case the map lock is released.  (See
2220 		 * above.)
2221 		 */
2222 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2223 		/*
2224 		 * Check the map for holes in the specified region.
2225 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2226 		 */
2227 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2228 		    (entry->end < end && (entry->next == &map->header ||
2229 		    entry->next->start > entry->end))) {
2230 			end = entry->end;
2231 			rv = KERN_INVALID_ADDRESS;
2232 			goto done;
2233 		}
2234 		/*
2235 		 * If system unwiring, require that the entry is system wired.
2236 		 */
2237 		if (!user_unwire &&
2238 		    vm_map_entry_system_wired_count(entry) == 0) {
2239 			end = entry->end;
2240 			rv = KERN_INVALID_ARGUMENT;
2241 			goto done;
2242 		}
2243 		entry = entry->next;
2244 	}
2245 	rv = KERN_SUCCESS;
2246 done:
2247 	need_wakeup = FALSE;
2248 	if (first_entry == NULL) {
2249 		result = vm_map_lookup_entry(map, start, &first_entry);
2250 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2251 			first_entry = first_entry->next;
2252 		else
2253 			KASSERT(result, ("vm_map_unwire: lookup failed"));
2254 	}
2255 	entry = first_entry;
2256 	while (entry != &map->header && entry->start < end) {
2257 		if (rv == KERN_SUCCESS && (!user_unwire ||
2258 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2259 			if (user_unwire)
2260 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2261 			entry->wired_count--;
2262 			if (entry->wired_count == 0) {
2263 				/*
2264 				 * Retain the map lock.
2265 				 */
2266 				vm_fault_unwire(map, entry->start, entry->end,
2267 				    entry->object.vm_object != NULL &&
2268 				    (entry->object.vm_object->type == OBJT_DEVICE ||
2269 				    entry->object.vm_object->type == OBJT_SG));
2270 			}
2271 		}
2272 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2273 			("vm_map_unwire: in-transition flag missing"));
2274 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2275 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2276 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2277 			need_wakeup = TRUE;
2278 		}
2279 		vm_map_simplify_entry(map, entry);
2280 		entry = entry->next;
2281 	}
2282 	vm_map_unlock(map);
2283 	if (need_wakeup)
2284 		vm_map_wakeup(map);
2285 	return (rv);
2286 }
2287 
2288 /*
2289  *	vm_map_wire:
2290  *
2291  *	Implements both kernel and user wiring.
2292  */
2293 int
2294 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2295     int flags)
2296 {
2297 	vm_map_entry_t entry, first_entry, tmp_entry;
2298 	vm_offset_t saved_end, saved_start;
2299 	unsigned int last_timestamp;
2300 	int rv;
2301 	boolean_t fictitious, need_wakeup, result, user_wire;
2302 
2303 	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2304 	vm_map_lock(map);
2305 	VM_MAP_RANGE_CHECK(map, start, end);
2306 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2307 		if (flags & VM_MAP_WIRE_HOLESOK)
2308 			first_entry = first_entry->next;
2309 		else {
2310 			vm_map_unlock(map);
2311 			return (KERN_INVALID_ADDRESS);
2312 		}
2313 	}
2314 	last_timestamp = map->timestamp;
2315 	entry = first_entry;
2316 	while (entry != &map->header && entry->start < end) {
2317 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2318 			/*
2319 			 * We have not yet clipped the entry.
2320 			 */
2321 			saved_start = (start >= entry->start) ? start :
2322 			    entry->start;
2323 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2324 			if (vm_map_unlock_and_wait(map, 0)) {
2325 				/*
2326 				 * Allow interruption of user wiring?
2327 				 */
2328 			}
2329 			vm_map_lock(map);
2330 			if (last_timestamp + 1 != map->timestamp) {
2331 				/*
2332 				 * Look again for the entry because the map was
2333 				 * modified while it was unlocked.
2334 				 * Specifically, the entry may have been
2335 				 * clipped, merged, or deleted.
2336 				 */
2337 				if (!vm_map_lookup_entry(map, saved_start,
2338 				    &tmp_entry)) {
2339 					if (flags & VM_MAP_WIRE_HOLESOK)
2340 						tmp_entry = tmp_entry->next;
2341 					else {
2342 						if (saved_start == start) {
2343 							/*
2344 							 * first_entry has been deleted.
2345 							 */
2346 							vm_map_unlock(map);
2347 							return (KERN_INVALID_ADDRESS);
2348 						}
2349 						end = saved_start;
2350 						rv = KERN_INVALID_ADDRESS;
2351 						goto done;
2352 					}
2353 				}
2354 				if (entry == first_entry)
2355 					first_entry = tmp_entry;
2356 				else
2357 					first_entry = NULL;
2358 				entry = tmp_entry;
2359 			}
2360 			last_timestamp = map->timestamp;
2361 			continue;
2362 		}
2363 		vm_map_clip_start(map, entry, start);
2364 		vm_map_clip_end(map, entry, end);
2365 		/*
2366 		 * Mark the entry in case the map lock is released.  (See
2367 		 * above.)
2368 		 */
2369 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2370 		/*
2371 		 *
2372 		 */
2373 		if (entry->wired_count == 0) {
2374 			if ((entry->protection & (VM_PROT_READ|VM_PROT_EXECUTE))
2375 			    == 0) {
2376 				entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2377 				if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2378 					end = entry->end;
2379 					rv = KERN_INVALID_ADDRESS;
2380 					goto done;
2381 				}
2382 				goto next_entry;
2383 			}
2384 			entry->wired_count++;
2385 			saved_start = entry->start;
2386 			saved_end = entry->end;
2387 			fictitious = entry->object.vm_object != NULL &&
2388 			    (entry->object.vm_object->type == OBJT_DEVICE ||
2389 			    entry->object.vm_object->type == OBJT_SG);
2390 			/*
2391 			 * Release the map lock, relying on the in-transition
2392 			 * mark.
2393 			 */
2394 			vm_map_unlock(map);
2395 			rv = vm_fault_wire(map, saved_start, saved_end,
2396 			    fictitious);
2397 			vm_map_lock(map);
2398 			if (last_timestamp + 1 != map->timestamp) {
2399 				/*
2400 				 * Look again for the entry because the map was
2401 				 * modified while it was unlocked.  The entry
2402 				 * may have been clipped, but NOT merged or
2403 				 * deleted.
2404 				 */
2405 				result = vm_map_lookup_entry(map, saved_start,
2406 				    &tmp_entry);
2407 				KASSERT(result, ("vm_map_wire: lookup failed"));
2408 				if (entry == first_entry)
2409 					first_entry = tmp_entry;
2410 				else
2411 					first_entry = NULL;
2412 				entry = tmp_entry;
2413 				while (entry->end < saved_end) {
2414 					if (rv != KERN_SUCCESS) {
2415 						KASSERT(entry->wired_count == 1,
2416 						    ("vm_map_wire: bad count"));
2417 						entry->wired_count = -1;
2418 					}
2419 					entry = entry->next;
2420 				}
2421 			}
2422 			last_timestamp = map->timestamp;
2423 			if (rv != KERN_SUCCESS) {
2424 				KASSERT(entry->wired_count == 1,
2425 				    ("vm_map_wire: bad count"));
2426 				/*
2427 				 * Assign an out-of-range value to represent
2428 				 * the failure to wire this entry.
2429 				 */
2430 				entry->wired_count = -1;
2431 				end = entry->end;
2432 				goto done;
2433 			}
2434 		} else if (!user_wire ||
2435 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2436 			entry->wired_count++;
2437 		}
2438 		/*
2439 		 * Check the map for holes in the specified region.
2440 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2441 		 */
2442 	next_entry:
2443 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2444 		    (entry->end < end && (entry->next == &map->header ||
2445 		    entry->next->start > entry->end))) {
2446 			end = entry->end;
2447 			rv = KERN_INVALID_ADDRESS;
2448 			goto done;
2449 		}
2450 		entry = entry->next;
2451 	}
2452 	rv = KERN_SUCCESS;
2453 done:
2454 	need_wakeup = FALSE;
2455 	if (first_entry == NULL) {
2456 		result = vm_map_lookup_entry(map, start, &first_entry);
2457 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2458 			first_entry = first_entry->next;
2459 		else
2460 			KASSERT(result, ("vm_map_wire: lookup failed"));
2461 	}
2462 	entry = first_entry;
2463 	while (entry != &map->header && entry->start < end) {
2464 		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2465 			goto next_entry_done;
2466 		if (rv == KERN_SUCCESS) {
2467 			if (user_wire)
2468 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2469 		} else if (entry->wired_count == -1) {
2470 			/*
2471 			 * Wiring failed on this entry.  Thus, unwiring is
2472 			 * unnecessary.
2473 			 */
2474 			entry->wired_count = 0;
2475 		} else {
2476 			if (!user_wire ||
2477 			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2478 				entry->wired_count--;
2479 			if (entry->wired_count == 0) {
2480 				/*
2481 				 * Retain the map lock.
2482 				 */
2483 				vm_fault_unwire(map, entry->start, entry->end,
2484 				    entry->object.vm_object != NULL &&
2485 				    (entry->object.vm_object->type == OBJT_DEVICE ||
2486 				    entry->object.vm_object->type == OBJT_SG));
2487 			}
2488 		}
2489 	next_entry_done:
2490 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2491 			("vm_map_wire: in-transition flag missing"));
2492 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION|MAP_ENTRY_WIRE_SKIPPED);
2493 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2494 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2495 			need_wakeup = TRUE;
2496 		}
2497 		vm_map_simplify_entry(map, entry);
2498 		entry = entry->next;
2499 	}
2500 	vm_map_unlock(map);
2501 	if (need_wakeup)
2502 		vm_map_wakeup(map);
2503 	return (rv);
2504 }
2505 
2506 /*
2507  * vm_map_sync
2508  *
2509  * Push any dirty cached pages in the address range to their pager.
2510  * If syncio is TRUE, dirty pages are written synchronously.
2511  * If invalidate is TRUE, any cached pages are freed as well.
2512  *
2513  * If the size of the region from start to end is zero, we are
2514  * supposed to flush all modified pages within the region containing
2515  * start.  Unfortunately, a region can be split or coalesced with
2516  * neighboring regions, making it difficult to determine what the
2517  * original region was.  Therefore, we approximate this requirement by
2518  * flushing the current region containing start.
2519  *
2520  * Returns an error if any part of the specified range is not mapped.
2521  */
2522 int
2523 vm_map_sync(
2524 	vm_map_t map,
2525 	vm_offset_t start,
2526 	vm_offset_t end,
2527 	boolean_t syncio,
2528 	boolean_t invalidate)
2529 {
2530 	vm_map_entry_t current;
2531 	vm_map_entry_t entry;
2532 	vm_size_t size;
2533 	vm_object_t object;
2534 	vm_ooffset_t offset;
2535 	unsigned int last_timestamp;
2536 
2537 	vm_map_lock_read(map);
2538 	VM_MAP_RANGE_CHECK(map, start, end);
2539 	if (!vm_map_lookup_entry(map, start, &entry)) {
2540 		vm_map_unlock_read(map);
2541 		return (KERN_INVALID_ADDRESS);
2542 	} else if (start == end) {
2543 		start = entry->start;
2544 		end = entry->end;
2545 	}
2546 	/*
2547 	 * Make a first pass to check for user-wired memory and holes.
2548 	 */
2549 	for (current = entry; current != &map->header && current->start < end;
2550 	    current = current->next) {
2551 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2552 			vm_map_unlock_read(map);
2553 			return (KERN_INVALID_ARGUMENT);
2554 		}
2555 		if (end > current->end &&
2556 		    (current->next == &map->header ||
2557 			current->end != current->next->start)) {
2558 			vm_map_unlock_read(map);
2559 			return (KERN_INVALID_ADDRESS);
2560 		}
2561 	}
2562 
2563 	if (invalidate)
2564 		pmap_remove(map->pmap, start, end);
2565 
2566 	/*
2567 	 * Make a second pass, cleaning/uncaching pages from the indicated
2568 	 * objects as we go.
2569 	 */
2570 	for (current = entry; current != &map->header && current->start < end;) {
2571 		offset = current->offset + (start - current->start);
2572 		size = (end <= current->end ? end : current->end) - start;
2573 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2574 			vm_map_t smap;
2575 			vm_map_entry_t tentry;
2576 			vm_size_t tsize;
2577 
2578 			smap = current->object.sub_map;
2579 			vm_map_lock_read(smap);
2580 			(void) vm_map_lookup_entry(smap, offset, &tentry);
2581 			tsize = tentry->end - offset;
2582 			if (tsize < size)
2583 				size = tsize;
2584 			object = tentry->object.vm_object;
2585 			offset = tentry->offset + (offset - tentry->start);
2586 			vm_map_unlock_read(smap);
2587 		} else {
2588 			object = current->object.vm_object;
2589 		}
2590 		vm_object_reference(object);
2591 		last_timestamp = map->timestamp;
2592 		vm_map_unlock_read(map);
2593 		vm_object_sync(object, offset, size, syncio, invalidate);
2594 		start += size;
2595 		vm_object_deallocate(object);
2596 		vm_map_lock_read(map);
2597 		if (last_timestamp == map->timestamp ||
2598 		    !vm_map_lookup_entry(map, start, &current))
2599 			current = current->next;
2600 	}
2601 
2602 	vm_map_unlock_read(map);
2603 	return (KERN_SUCCESS);
2604 }
2605 
2606 /*
2607  *	vm_map_entry_unwire:	[ internal use only ]
2608  *
2609  *	Make the region specified by this entry pageable.
2610  *
2611  *	The map in question should be locked.
2612  *	[This is the reason for this routine's existence.]
2613  */
2614 static void
2615 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2616 {
2617 	vm_fault_unwire(map, entry->start, entry->end,
2618 	    entry->object.vm_object != NULL &&
2619 	    (entry->object.vm_object->type == OBJT_DEVICE ||
2620 	    entry->object.vm_object->type == OBJT_SG));
2621 	entry->wired_count = 0;
2622 }
2623 
2624 /*
2625  *	vm_map_entry_delete:	[ internal use only ]
2626  *
2627  *	Deallocate the given entry from the target map.
2628  */
2629 static void
2630 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2631 {
2632 	vm_object_t object;
2633 	vm_pindex_t offidxstart, offidxend, count, size1;
2634 	vm_ooffset_t size;
2635 
2636 	vm_map_entry_unlink(map, entry);
2637 	object = entry->object.vm_object;
2638 	size = entry->end - entry->start;
2639 	map->size -= size;
2640 
2641 	if (entry->uip != NULL) {
2642 		swap_release_by_uid(size, entry->uip);
2643 		uifree(entry->uip);
2644 	}
2645 
2646 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2647 	    (object != NULL)) {
2648 		KASSERT(entry->uip == NULL || object->uip == NULL ||
2649 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2650 		    ("OVERCOMMIT vm_map_entry_delete: both uip %p", entry));
2651 		count = OFF_TO_IDX(size);
2652 		offidxstart = OFF_TO_IDX(entry->offset);
2653 		offidxend = offidxstart + count;
2654 		VM_OBJECT_LOCK(object);
2655 		if (object->ref_count != 1 &&
2656 		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2657 		    object == kernel_object || object == kmem_object)) {
2658 			vm_object_collapse(object);
2659 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2660 			if (object->type == OBJT_SWAP)
2661 				swap_pager_freespace(object, offidxstart, count);
2662 			if (offidxend >= object->size &&
2663 			    offidxstart < object->size) {
2664 				size1 = object->size;
2665 				object->size = offidxstart;
2666 				if (object->uip != NULL) {
2667 					size1 -= object->size;
2668 					KASSERT(object->charge >= ptoa(size1),
2669 					    ("vm_map_entry_delete: object->charge < 0"));
2670 					swap_release_by_uid(ptoa(size1), object->uip);
2671 					object->charge -= ptoa(size1);
2672 				}
2673 			}
2674 		}
2675 		VM_OBJECT_UNLOCK(object);
2676 	} else
2677 		entry->object.vm_object = NULL;
2678 }
2679 
2680 /*
2681  *	vm_map_delete:	[ internal use only ]
2682  *
2683  *	Deallocates the given address range from the target
2684  *	map.
2685  */
2686 int
2687 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2688 {
2689 	vm_map_entry_t entry;
2690 	vm_map_entry_t first_entry;
2691 
2692 	VM_MAP_ASSERT_LOCKED(map);
2693 
2694 	/*
2695 	 * Find the start of the region, and clip it
2696 	 */
2697 	if (!vm_map_lookup_entry(map, start, &first_entry))
2698 		entry = first_entry->next;
2699 	else {
2700 		entry = first_entry;
2701 		vm_map_clip_start(map, entry, start);
2702 	}
2703 
2704 	/*
2705 	 * Step through all entries in this region
2706 	 */
2707 	while ((entry != &map->header) && (entry->start < end)) {
2708 		vm_map_entry_t next;
2709 
2710 		/*
2711 		 * Wait for wiring or unwiring of an entry to complete.
2712 		 * Also wait for any system wirings to disappear on
2713 		 * user maps.
2714 		 */
2715 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2716 		    (vm_map_pmap(map) != kernel_pmap &&
2717 		    vm_map_entry_system_wired_count(entry) != 0)) {
2718 			unsigned int last_timestamp;
2719 			vm_offset_t saved_start;
2720 			vm_map_entry_t tmp_entry;
2721 
2722 			saved_start = entry->start;
2723 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2724 			last_timestamp = map->timestamp;
2725 			(void) vm_map_unlock_and_wait(map, 0);
2726 			vm_map_lock(map);
2727 			if (last_timestamp + 1 != map->timestamp) {
2728 				/*
2729 				 * Look again for the entry because the map was
2730 				 * modified while it was unlocked.
2731 				 * Specifically, the entry may have been
2732 				 * clipped, merged, or deleted.
2733 				 */
2734 				if (!vm_map_lookup_entry(map, saved_start,
2735 							 &tmp_entry))
2736 					entry = tmp_entry->next;
2737 				else {
2738 					entry = tmp_entry;
2739 					vm_map_clip_start(map, entry,
2740 							  saved_start);
2741 				}
2742 			}
2743 			continue;
2744 		}
2745 		vm_map_clip_end(map, entry, end);
2746 
2747 		next = entry->next;
2748 
2749 		/*
2750 		 * Unwire before removing addresses from the pmap; otherwise,
2751 		 * unwiring will put the entries back in the pmap.
2752 		 */
2753 		if (entry->wired_count != 0) {
2754 			vm_map_entry_unwire(map, entry);
2755 		}
2756 
2757 		pmap_remove(map->pmap, entry->start, entry->end);
2758 
2759 		/*
2760 		 * Delete the entry only after removing all pmap
2761 		 * entries pointing to its pages.  (Otherwise, its
2762 		 * page frames may be reallocated, and any modify bits
2763 		 * will be set in the wrong object!)
2764 		 */
2765 		vm_map_entry_delete(map, entry);
2766 		entry->next = map->deferred_freelist;
2767 		map->deferred_freelist = entry;
2768 		entry = next;
2769 	}
2770 	return (KERN_SUCCESS);
2771 }
2772 
2773 /*
2774  *	vm_map_remove:
2775  *
2776  *	Remove the given address range from the target map.
2777  *	This is the exported form of vm_map_delete.
2778  */
2779 int
2780 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2781 {
2782 	int result;
2783 
2784 	vm_map_lock(map);
2785 	VM_MAP_RANGE_CHECK(map, start, end);
2786 	result = vm_map_delete(map, start, end);
2787 	vm_map_unlock(map);
2788 	return (result);
2789 }
2790 
2791 /*
2792  *	vm_map_check_protection:
2793  *
2794  *	Assert that the target map allows the specified privilege on the
2795  *	entire address region given.  The entire region must be allocated.
2796  *
2797  *	WARNING!  This code does not and should not check whether the
2798  *	contents of the region is accessible.  For example a smaller file
2799  *	might be mapped into a larger address space.
2800  *
2801  *	NOTE!  This code is also called by munmap().
2802  *
2803  *	The map must be locked.  A read lock is sufficient.
2804  */
2805 boolean_t
2806 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2807 			vm_prot_t protection)
2808 {
2809 	vm_map_entry_t entry;
2810 	vm_map_entry_t tmp_entry;
2811 
2812 	if (!vm_map_lookup_entry(map, start, &tmp_entry))
2813 		return (FALSE);
2814 	entry = tmp_entry;
2815 
2816 	while (start < end) {
2817 		if (entry == &map->header)
2818 			return (FALSE);
2819 		/*
2820 		 * No holes allowed!
2821 		 */
2822 		if (start < entry->start)
2823 			return (FALSE);
2824 		/*
2825 		 * Check protection associated with entry.
2826 		 */
2827 		if ((entry->protection & protection) != protection)
2828 			return (FALSE);
2829 		/* go to next entry */
2830 		start = entry->end;
2831 		entry = entry->next;
2832 	}
2833 	return (TRUE);
2834 }
2835 
2836 /*
2837  *	vm_map_copy_entry:
2838  *
2839  *	Copies the contents of the source entry to the destination
2840  *	entry.  The entries *must* be aligned properly.
2841  */
2842 static void
2843 vm_map_copy_entry(
2844 	vm_map_t src_map,
2845 	vm_map_t dst_map,
2846 	vm_map_entry_t src_entry,
2847 	vm_map_entry_t dst_entry,
2848 	vm_ooffset_t *fork_charge)
2849 {
2850 	vm_object_t src_object;
2851 	vm_offset_t size;
2852 	struct uidinfo *uip;
2853 	int charged;
2854 
2855 	VM_MAP_ASSERT_LOCKED(dst_map);
2856 
2857 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2858 		return;
2859 
2860 	if (src_entry->wired_count == 0) {
2861 
2862 		/*
2863 		 * If the source entry is marked needs_copy, it is already
2864 		 * write-protected.
2865 		 */
2866 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2867 			pmap_protect(src_map->pmap,
2868 			    src_entry->start,
2869 			    src_entry->end,
2870 			    src_entry->protection & ~VM_PROT_WRITE);
2871 		}
2872 
2873 		/*
2874 		 * Make a copy of the object.
2875 		 */
2876 		size = src_entry->end - src_entry->start;
2877 		if ((src_object = src_entry->object.vm_object) != NULL) {
2878 			VM_OBJECT_LOCK(src_object);
2879 			charged = ENTRY_CHARGED(src_entry);
2880 			if ((src_object->handle == NULL) &&
2881 				(src_object->type == OBJT_DEFAULT ||
2882 				 src_object->type == OBJT_SWAP)) {
2883 				vm_object_collapse(src_object);
2884 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2885 					vm_object_split(src_entry);
2886 					src_object = src_entry->object.vm_object;
2887 				}
2888 			}
2889 			vm_object_reference_locked(src_object);
2890 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2891 			if (src_entry->uip != NULL &&
2892 			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
2893 				KASSERT(src_object->uip == NULL,
2894 				    ("OVERCOMMIT: vm_map_copy_entry: uip %p",
2895 				     src_object));
2896 				src_object->uip = src_entry->uip;
2897 				src_object->charge = size;
2898 			}
2899 			VM_OBJECT_UNLOCK(src_object);
2900 			dst_entry->object.vm_object = src_object;
2901 			if (charged) {
2902 				uip = curthread->td_ucred->cr_ruidinfo;
2903 				uihold(uip);
2904 				dst_entry->uip = uip;
2905 				*fork_charge += size;
2906 				if (!(src_entry->eflags &
2907 				      MAP_ENTRY_NEEDS_COPY)) {
2908 					uihold(uip);
2909 					src_entry->uip = uip;
2910 					*fork_charge += size;
2911 				}
2912 			}
2913 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2914 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2915 			dst_entry->offset = src_entry->offset;
2916 		} else {
2917 			dst_entry->object.vm_object = NULL;
2918 			dst_entry->offset = 0;
2919 			if (src_entry->uip != NULL) {
2920 				dst_entry->uip = curthread->td_ucred->cr_ruidinfo;
2921 				uihold(dst_entry->uip);
2922 				*fork_charge += size;
2923 			}
2924 		}
2925 
2926 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2927 		    dst_entry->end - dst_entry->start, src_entry->start);
2928 	} else {
2929 		/*
2930 		 * Of course, wired down pages can't be set copy-on-write.
2931 		 * Cause wired pages to be copied into the new map by
2932 		 * simulating faults (the new pages are pageable)
2933 		 */
2934 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
2935 		    fork_charge);
2936 	}
2937 }
2938 
2939 /*
2940  * vmspace_map_entry_forked:
2941  * Update the newly-forked vmspace each time a map entry is inherited
2942  * or copied.  The values for vm_dsize and vm_tsize are approximate
2943  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
2944  */
2945 static void
2946 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
2947     vm_map_entry_t entry)
2948 {
2949 	vm_size_t entrysize;
2950 	vm_offset_t newend;
2951 
2952 	entrysize = entry->end - entry->start;
2953 	vm2->vm_map.size += entrysize;
2954 	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
2955 		vm2->vm_ssize += btoc(entrysize);
2956 	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
2957 	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
2958 		newend = MIN(entry->end,
2959 		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
2960 		vm2->vm_dsize += btoc(newend - entry->start);
2961 	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
2962 	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
2963 		newend = MIN(entry->end,
2964 		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
2965 		vm2->vm_tsize += btoc(newend - entry->start);
2966 	}
2967 }
2968 
2969 /*
2970  * vmspace_fork:
2971  * Create a new process vmspace structure and vm_map
2972  * based on those of an existing process.  The new map
2973  * is based on the old map, according to the inheritance
2974  * values on the regions in that map.
2975  *
2976  * XXX It might be worth coalescing the entries added to the new vmspace.
2977  *
2978  * The source map must not be locked.
2979  */
2980 struct vmspace *
2981 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
2982 {
2983 	struct vmspace *vm2;
2984 	vm_map_t old_map = &vm1->vm_map;
2985 	vm_map_t new_map;
2986 	vm_map_entry_t old_entry;
2987 	vm_map_entry_t new_entry;
2988 	vm_object_t object;
2989 	int locked;
2990 
2991 	vm_map_lock(old_map);
2992 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2993 	if (vm2 == NULL)
2994 		goto unlock_and_return;
2995 	vm2->vm_taddr = vm1->vm_taddr;
2996 	vm2->vm_daddr = vm1->vm_daddr;
2997 	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
2998 	new_map = &vm2->vm_map;	/* XXX */
2999 	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3000 	KASSERT(locked, ("vmspace_fork: lock failed"));
3001 	new_map->timestamp = 1;
3002 
3003 	old_entry = old_map->header.next;
3004 
3005 	while (old_entry != &old_map->header) {
3006 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3007 			panic("vm_map_fork: encountered a submap");
3008 
3009 		switch (old_entry->inheritance) {
3010 		case VM_INHERIT_NONE:
3011 			break;
3012 
3013 		case VM_INHERIT_SHARE:
3014 			/*
3015 			 * Clone the entry, creating the shared object if necessary.
3016 			 */
3017 			object = old_entry->object.vm_object;
3018 			if (object == NULL) {
3019 				object = vm_object_allocate(OBJT_DEFAULT,
3020 					atop(old_entry->end - old_entry->start));
3021 				old_entry->object.vm_object = object;
3022 				old_entry->offset = 0;
3023 				if (old_entry->uip != NULL) {
3024 					object->uip = old_entry->uip;
3025 					object->charge = old_entry->end -
3026 					    old_entry->start;
3027 					old_entry->uip = NULL;
3028 				}
3029 			}
3030 
3031 			/*
3032 			 * Add the reference before calling vm_object_shadow
3033 			 * to insure that a shadow object is created.
3034 			 */
3035 			vm_object_reference(object);
3036 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3037 				vm_object_shadow(&old_entry->object.vm_object,
3038 					&old_entry->offset,
3039 					atop(old_entry->end - old_entry->start));
3040 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3041 				/* Transfer the second reference too. */
3042 				vm_object_reference(
3043 				    old_entry->object.vm_object);
3044 
3045 				/*
3046 				 * As in vm_map_simplify_entry(), the
3047 				 * vnode lock will not be acquired in
3048 				 * this call to vm_object_deallocate().
3049 				 */
3050 				vm_object_deallocate(object);
3051 				object = old_entry->object.vm_object;
3052 			}
3053 			VM_OBJECT_LOCK(object);
3054 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3055 			if (old_entry->uip != NULL) {
3056 				KASSERT(object->uip == NULL, ("vmspace_fork both uip"));
3057 				object->uip = old_entry->uip;
3058 				object->charge = old_entry->end - old_entry->start;
3059 				old_entry->uip = NULL;
3060 			}
3061 			VM_OBJECT_UNLOCK(object);
3062 
3063 			/*
3064 			 * Clone the entry, referencing the shared object.
3065 			 */
3066 			new_entry = vm_map_entry_create(new_map);
3067 			*new_entry = *old_entry;
3068 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3069 			    MAP_ENTRY_IN_TRANSITION);
3070 			new_entry->wired_count = 0;
3071 
3072 			/*
3073 			 * Insert the entry into the new map -- we know we're
3074 			 * inserting at the end of the new map.
3075 			 */
3076 			vm_map_entry_link(new_map, new_map->header.prev,
3077 			    new_entry);
3078 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3079 
3080 			/*
3081 			 * Update the physical map
3082 			 */
3083 			pmap_copy(new_map->pmap, old_map->pmap,
3084 			    new_entry->start,
3085 			    (old_entry->end - old_entry->start),
3086 			    old_entry->start);
3087 			break;
3088 
3089 		case VM_INHERIT_COPY:
3090 			/*
3091 			 * Clone the entry and link into the map.
3092 			 */
3093 			new_entry = vm_map_entry_create(new_map);
3094 			*new_entry = *old_entry;
3095 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3096 			    MAP_ENTRY_IN_TRANSITION);
3097 			new_entry->wired_count = 0;
3098 			new_entry->object.vm_object = NULL;
3099 			new_entry->uip = NULL;
3100 			vm_map_entry_link(new_map, new_map->header.prev,
3101 			    new_entry);
3102 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3103 			vm_map_copy_entry(old_map, new_map, old_entry,
3104 			    new_entry, fork_charge);
3105 			break;
3106 		}
3107 		old_entry = old_entry->next;
3108 	}
3109 unlock_and_return:
3110 	vm_map_unlock(old_map);
3111 	if (vm2 != NULL)
3112 		vm_map_unlock(new_map);
3113 
3114 	return (vm2);
3115 }
3116 
3117 int
3118 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3119     vm_prot_t prot, vm_prot_t max, int cow)
3120 {
3121 	vm_map_entry_t new_entry, prev_entry;
3122 	vm_offset_t bot, top;
3123 	vm_size_t init_ssize;
3124 	int orient, rv;
3125 	rlim_t vmemlim;
3126 
3127 	/*
3128 	 * The stack orientation is piggybacked with the cow argument.
3129 	 * Extract it into orient and mask the cow argument so that we
3130 	 * don't pass it around further.
3131 	 * NOTE: We explicitly allow bi-directional stacks.
3132 	 */
3133 	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3134 	cow &= ~orient;
3135 	KASSERT(orient != 0, ("No stack grow direction"));
3136 
3137 	if (addrbos < vm_map_min(map) ||
3138 	    addrbos > vm_map_max(map) ||
3139 	    addrbos + max_ssize < addrbos)
3140 		return (KERN_NO_SPACE);
3141 
3142 	init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
3143 
3144 	PROC_LOCK(curthread->td_proc);
3145 	vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
3146 	PROC_UNLOCK(curthread->td_proc);
3147 
3148 	vm_map_lock(map);
3149 
3150 	/* If addr is already mapped, no go */
3151 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3152 		vm_map_unlock(map);
3153 		return (KERN_NO_SPACE);
3154 	}
3155 
3156 	/* If we would blow our VMEM resource limit, no go */
3157 	if (map->size + init_ssize > vmemlim) {
3158 		vm_map_unlock(map);
3159 		return (KERN_NO_SPACE);
3160 	}
3161 
3162 	/*
3163 	 * If we can't accomodate max_ssize in the current mapping, no go.
3164 	 * However, we need to be aware that subsequent user mappings might
3165 	 * map into the space we have reserved for stack, and currently this
3166 	 * space is not protected.
3167 	 *
3168 	 * Hopefully we will at least detect this condition when we try to
3169 	 * grow the stack.
3170 	 */
3171 	if ((prev_entry->next != &map->header) &&
3172 	    (prev_entry->next->start < addrbos + max_ssize)) {
3173 		vm_map_unlock(map);
3174 		return (KERN_NO_SPACE);
3175 	}
3176 
3177 	/*
3178 	 * We initially map a stack of only init_ssize.  We will grow as
3179 	 * needed later.  Depending on the orientation of the stack (i.e.
3180 	 * the grow direction) we either map at the top of the range, the
3181 	 * bottom of the range or in the middle.
3182 	 *
3183 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3184 	 * and cow to be 0.  Possibly we should eliminate these as input
3185 	 * parameters, and just pass these values here in the insert call.
3186 	 */
3187 	if (orient == MAP_STACK_GROWS_DOWN)
3188 		bot = addrbos + max_ssize - init_ssize;
3189 	else if (orient == MAP_STACK_GROWS_UP)
3190 		bot = addrbos;
3191 	else
3192 		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3193 	top = bot + init_ssize;
3194 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3195 
3196 	/* Now set the avail_ssize amount. */
3197 	if (rv == KERN_SUCCESS) {
3198 		if (prev_entry != &map->header)
3199 			vm_map_clip_end(map, prev_entry, bot);
3200 		new_entry = prev_entry->next;
3201 		if (new_entry->end != top || new_entry->start != bot)
3202 			panic("Bad entry start/end for new stack entry");
3203 
3204 		new_entry->avail_ssize = max_ssize - init_ssize;
3205 		if (orient & MAP_STACK_GROWS_DOWN)
3206 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3207 		if (orient & MAP_STACK_GROWS_UP)
3208 			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3209 	}
3210 
3211 	vm_map_unlock(map);
3212 	return (rv);
3213 }
3214 
3215 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3216  * desired address is already mapped, or if we successfully grow
3217  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3218  * stack range (this is strange, but preserves compatibility with
3219  * the grow function in vm_machdep.c).
3220  */
3221 int
3222 vm_map_growstack(struct proc *p, vm_offset_t addr)
3223 {
3224 	vm_map_entry_t next_entry, prev_entry;
3225 	vm_map_entry_t new_entry, stack_entry;
3226 	struct vmspace *vm = p->p_vmspace;
3227 	vm_map_t map = &vm->vm_map;
3228 	vm_offset_t end;
3229 	size_t grow_amount, max_grow;
3230 	rlim_t stacklim, vmemlim;
3231 	int is_procstack, rv;
3232 	struct uidinfo *uip;
3233 
3234 Retry:
3235 	PROC_LOCK(p);
3236 	stacklim = lim_cur(p, RLIMIT_STACK);
3237 	vmemlim = lim_cur(p, RLIMIT_VMEM);
3238 	PROC_UNLOCK(p);
3239 
3240 	vm_map_lock_read(map);
3241 
3242 	/* If addr is already in the entry range, no need to grow.*/
3243 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3244 		vm_map_unlock_read(map);
3245 		return (KERN_SUCCESS);
3246 	}
3247 
3248 	next_entry = prev_entry->next;
3249 	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3250 		/*
3251 		 * This entry does not grow upwards. Since the address lies
3252 		 * beyond this entry, the next entry (if one exists) has to
3253 		 * be a downward growable entry. The entry list header is
3254 		 * never a growable entry, so it suffices to check the flags.
3255 		 */
3256 		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3257 			vm_map_unlock_read(map);
3258 			return (KERN_SUCCESS);
3259 		}
3260 		stack_entry = next_entry;
3261 	} else {
3262 		/*
3263 		 * This entry grows upward. If the next entry does not at
3264 		 * least grow downwards, this is the entry we need to grow.
3265 		 * otherwise we have two possible choices and we have to
3266 		 * select one.
3267 		 */
3268 		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3269 			/*
3270 			 * We have two choices; grow the entry closest to
3271 			 * the address to minimize the amount of growth.
3272 			 */
3273 			if (addr - prev_entry->end <= next_entry->start - addr)
3274 				stack_entry = prev_entry;
3275 			else
3276 				stack_entry = next_entry;
3277 		} else
3278 			stack_entry = prev_entry;
3279 	}
3280 
3281 	if (stack_entry == next_entry) {
3282 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3283 		KASSERT(addr < stack_entry->start, ("foo"));
3284 		end = (prev_entry != &map->header) ? prev_entry->end :
3285 		    stack_entry->start - stack_entry->avail_ssize;
3286 		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3287 		max_grow = stack_entry->start - end;
3288 	} else {
3289 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3290 		KASSERT(addr >= stack_entry->end, ("foo"));
3291 		end = (next_entry != &map->header) ? next_entry->start :
3292 		    stack_entry->end + stack_entry->avail_ssize;
3293 		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3294 		max_grow = end - stack_entry->end;
3295 	}
3296 
3297 	if (grow_amount > stack_entry->avail_ssize) {
3298 		vm_map_unlock_read(map);
3299 		return (KERN_NO_SPACE);
3300 	}
3301 
3302 	/*
3303 	 * If there is no longer enough space between the entries nogo, and
3304 	 * adjust the available space.  Note: this  should only happen if the
3305 	 * user has mapped into the stack area after the stack was created,
3306 	 * and is probably an error.
3307 	 *
3308 	 * This also effectively destroys any guard page the user might have
3309 	 * intended by limiting the stack size.
3310 	 */
3311 	if (grow_amount > max_grow) {
3312 		if (vm_map_lock_upgrade(map))
3313 			goto Retry;
3314 
3315 		stack_entry->avail_ssize = max_grow;
3316 
3317 		vm_map_unlock(map);
3318 		return (KERN_NO_SPACE);
3319 	}
3320 
3321 	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3322 
3323 	/*
3324 	 * If this is the main process stack, see if we're over the stack
3325 	 * limit.
3326 	 */
3327 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3328 		vm_map_unlock_read(map);
3329 		return (KERN_NO_SPACE);
3330 	}
3331 
3332 	/* Round up the grow amount modulo SGROWSIZ */
3333 	grow_amount = roundup (grow_amount, sgrowsiz);
3334 	if (grow_amount > stack_entry->avail_ssize)
3335 		grow_amount = stack_entry->avail_ssize;
3336 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3337 		grow_amount = stacklim - ctob(vm->vm_ssize);
3338 	}
3339 
3340 	/* If we would blow our VMEM resource limit, no go */
3341 	if (map->size + grow_amount > vmemlim) {
3342 		vm_map_unlock_read(map);
3343 		return (KERN_NO_SPACE);
3344 	}
3345 
3346 	if (vm_map_lock_upgrade(map))
3347 		goto Retry;
3348 
3349 	if (stack_entry == next_entry) {
3350 		/*
3351 		 * Growing downward.
3352 		 */
3353 		/* Get the preliminary new entry start value */
3354 		addr = stack_entry->start - grow_amount;
3355 
3356 		/*
3357 		 * If this puts us into the previous entry, cut back our
3358 		 * growth to the available space. Also, see the note above.
3359 		 */
3360 		if (addr < end) {
3361 			stack_entry->avail_ssize = max_grow;
3362 			addr = end;
3363 		}
3364 
3365 		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3366 		    p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
3367 
3368 		/* Adjust the available stack space by the amount we grew. */
3369 		if (rv == KERN_SUCCESS) {
3370 			if (prev_entry != &map->header)
3371 				vm_map_clip_end(map, prev_entry, addr);
3372 			new_entry = prev_entry->next;
3373 			KASSERT(new_entry == stack_entry->prev, ("foo"));
3374 			KASSERT(new_entry->end == stack_entry->start, ("foo"));
3375 			KASSERT(new_entry->start == addr, ("foo"));
3376 			grow_amount = new_entry->end - new_entry->start;
3377 			new_entry->avail_ssize = stack_entry->avail_ssize -
3378 			    grow_amount;
3379 			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3380 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3381 		}
3382 	} else {
3383 		/*
3384 		 * Growing upward.
3385 		 */
3386 		addr = stack_entry->end + grow_amount;
3387 
3388 		/*
3389 		 * If this puts us into the next entry, cut back our growth
3390 		 * to the available space. Also, see the note above.
3391 		 */
3392 		if (addr > end) {
3393 			stack_entry->avail_ssize = end - stack_entry->end;
3394 			addr = end;
3395 		}
3396 
3397 		grow_amount = addr - stack_entry->end;
3398 		uip = stack_entry->uip;
3399 		if (uip == NULL && stack_entry->object.vm_object != NULL)
3400 			uip = stack_entry->object.vm_object->uip;
3401 		if (uip != NULL && !swap_reserve_by_uid(grow_amount, uip))
3402 			rv = KERN_NO_SPACE;
3403 		/* Grow the underlying object if applicable. */
3404 		else if (stack_entry->object.vm_object == NULL ||
3405 			 vm_object_coalesce(stack_entry->object.vm_object,
3406 			 stack_entry->offset,
3407 			 (vm_size_t)(stack_entry->end - stack_entry->start),
3408 			 (vm_size_t)grow_amount, uip != NULL)) {
3409 			map->size += (addr - stack_entry->end);
3410 			/* Update the current entry. */
3411 			stack_entry->end = addr;
3412 			stack_entry->avail_ssize -= grow_amount;
3413 			vm_map_entry_resize_free(map, stack_entry);
3414 			rv = KERN_SUCCESS;
3415 
3416 			if (next_entry != &map->header)
3417 				vm_map_clip_start(map, next_entry, addr);
3418 		} else
3419 			rv = KERN_FAILURE;
3420 	}
3421 
3422 	if (rv == KERN_SUCCESS && is_procstack)
3423 		vm->vm_ssize += btoc(grow_amount);
3424 
3425 	vm_map_unlock(map);
3426 
3427 	/*
3428 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3429 	 */
3430 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3431 		vm_map_wire(map,
3432 		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3433 		    (stack_entry == next_entry) ? stack_entry->start : addr,
3434 		    (p->p_flag & P_SYSTEM)
3435 		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3436 		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3437 	}
3438 
3439 	return (rv);
3440 }
3441 
3442 /*
3443  * Unshare the specified VM space for exec.  If other processes are
3444  * mapped to it, then create a new one.  The new vmspace is null.
3445  */
3446 int
3447 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3448 {
3449 	struct vmspace *oldvmspace = p->p_vmspace;
3450 	struct vmspace *newvmspace;
3451 
3452 	newvmspace = vmspace_alloc(minuser, maxuser);
3453 	if (newvmspace == NULL)
3454 		return (ENOMEM);
3455 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3456 	/*
3457 	 * This code is written like this for prototype purposes.  The
3458 	 * goal is to avoid running down the vmspace here, but let the
3459 	 * other process's that are still using the vmspace to finally
3460 	 * run it down.  Even though there is little or no chance of blocking
3461 	 * here, it is a good idea to keep this form for future mods.
3462 	 */
3463 	PROC_VMSPACE_LOCK(p);
3464 	p->p_vmspace = newvmspace;
3465 	PROC_VMSPACE_UNLOCK(p);
3466 	if (p == curthread->td_proc)
3467 		pmap_activate(curthread);
3468 	vmspace_free(oldvmspace);
3469 	return (0);
3470 }
3471 
3472 /*
3473  * Unshare the specified VM space for forcing COW.  This
3474  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3475  */
3476 int
3477 vmspace_unshare(struct proc *p)
3478 {
3479 	struct vmspace *oldvmspace = p->p_vmspace;
3480 	struct vmspace *newvmspace;
3481 	vm_ooffset_t fork_charge;
3482 
3483 	if (oldvmspace->vm_refcnt == 1)
3484 		return (0);
3485 	fork_charge = 0;
3486 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3487 	if (newvmspace == NULL)
3488 		return (ENOMEM);
3489 	if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) {
3490 		vmspace_free(newvmspace);
3491 		return (ENOMEM);
3492 	}
3493 	PROC_VMSPACE_LOCK(p);
3494 	p->p_vmspace = newvmspace;
3495 	PROC_VMSPACE_UNLOCK(p);
3496 	if (p == curthread->td_proc)
3497 		pmap_activate(curthread);
3498 	vmspace_free(oldvmspace);
3499 	return (0);
3500 }
3501 
3502 /*
3503  *	vm_map_lookup:
3504  *
3505  *	Finds the VM object, offset, and
3506  *	protection for a given virtual address in the
3507  *	specified map, assuming a page fault of the
3508  *	type specified.
3509  *
3510  *	Leaves the map in question locked for read; return
3511  *	values are guaranteed until a vm_map_lookup_done
3512  *	call is performed.  Note that the map argument
3513  *	is in/out; the returned map must be used in
3514  *	the call to vm_map_lookup_done.
3515  *
3516  *	A handle (out_entry) is returned for use in
3517  *	vm_map_lookup_done, to make that fast.
3518  *
3519  *	If a lookup is requested with "write protection"
3520  *	specified, the map may be changed to perform virtual
3521  *	copying operations, although the data referenced will
3522  *	remain the same.
3523  */
3524 int
3525 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3526 	      vm_offset_t vaddr,
3527 	      vm_prot_t fault_typea,
3528 	      vm_map_entry_t *out_entry,	/* OUT */
3529 	      vm_object_t *object,		/* OUT */
3530 	      vm_pindex_t *pindex,		/* OUT */
3531 	      vm_prot_t *out_prot,		/* OUT */
3532 	      boolean_t *wired)			/* OUT */
3533 {
3534 	vm_map_entry_t entry;
3535 	vm_map_t map = *var_map;
3536 	vm_prot_t prot;
3537 	vm_prot_t fault_type = fault_typea;
3538 	vm_object_t eobject;
3539 	struct uidinfo *uip;
3540 	vm_ooffset_t size;
3541 
3542 RetryLookup:;
3543 
3544 	vm_map_lock_read(map);
3545 
3546 	/*
3547 	 * Lookup the faulting address.
3548 	 */
3549 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3550 		vm_map_unlock_read(map);
3551 		return (KERN_INVALID_ADDRESS);
3552 	}
3553 
3554 	entry = *out_entry;
3555 
3556 	/*
3557 	 * Handle submaps.
3558 	 */
3559 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3560 		vm_map_t old_map = map;
3561 
3562 		*var_map = map = entry->object.sub_map;
3563 		vm_map_unlock_read(old_map);
3564 		goto RetryLookup;
3565 	}
3566 
3567 	/*
3568 	 * Check whether this task is allowed to have this page.
3569 	 */
3570 	prot = entry->protection;
3571 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3572 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3573 		vm_map_unlock_read(map);
3574 		return (KERN_PROTECTION_FAILURE);
3575 	}
3576 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3577 	    (entry->eflags & MAP_ENTRY_COW) &&
3578 	    (fault_type & VM_PROT_WRITE)) {
3579 		vm_map_unlock_read(map);
3580 		return (KERN_PROTECTION_FAILURE);
3581 	}
3582 
3583 	/*
3584 	 * If this page is not pageable, we have to get it for all possible
3585 	 * accesses.
3586 	 */
3587 	*wired = (entry->wired_count != 0);
3588 	if (*wired)
3589 		fault_type = entry->protection;
3590 	size = entry->end - entry->start;
3591 	/*
3592 	 * If the entry was copy-on-write, we either ...
3593 	 */
3594 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3595 		/*
3596 		 * If we want to write the page, we may as well handle that
3597 		 * now since we've got the map locked.
3598 		 *
3599 		 * If we don't need to write the page, we just demote the
3600 		 * permissions allowed.
3601 		 */
3602 		if ((fault_type & VM_PROT_WRITE) != 0 ||
3603 		    (fault_typea & VM_PROT_COPY) != 0) {
3604 			/*
3605 			 * Make a new object, and place it in the object
3606 			 * chain.  Note that no new references have appeared
3607 			 * -- one just moved from the map to the new
3608 			 * object.
3609 			 */
3610 			if (vm_map_lock_upgrade(map))
3611 				goto RetryLookup;
3612 
3613 			if (entry->uip == NULL) {
3614 				/*
3615 				 * The debugger owner is charged for
3616 				 * the memory.
3617 				 */
3618 				uip = curthread->td_ucred->cr_ruidinfo;
3619 				uihold(uip);
3620 				if (!swap_reserve_by_uid(size, uip)) {
3621 					uifree(uip);
3622 					vm_map_unlock(map);
3623 					return (KERN_RESOURCE_SHORTAGE);
3624 				}
3625 				entry->uip = uip;
3626 			}
3627 			vm_object_shadow(
3628 			    &entry->object.vm_object,
3629 			    &entry->offset,
3630 			    atop(size));
3631 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3632 			eobject = entry->object.vm_object;
3633 			if (eobject->uip != NULL) {
3634 				/*
3635 				 * The object was not shadowed.
3636 				 */
3637 				swap_release_by_uid(size, entry->uip);
3638 				uifree(entry->uip);
3639 				entry->uip = NULL;
3640 			} else if (entry->uip != NULL) {
3641 				VM_OBJECT_LOCK(eobject);
3642 				eobject->uip = entry->uip;
3643 				eobject->charge = size;
3644 				VM_OBJECT_UNLOCK(eobject);
3645 				entry->uip = NULL;
3646 			}
3647 
3648 			vm_map_lock_downgrade(map);
3649 		} else {
3650 			/*
3651 			 * We're attempting to read a copy-on-write page --
3652 			 * don't allow writes.
3653 			 */
3654 			prot &= ~VM_PROT_WRITE;
3655 		}
3656 	}
3657 
3658 	/*
3659 	 * Create an object if necessary.
3660 	 */
3661 	if (entry->object.vm_object == NULL &&
3662 	    !map->system_map) {
3663 		if (vm_map_lock_upgrade(map))
3664 			goto RetryLookup;
3665 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3666 		    atop(size));
3667 		entry->offset = 0;
3668 		if (entry->uip != NULL) {
3669 			VM_OBJECT_LOCK(entry->object.vm_object);
3670 			entry->object.vm_object->uip = entry->uip;
3671 			entry->object.vm_object->charge = size;
3672 			VM_OBJECT_UNLOCK(entry->object.vm_object);
3673 			entry->uip = NULL;
3674 		}
3675 		vm_map_lock_downgrade(map);
3676 	}
3677 
3678 	/*
3679 	 * Return the object/offset from this entry.  If the entry was
3680 	 * copy-on-write or empty, it has been fixed up.
3681 	 */
3682 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3683 	*object = entry->object.vm_object;
3684 
3685 	*out_prot = prot;
3686 	return (KERN_SUCCESS);
3687 }
3688 
3689 /*
3690  *	vm_map_lookup_locked:
3691  *
3692  *	Lookup the faulting address.  A version of vm_map_lookup that returns
3693  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
3694  */
3695 int
3696 vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
3697 		     vm_offset_t vaddr,
3698 		     vm_prot_t fault_typea,
3699 		     vm_map_entry_t *out_entry,	/* OUT */
3700 		     vm_object_t *object,	/* OUT */
3701 		     vm_pindex_t *pindex,	/* OUT */
3702 		     vm_prot_t *out_prot,	/* OUT */
3703 		     boolean_t *wired)		/* OUT */
3704 {
3705 	vm_map_entry_t entry;
3706 	vm_map_t map = *var_map;
3707 	vm_prot_t prot;
3708 	vm_prot_t fault_type = fault_typea;
3709 
3710 	/*
3711 	 * Lookup the faulting address.
3712 	 */
3713 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
3714 		return (KERN_INVALID_ADDRESS);
3715 
3716 	entry = *out_entry;
3717 
3718 	/*
3719 	 * Fail if the entry refers to a submap.
3720 	 */
3721 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3722 		return (KERN_FAILURE);
3723 
3724 	/*
3725 	 * Check whether this task is allowed to have this page.
3726 	 */
3727 	prot = entry->protection;
3728 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
3729 	if ((fault_type & prot) != fault_type)
3730 		return (KERN_PROTECTION_FAILURE);
3731 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3732 	    (entry->eflags & MAP_ENTRY_COW) &&
3733 	    (fault_type & VM_PROT_WRITE))
3734 		return (KERN_PROTECTION_FAILURE);
3735 
3736 	/*
3737 	 * If this page is not pageable, we have to get it for all possible
3738 	 * accesses.
3739 	 */
3740 	*wired = (entry->wired_count != 0);
3741 	if (*wired)
3742 		fault_type = entry->protection;
3743 
3744 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3745 		/*
3746 		 * Fail if the entry was copy-on-write for a write fault.
3747 		 */
3748 		if (fault_type & VM_PROT_WRITE)
3749 			return (KERN_FAILURE);
3750 		/*
3751 		 * We're attempting to read a copy-on-write page --
3752 		 * don't allow writes.
3753 		 */
3754 		prot &= ~VM_PROT_WRITE;
3755 	}
3756 
3757 	/*
3758 	 * Fail if an object should be created.
3759 	 */
3760 	if (entry->object.vm_object == NULL && !map->system_map)
3761 		return (KERN_FAILURE);
3762 
3763 	/*
3764 	 * Return the object/offset from this entry.  If the entry was
3765 	 * copy-on-write or empty, it has been fixed up.
3766 	 */
3767 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3768 	*object = entry->object.vm_object;
3769 
3770 	*out_prot = prot;
3771 	return (KERN_SUCCESS);
3772 }
3773 
3774 /*
3775  *	vm_map_lookup_done:
3776  *
3777  *	Releases locks acquired by a vm_map_lookup
3778  *	(according to the handle returned by that lookup).
3779  */
3780 void
3781 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
3782 {
3783 	/*
3784 	 * Unlock the main-level map
3785 	 */
3786 	vm_map_unlock_read(map);
3787 }
3788 
3789 #include "opt_ddb.h"
3790 #ifdef DDB
3791 #include <sys/kernel.h>
3792 
3793 #include <ddb/ddb.h>
3794 
3795 /*
3796  *	vm_map_print:	[ debug ]
3797  */
3798 DB_SHOW_COMMAND(map, vm_map_print)
3799 {
3800 	static int nlines;
3801 	/* XXX convert args. */
3802 	vm_map_t map = (vm_map_t)addr;
3803 	boolean_t full = have_addr;
3804 
3805 	vm_map_entry_t entry;
3806 
3807 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3808 	    (void *)map,
3809 	    (void *)map->pmap, map->nentries, map->timestamp);
3810 	nlines++;
3811 
3812 	if (!full && db_indent)
3813 		return;
3814 
3815 	db_indent += 2;
3816 	for (entry = map->header.next; entry != &map->header;
3817 	    entry = entry->next) {
3818 		db_iprintf("map entry %p: start=%p, end=%p\n",
3819 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3820 		nlines++;
3821 		{
3822 			static char *inheritance_name[4] =
3823 			{"share", "copy", "none", "donate_copy"};
3824 
3825 			db_iprintf(" prot=%x/%x/%s",
3826 			    entry->protection,
3827 			    entry->max_protection,
3828 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3829 			if (entry->wired_count != 0)
3830 				db_printf(", wired");
3831 		}
3832 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3833 			db_printf(", share=%p, offset=0x%jx\n",
3834 			    (void *)entry->object.sub_map,
3835 			    (uintmax_t)entry->offset);
3836 			nlines++;
3837 			if ((entry->prev == &map->header) ||
3838 			    (entry->prev->object.sub_map !=
3839 				entry->object.sub_map)) {
3840 				db_indent += 2;
3841 				vm_map_print((db_expr_t)(intptr_t)
3842 					     entry->object.sub_map,
3843 					     full, 0, (char *)0);
3844 				db_indent -= 2;
3845 			}
3846 		} else {
3847 			if (entry->uip != NULL)
3848 				db_printf(", uip %d", entry->uip->ui_uid);
3849 			db_printf(", object=%p, offset=0x%jx",
3850 			    (void *)entry->object.vm_object,
3851 			    (uintmax_t)entry->offset);
3852 			if (entry->object.vm_object && entry->object.vm_object->uip)
3853 				db_printf(", obj uip %d charge %jx",
3854 				    entry->object.vm_object->uip->ui_uid,
3855 				    (uintmax_t)entry->object.vm_object->charge);
3856 			if (entry->eflags & MAP_ENTRY_COW)
3857 				db_printf(", copy (%s)",
3858 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3859 			db_printf("\n");
3860 			nlines++;
3861 
3862 			if ((entry->prev == &map->header) ||
3863 			    (entry->prev->object.vm_object !=
3864 				entry->object.vm_object)) {
3865 				db_indent += 2;
3866 				vm_object_print((db_expr_t)(intptr_t)
3867 						entry->object.vm_object,
3868 						full, 0, (char *)0);
3869 				nlines += 4;
3870 				db_indent -= 2;
3871 			}
3872 		}
3873 	}
3874 	db_indent -= 2;
3875 	if (db_indent == 0)
3876 		nlines = 0;
3877 }
3878 
3879 
3880 DB_SHOW_COMMAND(procvm, procvm)
3881 {
3882 	struct proc *p;
3883 
3884 	if (have_addr) {
3885 		p = (struct proc *) addr;
3886 	} else {
3887 		p = curproc;
3888 	}
3889 
3890 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3891 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3892 	    (void *)vmspace_pmap(p->p_vmspace));
3893 
3894 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3895 }
3896 
3897 #endif /* DDB */
3898