xref: /freebsd/sys/vm/vm_map.c (revision b2db760808f74bb53c232900091c9da801ebbfcc)
1 /*-
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60 
61 /*
62  *	Virtual memory mapping module.
63  */
64 
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/ktr.h>
71 #include <sys/lock.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/vmmeter.h>
75 #include <sys/mman.h>
76 #include <sys/vnode.h>
77 #include <sys/resourcevar.h>
78 #include <sys/file.h>
79 #include <sys/sysent.h>
80 #include <sys/shm.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
91 #include <vm/swap_pager.h>
92 #include <vm/uma.h>
93 
94 /*
95  *	Virtual memory maps provide for the mapping, protection,
96  *	and sharing of virtual memory objects.  In addition,
97  *	this module provides for an efficient virtual copy of
98  *	memory from one map to another.
99  *
100  *	Synchronization is required prior to most operations.
101  *
102  *	Maps consist of an ordered doubly-linked list of simple
103  *	entries; a self-adjusting binary search tree of these
104  *	entries is used to speed up lookups.
105  *
106  *	Since portions of maps are specified by start/end addresses,
107  *	which may not align with existing map entries, all
108  *	routines merely "clip" entries to these start/end values.
109  *	[That is, an entry is split into two, bordering at a
110  *	start or end value.]  Note that these clippings may not
111  *	always be necessary (as the two resulting entries are then
112  *	not changed); however, the clipping is done for convenience.
113  *
114  *	As mentioned above, virtual copy operations are performed
115  *	by copying VM object references from one map to
116  *	another, and then marking both regions as copy-on-write.
117  */
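/*
 * Implementation note: each vm_map_entry is simultaneously linked into
 * the map's address-ordered doubly-linked list (via its "prev" and
 * "next" fields) and into the splay tree rooted at map->root (via its
 * "left" and "right" fields).  The list gives cheap in-order traversal,
 * while the tree gives O(log n) amortized lookup by address.
 */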
118 
119 static struct mtx map_sleep_mtx;
120 static uma_zone_t mapentzone;
121 static uma_zone_t kmapentzone;
122 static uma_zone_t mapzone;
123 static uma_zone_t vmspace_zone;
124 static struct vm_object kmapentobj;
125 static int vmspace_zinit(void *mem, int size, int flags);
126 static void vmspace_zfini(void *mem, int size);
127 static int vm_map_zinit(void *mem, int size, int flags);
128 static void vm_map_zfini(void *mem, int size);
129 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
130     vm_offset_t max);
131 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
132 #ifdef INVARIANTS
133 static void vm_map_zdtor(void *mem, int size, void *arg);
134 static void vmspace_zdtor(void *mem, int size, void *arg);
135 #endif
136 
137 #define	ENTRY_CHARGED(e) ((e)->uip != NULL || \
138     ((e)->object.vm_object != NULL && (e)->object.vm_object->uip != NULL && \
139      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
140 
141 /*
142  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
143  * stable.
144  */
145 #define PROC_VMSPACE_LOCK(p) do { } while (0)
146 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
147 
148 /*
149  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
150  *
151  *	Asserts that the starting and ending region
152  *	addresses fall within the valid range of the map.
153  */
154 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
155 		{					\
156 		if (start < vm_map_min(map))		\
157 			start = vm_map_min(map);	\
158 		if (end > vm_map_max(map))		\
159 			end = vm_map_max(map);		\
160 		if (start > end)			\
161 			start = end;			\
162 		}
163 
164 /*
165  *	vm_map_startup:
166  *
167  *	Initialize the vm_map module.  Must be called before
168  *	any other vm_map routines.
169  *
170  *	Map and entry structures are allocated from the general
171  *	purpose memory pool with some exceptions:
172  *
173  *	- The kernel map and kmem submap are allocated statically.
174  *	- Kernel map entries are allocated out of a static pool.
175  *
176  *	These restrictions are necessary since malloc() uses the
177  *	maps and requires map entries.
178  */
179 
180 void
181 vm_map_startup(void)
182 {
183 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
184 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
185 #ifdef INVARIANTS
186 	    vm_map_zdtor,
187 #else
188 	    NULL,
189 #endif
190 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
191 	uma_prealloc(mapzone, MAX_KMAP);
192 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
193 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
194 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
195 	uma_prealloc(kmapentzone, MAX_KMAPENT);
196 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
197 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
198 }
199 
200 static void
201 vmspace_zfini(void *mem, int size)
202 {
203 	struct vmspace *vm;
204 
205 	vm = (struct vmspace *)mem;
206 	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
207 }
208 
209 static int
210 vmspace_zinit(void *mem, int size, int flags)
211 {
212 	struct vmspace *vm;
213 
214 	vm = (struct vmspace *)mem;
215 
216 	vm->vm_map.pmap = NULL;
217 	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
218 	return (0);
219 }
220 
221 static void
222 vm_map_zfini(void *mem, int size)
223 {
224 	vm_map_t map;
225 
226 	map = (vm_map_t)mem;
227 	mtx_destroy(&map->system_mtx);
228 	sx_destroy(&map->lock);
229 }
230 
231 static int
232 vm_map_zinit(void *mem, int size, int flags)
233 {
234 	vm_map_t map;
235 
236 	map = (vm_map_t)mem;
237 	map->nentries = 0;
238 	map->size = 0;
239 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
240 	sx_init(&map->lock, "user map");
241 	return (0);
242 }
243 
244 #ifdef INVARIANTS
245 static void
246 vmspace_zdtor(void *mem, int size, void *arg)
247 {
248 	struct vmspace *vm;
249 
250 	vm = (struct vmspace *)mem;
251 
252 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
253 }
254 static void
255 vm_map_zdtor(void *mem, int size, void *arg)
256 {
257 	vm_map_t map;
258 
259 	map = (vm_map_t)mem;
260 	KASSERT(map->nentries == 0,
261 	    ("map %p nentries == %d on free.",
262 	    map, map->nentries));
263 	KASSERT(map->size == 0,
264 	    ("map %p size == %lu on free.",
265 	    map, (unsigned long)map->size));
266 }
267 #endif	/* INVARIANTS */
268 
269 /*
270  * Allocate a vmspace structure, including a vm_map and pmap,
271  * and initialize those structures.  The refcnt is set to 1.
272  */
273 struct vmspace *
274 vmspace_alloc(vm_offset_t min, vm_offset_t max)
275 {
277 	struct vmspace *vm;
278 
279 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
280 	if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
281 		uma_zfree(vmspace_zone, vm);
282 		return (NULL);
283 	}
284 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
285 	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
286 	vm->vm_refcnt = 1;
287 	vm->vm_shm = NULL;
288 	vm->vm_swrss = 0;
289 	vm->vm_tsize = 0;
290 	vm->vm_dsize = 0;
291 	vm->vm_ssize = 0;
292 	vm->vm_taddr = 0;
293 	vm->vm_daddr = 0;
294 	vm->vm_maxsaddr = 0;
295 	return (vm);
296 }
297 
298 void
299 vm_init2(void)
300 {
301 	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
302 	    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
303 	     maxproc * 2 + maxfiles);
304 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
305 #ifdef INVARIANTS
306 	    vmspace_zdtor,
307 #else
308 	    NULL,
309 #endif
310 	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
311 }
312 
313 static inline void
314 vmspace_dofree(struct vmspace *vm)
315 {
316 
317 	CTR1(KTR_VM, "vmspace_free: %p", vm);
318 
319 	/*
320 	 * Make sure any SysV shm is freed, it might not have been in
321 	 * exit1().
322 	 */
323 	shmexit(vm);
324 
325 	/*
326 	 * Lock the map, to wait out all other references to it.
327 	 * Delete all of the mappings and pages they hold, then call
328 	 * the pmap module to reclaim anything left.
329 	 */
330 	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
331 	    vm->vm_map.max_offset);
332 
333 	pmap_release(vmspace_pmap(vm));
334 	vm->vm_map.pmap = NULL;
335 	uma_zfree(vmspace_zone, vm);
336 }
337 
338 void
339 vmspace_free(struct vmspace *vm)
340 {
341 	int refcnt;
342 
343 	if (vm->vm_refcnt == 0)
344 		panic("vmspace_free: attempt to free already freed vmspace");
345 
346 	do
347 		refcnt = vm->vm_refcnt;
348 	while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
349 	if (refcnt == 1)
350 		vmspace_dofree(vm);
351 }
352 
353 void
354 vmspace_exitfree(struct proc *p)
355 {
356 	struct vmspace *vm;
357 
358 	PROC_VMSPACE_LOCK(p);
359 	vm = p->p_vmspace;
360 	p->p_vmspace = NULL;
361 	PROC_VMSPACE_UNLOCK(p);
362 	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
363 	vmspace_free(vm);
364 }
365 
366 void
367 vmspace_exit(struct thread *td)
368 {
369 	int refcnt;
370 	struct vmspace *vm;
371 	struct proc *p;
372 
373 	/*
374 	 * Release user portion of address space.
375 	 * This releases references to vnodes,
376 	 * which could cause I/O if the file has been unlinked.
377 	 * Need to do this early enough that we can still sleep.
378 	 *
379 	 * The last exiting process to reach this point releases as
380 	 * much of the environment as it can. vmspace_dofree() is the
381 	 * slower fallback in case another process had a temporary
382 	 * reference to the vmspace.
383 	 */
384 
385 	p = td->td_proc;
386 	vm = p->p_vmspace;
387 	atomic_add_int(&vmspace0.vm_refcnt, 1);
388 	do {
389 		refcnt = vm->vm_refcnt;
390 		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
391 			/* Switch now since other proc might free vmspace */
392 			PROC_VMSPACE_LOCK(p);
393 			p->p_vmspace = &vmspace0;
394 			PROC_VMSPACE_UNLOCK(p);
395 			pmap_activate(td);
396 		}
397 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
398 	if (refcnt == 1) {
399 		if (p->p_vmspace != vm) {
400 			/* vmspace not yet freed, switch back */
401 			PROC_VMSPACE_LOCK(p);
402 			p->p_vmspace = vm;
403 			PROC_VMSPACE_UNLOCK(p);
404 			pmap_activate(td);
405 		}
406 		pmap_remove_pages(vmspace_pmap(vm));
407 		/* Switch now since this proc will free vmspace */
408 		PROC_VMSPACE_LOCK(p);
409 		p->p_vmspace = &vmspace0;
410 		PROC_VMSPACE_UNLOCK(p);
411 		pmap_activate(td);
412 		vmspace_dofree(vm);
413 	}
414 }
415 
416 /* Acquire reference to vmspace owned by another process. */
417 
418 struct vmspace *
419 vmspace_acquire_ref(struct proc *p)
420 {
421 	struct vmspace *vm;
422 	int refcnt;
423 
424 	PROC_VMSPACE_LOCK(p);
425 	vm = p->p_vmspace;
426 	if (vm == NULL) {
427 		PROC_VMSPACE_UNLOCK(p);
428 		return (NULL);
429 	}
430 	do {
431 		refcnt = vm->vm_refcnt;
432 		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
433 			PROC_VMSPACE_UNLOCK(p);
434 			return (NULL);
435 		}
436 	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
437 	if (vm != p->p_vmspace) {
438 		PROC_VMSPACE_UNLOCK(p);
439 		vmspace_free(vm);
440 		return (NULL);
441 	}
442 	PROC_VMSPACE_UNLOCK(p);
443 	return (vm);
444 }
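/*
 * A typical consumer of vmspace_acquire_ref() looks roughly like the
 * sketch below (illustrative only; the error returned and the locking
 * around the use of the map depend on the caller):
 *
 *	struct vmspace *vm;
 *
 *	if ((vm = vmspace_acquire_ref(p)) == NULL)
 *		return (EFAULT);
 *	... operate on &vm->vm_map ...
 *	vmspace_free(vm);
 */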
445 
446 void
447 _vm_map_lock(vm_map_t map, const char *file, int line)
448 {
449 
450 	if (map->system_map)
451 		_mtx_lock_flags(&map->system_mtx, 0, file, line);
452 	else
453 		(void)_sx_xlock(&map->lock, 0, file, line);
454 	map->timestamp++;
455 }
456 
457 void
458 _vm_map_unlock(vm_map_t map, const char *file, int line)
459 {
460 	vm_map_entry_t free_entry, entry;
461 	vm_object_t object;
462 
463 	free_entry = map->deferred_freelist;
464 	map->deferred_freelist = NULL;
465 
466 	if (map->system_map)
467 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
468 	else
469 		_sx_xunlock(&map->lock, file, line);
470 
471 	while (free_entry != NULL) {
472 		entry = free_entry;
473 		free_entry = free_entry->next;
474 
475 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
476 			object = entry->object.vm_object;
477 			vm_object_deallocate(object);
478 		}
479 
480 		vm_map_entry_dispose(map, entry);
481 	}
482 }
483 
484 void
485 _vm_map_lock_read(vm_map_t map, const char *file, int line)
486 {
487 
488 	if (map->system_map)
489 		_mtx_lock_flags(&map->system_mtx, 0, file, line);
490 	else
491 		(void)_sx_slock(&map->lock, 0, file, line);
492 }
493 
494 void
495 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
496 {
497 
498 	if (map->system_map)
499 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
500 	else
501 		_sx_sunlock(&map->lock, file, line);
502 }
503 
504 int
505 _vm_map_trylock(vm_map_t map, const char *file, int line)
506 {
507 	int error;
508 
509 	error = map->system_map ?
510 	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
511 	    !_sx_try_xlock(&map->lock, file, line);
512 	if (error == 0)
513 		map->timestamp++;
514 	return (error == 0);
515 }
516 
517 int
518 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
519 {
520 	int error;
521 
522 	error = map->system_map ?
523 	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
524 	    !_sx_try_slock(&map->lock, file, line);
525 	return (error == 0);
526 }
527 
528 /*
529  *	_vm_map_lock_upgrade:	[ internal use only ]
530  *
531  *	Tries to upgrade a read (shared) lock on the specified map to a write
532  *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
533  *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
534  *	returned without a read or write lock held.
535  *
536  *	Requires that the map be read locked.
537  */
538 int
539 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
540 {
541 	unsigned int last_timestamp;
542 
543 	if (map->system_map) {
544 #ifdef INVARIANTS
545 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
546 #endif
547 	} else {
548 		if (!_sx_try_upgrade(&map->lock, file, line)) {
549 			last_timestamp = map->timestamp;
550 			_sx_sunlock(&map->lock, file, line);
551 			/*
552 			 * If the map's timestamp does not change while the
553 			 * map is unlocked, then the upgrade succeeds.
554 			 */
555 			(void)_sx_xlock(&map->lock, 0, file, line);
556 			if (last_timestamp != map->timestamp) {
557 				_sx_xunlock(&map->lock, file, line);
558 				return (1);
559 			}
560 		}
561 	}
562 	map->timestamp++;
563 	return (0);
564 }
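/*
 * Because a failed upgrade drops the lock entirely, callers typically
 * retreat to a retry label, reacquire the read lock, and repeat their
 * lookup.  A rough sketch (the label name is illustrative):
 *
 *	RetryLookup:
 *		vm_map_lock_read(map);
 *		...
 *		if (vm_map_lock_upgrade(map))
 *			goto RetryLookup;
 *		... the map is now exclusively locked ...
 */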
565 
566 void
567 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
568 {
569 
570 	if (map->system_map) {
571 #ifdef INVARIANTS
572 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
573 #endif
574 	} else
575 		_sx_downgrade(&map->lock, file, line);
576 }
577 
578 /*
579  *	vm_map_locked:
580  *
581  *	Returns a non-zero value if the caller holds a write (exclusive) lock
582  *	on the specified map and the value "0" otherwise.
583  */
584 int
585 vm_map_locked(vm_map_t map)
586 {
587 
588 	if (map->system_map)
589 		return (mtx_owned(&map->system_mtx));
590 	else
591 		return (sx_xlocked(&map->lock));
592 }
593 
594 #ifdef INVARIANTS
595 static void
596 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
597 {
598 
599 	if (map->system_map)
600 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
601 	else
602 		_sx_assert(&map->lock, SA_XLOCKED, file, line);
603 }
604 
605 #if 0
606 static void
607 _vm_map_assert_locked_read(vm_map_t map, const char *file, int line)
608 {
609 
610 	if (map->system_map)
611 		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
612 	else
613 		_sx_assert(&map->lock, SA_SLOCKED, file, line);
614 }
615 #endif
616 
617 #define	VM_MAP_ASSERT_LOCKED(map) \
618     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
619 #define	VM_MAP_ASSERT_LOCKED_READ(map) \
620     _vm_map_assert_locked_read(map, LOCK_FILE, LOCK_LINE)
621 #else
622 #define	VM_MAP_ASSERT_LOCKED(map)
623 #define	VM_MAP_ASSERT_LOCKED_READ(map)
624 #endif
625 
626 /*
627  *	vm_map_unlock_and_wait:
628  */
629 int
630 vm_map_unlock_and_wait(vm_map_t map, int timo)
631 {
632 
633 	mtx_lock(&map_sleep_mtx);
634 	vm_map_unlock(map);
635 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", timo));
636 }
637 
638 /*
639  *	vm_map_wakeup:
640  */
641 void
642 vm_map_wakeup(vm_map_t map)
643 {
644 
645 	/*
646 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
647 	 * from being performed (and lost) between the vm_map_unlock()
648 	 * and the msleep() in vm_map_unlock_and_wait().
649 	 */
650 	mtx_lock(&map_sleep_mtx);
651 	mtx_unlock(&map_sleep_mtx);
652 	wakeup(&map->root);
653 }
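/*
 * vm_map_unlock_and_wait() and vm_map_wakeup() implement a simple
 * sleep/wakeup protocol keyed on &map->root.  In rough outline
 * (illustrative sketch; real callers include vm_map_wire() and
 * vm_map_unwire()):
 *
 *	waiter, with the map locked:
 *		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 *		(void) vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);
 *
 *	waker, after clearing the condition:
 *		vm_map_unlock(map);
 *		if (need_wakeup)
 *			vm_map_wakeup(map);
 */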
654 
655 long
656 vmspace_resident_count(struct vmspace *vmspace)
657 {
658 	return pmap_resident_count(vmspace_pmap(vmspace));
659 }
660 
661 long
662 vmspace_wired_count(struct vmspace *vmspace)
663 {
664 	return pmap_wired_count(vmspace_pmap(vmspace));
665 }
666 
667 /*
668  *	vm_map_create:
669  *
670  *	Creates and returns a new empty VM map with
671  *	the given physical map structure, and having
672  *	the given lower and upper address bounds.
673  */
674 vm_map_t
675 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
676 {
677 	vm_map_t result;
678 
679 	result = uma_zalloc(mapzone, M_WAITOK);
680 	CTR1(KTR_VM, "vm_map_create: %p", result);
681 	_vm_map_init(result, pmap, min, max);
682 	return (result);
683 }
684 
685 /*
686  * Initialize an existing vm_map structure
687  * such as that in the vmspace structure.
688  */
689 static void
690 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
691 {
692 
693 	map->header.next = map->header.prev = &map->header;
694 	map->needs_wakeup = FALSE;
695 	map->system_map = 0;
696 	map->pmap = pmap;
697 	map->min_offset = min;
698 	map->max_offset = max;
699 	map->flags = 0;
700 	map->root = NULL;
701 	map->timestamp = 0;
702 	map->deferred_freelist = NULL;
703 }
704 
705 void
706 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
707 {
708 
709 	_vm_map_init(map, pmap, min, max);
710 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
711 	sx_init(&map->lock, "user map");
712 }
713 
714 /*
715  *	vm_map_entry_dispose:	[ internal use only ]
716  *
717  *	Inverse of vm_map_entry_create.
718  */
719 static void
720 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
721 {
722 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
723 }
724 
725 /*
726  *	vm_map_entry_create:	[ internal use only ]
727  *
728  *	Allocates a VM map entry for insertion.
729  *	No entry fields are filled in.
730  */
731 static vm_map_entry_t
732 vm_map_entry_create(vm_map_t map)
733 {
734 	vm_map_entry_t new_entry;
735 
736 	if (map->system_map)
737 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
738 	else
739 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
740 	if (new_entry == NULL)
741 		panic("vm_map_entry_create: kernel resources exhausted");
742 	return (new_entry);
743 }
744 
745 /*
746  *	vm_map_entry_set_behavior:
747  *
748  *	Set the expected access behavior, either normal, random, or
749  *	sequential.
750  */
751 static inline void
752 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
753 {
754 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
755 	    (behavior & MAP_ENTRY_BEHAV_MASK);
756 }
757 
758 /*
759  *	vm_map_entry_set_max_free:
760  *
761  *	Set the max_free field in a vm_map_entry.
762  */
763 static inline void
764 vm_map_entry_set_max_free(vm_map_entry_t entry)
765 {
766 
767 	entry->max_free = entry->adj_free;
768 	if (entry->left != NULL && entry->left->max_free > entry->max_free)
769 		entry->max_free = entry->left->max_free;
770 	if (entry->right != NULL && entry->right->max_free > entry->max_free)
771 		entry->max_free = entry->right->max_free;
772 }
773 
774 /*
775  *	vm_map_entry_splay:
776  *
777  *	The Sleator and Tarjan top-down splay algorithm with the
778  *	following variation.  Max_free must be computed bottom-up, so
779  *	on the downward pass, maintain the left and right spines in
780  *	reverse order.  Then, make a second pass up each side to fix
781  *	the pointers and compute max_free.  The time bound is O(log n)
782  *	amortized.
783  *
784  *	The new root is the vm_map_entry containing "addr", or else an
785  *	adjacent entry (lower or higher) if addr is not in the tree.
786  *
787  *	The map must be locked, and leaves it so.
788  *
789  *	Returns: the new root.
790  */
791 static vm_map_entry_t
792 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
793 {
794 	vm_map_entry_t llist, rlist;
795 	vm_map_entry_t ltree, rtree;
796 	vm_map_entry_t y;
797 
798 	/* Special case of empty tree. */
799 	if (root == NULL)
800 		return (root);
801 
802 	/*
803 	 * Pass One: Splay down the tree until we find addr or a NULL
804 	 * pointer where addr would go.  llist and rlist are the two
805 	 * sides in reverse order (bottom-up), with llist linked by
806 	 * the right pointer and rlist linked by the left pointer in
807 	 * the vm_map_entry.  Wait until Pass Two to set max_free on
808 	 * the two spines.
809 	 */
810 	llist = NULL;
811 	rlist = NULL;
812 	for (;;) {
813 		/* root is never NULL in here. */
814 		if (addr < root->start) {
815 			y = root->left;
816 			if (y == NULL)
817 				break;
818 			if (addr < y->start && y->left != NULL) {
819 				/* Rotate right and put y on rlist. */
820 				root->left = y->right;
821 				y->right = root;
822 				vm_map_entry_set_max_free(root);
823 				root = y->left;
824 				y->left = rlist;
825 				rlist = y;
826 			} else {
827 				/* Put root on rlist. */
828 				root->left = rlist;
829 				rlist = root;
830 				root = y;
831 			}
832 		} else if (addr >= root->end) {
833 			y = root->right;
834 			if (y == NULL)
835 				break;
836 			if (addr >= y->end && y->right != NULL) {
837 				/* Rotate left and put y on llist. */
838 				root->right = y->left;
839 				y->left = root;
840 				vm_map_entry_set_max_free(root);
841 				root = y->right;
842 				y->right = llist;
843 				llist = y;
844 			} else {
845 				/* Put root on llist. */
846 				root->right = llist;
847 				llist = root;
848 				root = y;
849 			}
850 		} else
851 			break;
852 	}
853 
854 	/*
855 	 * Pass Two: Walk back up the two spines, flip the pointers
856 	 * and set max_free.  The subtrees of the root go at the
857 	 * bottom of llist and rlist.
858 	 */
859 	ltree = root->left;
860 	while (llist != NULL) {
861 		y = llist->right;
862 		llist->right = ltree;
863 		vm_map_entry_set_max_free(llist);
864 		ltree = llist;
865 		llist = y;
866 	}
867 	rtree = root->right;
868 	while (rlist != NULL) {
869 		y = rlist->left;
870 		rlist->left = rtree;
871 		vm_map_entry_set_max_free(rlist);
872 		rtree = rlist;
873 		rlist = y;
874 	}
875 
876 	/*
877 	 * Final assembly: add ltree and rtree as subtrees of root.
878 	 */
879 	root->left = ltree;
880 	root->right = rtree;
881 	vm_map_entry_set_max_free(root);
882 
883 	return (root);
884 }
885 
886 /*
887  *	vm_map_entry_{un,}link:
888  *
889  *	Insert/remove entries from maps.
890  */
891 static void
892 vm_map_entry_link(vm_map_t map,
893 		  vm_map_entry_t after_where,
894 		  vm_map_entry_t entry)
895 {
896 
897 	CTR4(KTR_VM,
898 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
899 	    map->nentries, entry, after_where);
900 	VM_MAP_ASSERT_LOCKED(map);
901 	map->nentries++;
902 	entry->prev = after_where;
903 	entry->next = after_where->next;
904 	entry->next->prev = entry;
905 	after_where->next = entry;
906 
907 	if (after_where != &map->header) {
908 		if (after_where != map->root)
909 			vm_map_entry_splay(after_where->start, map->root);
910 		entry->right = after_where->right;
911 		entry->left = after_where;
912 		after_where->right = NULL;
913 		after_where->adj_free = entry->start - after_where->end;
914 		vm_map_entry_set_max_free(after_where);
915 	} else {
916 		entry->right = map->root;
917 		entry->left = NULL;
918 	}
919 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
920 	    entry->next->start) - entry->end;
921 	vm_map_entry_set_max_free(entry);
922 	map->root = entry;
923 }
924 
925 static void
926 vm_map_entry_unlink(vm_map_t map,
927 		    vm_map_entry_t entry)
928 {
929 	vm_map_entry_t next, prev, root;
930 
931 	VM_MAP_ASSERT_LOCKED(map);
932 	if (entry != map->root)
933 		vm_map_entry_splay(entry->start, map->root);
934 	if (entry->left == NULL)
935 		root = entry->right;
936 	else {
937 		root = vm_map_entry_splay(entry->start, entry->left);
938 		root->right = entry->right;
939 		root->adj_free = (entry->next == &map->header ? map->max_offset :
940 		    entry->next->start) - root->end;
941 		vm_map_entry_set_max_free(root);
942 	}
943 	map->root = root;
944 
945 	prev = entry->prev;
946 	next = entry->next;
947 	next->prev = prev;
948 	prev->next = next;
949 	map->nentries--;
950 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
951 	    map->nentries, entry);
952 }
953 
954 /*
955  *	vm_map_entry_resize_free:
956  *
957  *	Recompute the amount of free space following a vm_map_entry
958  *	and propagate that value up the tree.  Call this function after
959  *	resizing a map entry in-place, that is, without a call to
960  *	vm_map_entry_link() or _unlink().
961  *
962  *	The map must be locked, and leaves it so.
963  */
964 static void
965 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
966 {
967 
968 	/*
969 	 * Using splay trees without parent pointers, propagating
970 	 * max_free up the tree is done by moving the entry to the
971 	 * root and making the change there.
972 	 */
973 	if (entry != map->root)
974 		map->root = vm_map_entry_splay(entry->start, map->root);
975 
976 	entry->adj_free = (entry->next == &map->header ? map->max_offset :
977 	    entry->next->start) - entry->end;
978 	vm_map_entry_set_max_free(entry);
979 }
980 
981 /*
982  *	vm_map_lookup_entry:	[ internal use only ]
983  *
984  *	Finds the map entry containing (or
985  *	immediately preceding) the specified address
986  *	in the given map; the entry is returned
987  *	in the "entry" parameter.  The boolean
988  *	result indicates whether the address is
989  *	actually contained in the map.
990  */
991 boolean_t
992 vm_map_lookup_entry(
993 	vm_map_t map,
994 	vm_offset_t address,
995 	vm_map_entry_t *entry)	/* OUT */
996 {
997 	vm_map_entry_t cur;
998 	boolean_t locked;
999 
1000 	/*
1001 	 * If the map is empty, then the map entry immediately preceding
1002 	 * "address" is the map's header.
1003 	 */
1004 	cur = map->root;
1005 	if (cur == NULL)
1006 		*entry = &map->header;
1007 	else if (address >= cur->start && cur->end > address) {
1008 		*entry = cur;
1009 		return (TRUE);
1010 	} else if ((locked = vm_map_locked(map)) ||
1011 	    sx_try_upgrade(&map->lock)) {
1012 		/*
1013 		 * Splay requires a write lock on the map.  However, it only
1014 		 * restructures the binary search tree; it does not otherwise
1015 		 * change the map.  Thus, the map's timestamp need not change
1016 		 * on a temporary upgrade.
1017 		 */
1018 		map->root = cur = vm_map_entry_splay(address, cur);
1019 		if (!locked)
1020 			sx_downgrade(&map->lock);
1021 
1022 		/*
1023 		 * If "address" is contained within a map entry, the new root
1024 		 * is that map entry.  Otherwise, the new root is a map entry
1025 		 * immediately before or after "address".
1026 		 */
1027 		if (address >= cur->start) {
1028 			*entry = cur;
1029 			if (cur->end > address)
1030 				return (TRUE);
1031 		} else
1032 			*entry = cur->prev;
1033 	} else
1034 		/*
1035 		 * Since the map is only locked for read access, perform a
1036 		 * standard binary search tree lookup for "address".
1037 		 */
1038 		for (;;) {
1039 			if (address < cur->start) {
1040 				if (cur->left == NULL) {
1041 					*entry = cur->prev;
1042 					break;
1043 				}
1044 				cur = cur->left;
1045 			} else if (cur->end > address) {
1046 				*entry = cur;
1047 				return (TRUE);
1048 			} else {
1049 				if (cur->right == NULL) {
1050 					*entry = cur;
1051 					break;
1052 				}
1053 				cur = cur->right;
1054 			}
1055 		}
1056 	return (FALSE);
1057 }
1058 
1059 /*
1060  *	vm_map_insert:
1061  *
1062  *	Inserts the given whole VM object into the target
1063  *	map at the specified address range.  The object's
1064  *	size should match that of the address range.
1065  *
1066  *	Requires that the map be locked, and leaves it so.
1067  *
1068  *	If object is non-NULL, ref count must be bumped by caller
1069  *	prior to making call to account for the new entry.
1070  */
1071 int
1072 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1073 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
1074 	      int cow)
1075 {
1076 	vm_map_entry_t new_entry;
1077 	vm_map_entry_t prev_entry;
1078 	vm_map_entry_t temp_entry;
1079 	vm_eflags_t protoeflags;
1080 	struct uidinfo *uip;
1081 	boolean_t charge_prev_obj;
1082 
1083 	VM_MAP_ASSERT_LOCKED(map);
1084 
1085 	/*
1086 	 * Check that the start and end points are not bogus.
1087 	 */
1088 	if ((start < map->min_offset) || (end > map->max_offset) ||
1089 	    (start >= end))
1090 		return (KERN_INVALID_ADDRESS);
1091 
1092 	/*
1093 	 * Find the entry prior to the proposed starting address; if it's part
1094 	 * of an existing entry, this range is bogus.
1095 	 */
1096 	if (vm_map_lookup_entry(map, start, &temp_entry))
1097 		return (KERN_NO_SPACE);
1098 
1099 	prev_entry = temp_entry;
1100 
1101 	/*
1102 	 * Assert that the next entry doesn't overlap the end point.
1103 	 */
1104 	if ((prev_entry->next != &map->header) &&
1105 	    (prev_entry->next->start < end))
1106 		return (KERN_NO_SPACE);
1107 
1108 	protoeflags = 0;
1109 	charge_prev_obj = FALSE;
1110 
1111 	if (cow & MAP_COPY_ON_WRITE)
1112 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1113 
1114 	if (cow & MAP_NOFAULT) {
1115 		protoeflags |= MAP_ENTRY_NOFAULT;
1116 
1117 		KASSERT(object == NULL,
1118 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1119 	}
1120 	if (cow & MAP_DISABLE_SYNCER)
1121 		protoeflags |= MAP_ENTRY_NOSYNC;
1122 	if (cow & MAP_DISABLE_COREDUMP)
1123 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1124 
1125 	uip = NULL;
1126 	KASSERT((object != kmem_object && object != kernel_object) ||
1127 	    ((object == kmem_object || object == kernel_object) &&
1128 		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
1129 	    ("kmem or kernel object and cow"));
1130 	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
1131 		goto charged;
1132 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1133 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1134 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1135 			return (KERN_RESOURCE_SHORTAGE);
1136 		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
1137 		    object->uip == NULL,
1138 		    ("OVERCOMMIT: vm_map_insert o %p", object));
1139 		uip = curthread->td_ucred->cr_ruidinfo;
1140 		uihold(uip);
1141 		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
1142 			charge_prev_obj = TRUE;
1143 	}
1144 
1145 charged:
1146 	if (object != NULL) {
1147 		/*
1148 		 * OBJ_ONEMAPPING must be cleared unless this mapping
1149 		 * is trivially proven to be the only mapping for any
1150 		 * of the object's pages.  (Object granularity
1151 		 * reference counting is insufficient to recognize
1152 		 * aliases with precision.)
1153 		 */
1154 		VM_OBJECT_LOCK(object);
1155 		if (object->ref_count > 1 || object->shadow_count != 0)
1156 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1157 		VM_OBJECT_UNLOCK(object);
1158 	}
1159 	else if ((prev_entry != &map->header) &&
1160 		 (prev_entry->eflags == protoeflags) &&
1161 		 (prev_entry->end == start) &&
1162 		 (prev_entry->wired_count == 0) &&
1163 		 (prev_entry->uip == uip ||
1164 		  (prev_entry->object.vm_object != NULL &&
1165 		   (prev_entry->object.vm_object->uip == uip))) &&
1166 		   vm_object_coalesce(prev_entry->object.vm_object,
1167 		       prev_entry->offset,
1168 		       (vm_size_t)(prev_entry->end - prev_entry->start),
1169 		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
1170 		/*
1171 		 * We were able to extend the object.  Determine if we
1172 		 * can extend the previous map entry to include the
1173 		 * new range as well.
1174 		 */
1175 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1176 		    (prev_entry->protection == prot) &&
1177 		    (prev_entry->max_protection == max)) {
1178 			map->size += (end - prev_entry->end);
1179 			prev_entry->end = end;
1180 			vm_map_entry_resize_free(map, prev_entry);
1181 			vm_map_simplify_entry(map, prev_entry);
1182 			if (uip != NULL)
1183 				uifree(uip);
1184 			return (KERN_SUCCESS);
1185 		}
1186 
1187 		/*
1188 		 * If we can extend the object but cannot extend the
1189 		 * map entry, we have to create a new map entry.  We
1190 		 * must bump the ref count on the extended object to
1191 		 * account for it.  object may be NULL.
1192 		 */
1193 		object = prev_entry->object.vm_object;
1194 		offset = prev_entry->offset +
1195 			(prev_entry->end - prev_entry->start);
1196 		vm_object_reference(object);
1197 		if (uip != NULL && object != NULL && object->uip != NULL &&
1198 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1199 			/* Object already accounts for this uid. */
1200 			uifree(uip);
1201 			uip = NULL;
1202 		}
1203 	}
1204 
1205 	/*
1206 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1207 	 * in things like the buffer map where we manage kva but do not manage
1208 	 * backing objects.
1209 	 */
1210 
1211 	/*
1212 	 * Create a new entry
1213 	 */
1214 	new_entry = vm_map_entry_create(map);
1215 	new_entry->start = start;
1216 	new_entry->end = end;
1217 	new_entry->uip = NULL;
1218 
1219 	new_entry->eflags = protoeflags;
1220 	new_entry->object.vm_object = object;
1221 	new_entry->offset = offset;
1222 	new_entry->avail_ssize = 0;
1223 
1224 	new_entry->inheritance = VM_INHERIT_DEFAULT;
1225 	new_entry->protection = prot;
1226 	new_entry->max_protection = max;
1227 	new_entry->wired_count = 0;
1228 
1229 	KASSERT(uip == NULL || !ENTRY_CHARGED(new_entry),
1230 	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
1231 	new_entry->uip = uip;
1232 
1233 	/*
1234 	 * Insert the new entry into the list
1235 	 */
1236 	vm_map_entry_link(map, prev_entry, new_entry);
1237 	map->size += new_entry->end - new_entry->start;
1238 
1239 #if 0
1240 	/*
1241 	 * Temporarily removed to avoid MAP_STACK panic, due to
1242 	 * MAP_STACK being a huge hack.  Will be added back in
1243 	 * when MAP_STACK (and the user stack mapping) is fixed.
1244 	 */
1245 	/*
1246 	 * It may be possible to simplify the entry
1247 	 */
1248 	vm_map_simplify_entry(map, new_entry);
1249 #endif
1250 
1251 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1252 		vm_map_pmap_enter(map, start, prot,
1253 				    object, OFF_TO_IDX(offset), end - start,
1254 				    cow & MAP_PREFAULT_PARTIAL);
1255 	}
1256 
1257 	return (KERN_SUCCESS);
1258 }
1259 
1260 /*
1261  *	vm_map_findspace:
1262  *
1263  *	Find the first fit (lowest VM address) for "length" free bytes
1264  *	beginning at address >= start in the given map.
1265  *
1266  *	In a vm_map_entry, "adj_free" is the amount of free space
1267  *	adjacent (higher address) to this entry, and "max_free" is the
1268  *	maximum amount of contiguous free space in its subtree.  This
1269  *	allows finding a free region in one path down the tree, so
1270  *	O(log n) amortized with splay trees.
1271  *
1272  *	The map must be locked, and leaves it so.
1273  *
1274  *	Returns: 0 on success, and starting address in *addr,
1275  *		 1 if insufficient space.
1276  */
1277 int
1278 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1279     vm_offset_t *addr)	/* OUT */
1280 {
1281 	vm_map_entry_t entry;
1282 	vm_offset_t end, st;
1283 
1284 	/*
1285 	 * Request must fit within min/max VM address and must avoid
1286 	 * address wrap.
1287 	 */
1288 	if (start < map->min_offset)
1289 		start = map->min_offset;
1290 	if (start + length > map->max_offset || start + length < start)
1291 		return (1);
1292 
1293 	/* Empty tree means wide open address space. */
1294 	if (map->root == NULL) {
1295 		*addr = start;
1296 		goto found;
1297 	}
1298 
1299 	/*
1300 	 * After splay, if start comes before root node, then there
1301 	 * must be a gap from start to the root.
1302 	 */
1303 	map->root = vm_map_entry_splay(start, map->root);
1304 	if (start + length <= map->root->start) {
1305 		*addr = start;
1306 		goto found;
1307 	}
1308 
1309 	/*
1310 	 * Root is the last node that might begin its gap before
1311 	 * start, and this is the last comparison where address
1312 	 * wrap might be a problem.
1313 	 */
1314 	st = (start > map->root->end) ? start : map->root->end;
1315 	if (length <= map->root->end + map->root->adj_free - st) {
1316 		*addr = st;
1317 		goto found;
1318 	}
1319 
1320 	/* With max_free, can immediately tell if no solution. */
1321 	entry = map->root->right;
1322 	if (entry == NULL || length > entry->max_free)
1323 		return (1);
1324 
1325 	/*
1326 	 * Search the right subtree in the order: left subtree, root,
1327 	 * right subtree (first fit).  The previous splay implies that
1328 	 * all regions in the right subtree have addresses > start.
1329 	 */
1330 	while (entry != NULL) {
1331 		if (entry->left != NULL && entry->left->max_free >= length)
1332 			entry = entry->left;
1333 		else if (entry->adj_free >= length) {
1334 			*addr = entry->end;
1335 			goto found;
1336 		} else
1337 			entry = entry->right;
1338 	}
1339 
1340 	/* Can't get here, so panic if we do. */
1341 	panic("vm_map_findspace: max_free corrupt");
1342 
1343 found:
1344 	/* Expand the kernel pmap, if necessary. */
1345 	if (map == kernel_map) {
1346 		end = round_page(*addr + length);
1347 		if (end > kernel_vm_end)
1348 			pmap_growkernel(end);
1349 	}
1350 	return (0);
1351 }
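/*
 * vm_map_findspace() is normally used under the map lock together with
 * vm_map_insert(), roughly as in this sketch (illustrative; error
 * handling omitted):
 *
 *	vm_map_lock(map);
 *	if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
 *		(void) vm_map_insert(map, NULL, 0, addr, addr + size,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	vm_map_unlock(map);
 *
 * vm_map_find() below wraps this pattern, retrying when the insertion
 * of the aligned address fails with KERN_NO_SPACE.
 */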
1352 
1353 int
1354 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1355     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1356     vm_prot_t max, int cow)
1357 {
1358 	vm_offset_t end;
1359 	int result;
1360 
1361 	end = start + length;
1362 	vm_map_lock(map);
1363 	VM_MAP_RANGE_CHECK(map, start, end);
1364 	(void) vm_map_delete(map, start, end);
1365 	result = vm_map_insert(map, object, offset, start, end, prot,
1366 	    max, cow);
1367 	vm_map_unlock(map);
1368 	return (result);
1369 }
1370 
1371 /*
1372  *	vm_map_find finds an unallocated region in the target address
1373  *	map with the given length.  The search is defined to be
1374  *	first-fit from the specified address; the region found is
1375  *	returned in the same parameter.
1376  *
1377  *	If object is non-NULL, ref count must be bumped by caller
1378  *	prior to making call to account for the new entry.
1379  */
1380 int
1381 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1382 	    vm_offset_t *addr,	/* IN/OUT */
1383 	    vm_size_t length, int find_space, vm_prot_t prot,
1384 	    vm_prot_t max, int cow)
1385 {
1386 	vm_offset_t start;
1387 	int result;
1388 
1389 	start = *addr;
1390 	vm_map_lock(map);
1391 	do {
1392 		if (find_space != VMFS_NO_SPACE) {
1393 			if (vm_map_findspace(map, start, length, addr)) {
1394 				vm_map_unlock(map);
1395 				return (KERN_NO_SPACE);
1396 			}
1397 			switch (find_space) {
1398 			case VMFS_ALIGNED_SPACE:
1399 				pmap_align_superpage(object, offset, addr,
1400 				    length);
1401 				break;
1402 #ifdef VMFS_TLB_ALIGNED_SPACE
1403 			case VMFS_TLB_ALIGNED_SPACE:
1404 				pmap_align_tlb(addr);
1405 				break;
1406 #endif
1407 			default:
1408 				break;
1409 			}
1410 
1411 			start = *addr;
1412 		}
1413 		result = vm_map_insert(map, object, offset, start, start +
1414 		    length, prot, max, cow);
1415 	} while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE
1416 #ifdef VMFS_TLB_ALIGNED_SPACE
1417 	    || find_space == VMFS_TLB_ALIGNED_SPACE
1418 #endif
1419 	    ));
1420 	vm_map_unlock(map);
1421 	return (result);
1422 }
1423 
1424 /*
1425  *	vm_map_simplify_entry:
1426  *
1427  *	Simplify the given map entry by merging with either neighbor.  This
1428  *	routine also has the ability to merge with both neighbors.
1429  *
1430  *	The map must be locked.
1431  *
1432  *	This routine guarantees that the passed entry remains valid (though
1433  *	possibly extended).  When merging, this routine may delete one or
1434  *	both neighbors.
1435  */
1436 void
1437 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1438 {
1439 	vm_map_entry_t next, prev;
1440 	vm_size_t prevsize, esize;
1441 
1442 	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1443 		return;
1444 
1445 	prev = entry->prev;
1446 	if (prev != &map->header) {
1447 		prevsize = prev->end - prev->start;
1448 		if ( (prev->end == entry->start) &&
1449 		     (prev->object.vm_object == entry->object.vm_object) &&
1450 		     (!prev->object.vm_object ||
1451 			(prev->offset + prevsize == entry->offset)) &&
1452 		     (prev->eflags == entry->eflags) &&
1453 		     (prev->protection == entry->protection) &&
1454 		     (prev->max_protection == entry->max_protection) &&
1455 		     (prev->inheritance == entry->inheritance) &&
1456 		     (prev->wired_count == entry->wired_count) &&
1457 		     (prev->uip == entry->uip)) {
1458 			vm_map_entry_unlink(map, prev);
1459 			entry->start = prev->start;
1460 			entry->offset = prev->offset;
1461 			if (entry->prev != &map->header)
1462 				vm_map_entry_resize_free(map, entry->prev);
1463 
1464 			/*
1465 			 * If the backing object is a vnode object,
1466 			 * vm_object_deallocate() calls vrele().
1467 			 * However, vrele() does not lock the vnode
1468 			 * because the vnode has additional
1469 			 * references.  Thus, the map lock can be kept
1470 			 * without causing a lock-order reversal with
1471 			 * the vnode lock.
1472 			 */
1473 			if (prev->object.vm_object)
1474 				vm_object_deallocate(prev->object.vm_object);
1475 			if (prev->uip != NULL)
1476 				uifree(prev->uip);
1477 			vm_map_entry_dispose(map, prev);
1478 		}
1479 	}
1480 
1481 	next = entry->next;
1482 	if (next != &map->header) {
1483 		esize = entry->end - entry->start;
1484 		if ((entry->end == next->start) &&
1485 		    (next->object.vm_object == entry->object.vm_object) &&
1486 		     (!entry->object.vm_object ||
1487 			(entry->offset + esize == next->offset)) &&
1488 		    (next->eflags == entry->eflags) &&
1489 		    (next->protection == entry->protection) &&
1490 		    (next->max_protection == entry->max_protection) &&
1491 		    (next->inheritance == entry->inheritance) &&
1492 		    (next->wired_count == entry->wired_count) &&
1493 		    (next->uip == entry->uip)) {
1494 			vm_map_entry_unlink(map, next);
1495 			entry->end = next->end;
1496 			vm_map_entry_resize_free(map, entry);
1497 
1498 			/*
1499 			 * See comment above.
1500 			 */
1501 			if (next->object.vm_object)
1502 				vm_object_deallocate(next->object.vm_object);
1503 			if (next->uip != NULL)
1504 				uifree(next->uip);
1505 			vm_map_entry_dispose(map, next);
1506 		}
1507 	}
1508 }
1509 /*
1510  *	vm_map_clip_start:	[ internal use only ]
1511  *
1512  *	Asserts that the given entry begins at or after
1513  *	the specified address; if necessary,
1514  *	it splits the entry into two.
1515  */
1516 #define vm_map_clip_start(map, entry, startaddr) \
1517 { \
1518 	if (startaddr > entry->start) \
1519 		_vm_map_clip_start(map, entry, startaddr); \
1520 }
1521 
1522 /*
1523  *	This routine is called only when it is known that
1524  *	the entry must be split.
1525  */
1526 static void
1527 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1528 {
1529 	vm_map_entry_t new_entry;
1530 
1531 	VM_MAP_ASSERT_LOCKED(map);
1532 
1533 	/*
1534 	 * Split off the front portion -- note that we must insert the new
1535 	 * entry BEFORE this one, so that this entry has the specified
1536 	 * starting address.
1537 	 */
1538 	vm_map_simplify_entry(map, entry);
1539 
1540 	/*
1541 	 * If there is no object backing this entry, we might as well create
1542 	 * one now.  If we defer it, an object can get created after the map
1543 	 * is clipped, and individual objects will be created for the split-up
1544 	 * map.  This is a bit of a hack, but is also about the best place to
1545 	 * put this improvement.
1546 	 */
1547 	if (entry->object.vm_object == NULL && !map->system_map) {
1548 		vm_object_t object;
1549 		object = vm_object_allocate(OBJT_DEFAULT,
1550 				atop(entry->end - entry->start));
1551 		entry->object.vm_object = object;
1552 		entry->offset = 0;
1553 		if (entry->uip != NULL) {
1554 			object->uip = entry->uip;
1555 			object->charge = entry->end - entry->start;
1556 			entry->uip = NULL;
1557 		}
1558 	} else if (entry->object.vm_object != NULL &&
1559 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1560 		   entry->uip != NULL) {
1561 		VM_OBJECT_LOCK(entry->object.vm_object);
1562 		KASSERT(entry->object.vm_object->uip == NULL,
1563 		    ("OVERCOMMIT: vm_entry_clip_start: both uip e %p", entry));
1564 		entry->object.vm_object->uip = entry->uip;
1565 		entry->object.vm_object->charge = entry->end - entry->start;
1566 		VM_OBJECT_UNLOCK(entry->object.vm_object);
1567 		entry->uip = NULL;
1568 	}
1569 
1570 	new_entry = vm_map_entry_create(map);
1571 	*new_entry = *entry;
1572 
1573 	new_entry->end = start;
1574 	entry->offset += (start - entry->start);
1575 	entry->start = start;
1576 	if (new_entry->uip != NULL)
1577 		uihold(entry->uip);
1578 
1579 	vm_map_entry_link(map, entry->prev, new_entry);
1580 
1581 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1582 		vm_object_reference(new_entry->object.vm_object);
1583 	}
1584 }
1585 
1586 /*
1587  *	vm_map_clip_end:	[ internal use only ]
1588  *
1589  *	Asserts that the given entry ends at or before
1590  *	the specified address; if necessary,
1591  *	it splits the entry into two.
1592  */
1593 #define vm_map_clip_end(map, entry, endaddr) \
1594 { \
1595 	if ((endaddr) < (entry->end)) \
1596 		_vm_map_clip_end((map), (entry), (endaddr)); \
1597 }
1598 
1599 /*
1600  *	This routine is called only when it is known that
1601  *	the entry must be split.
1602  */
1603 static void
1604 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1605 {
1606 	vm_map_entry_t new_entry;
1607 
1608 	VM_MAP_ASSERT_LOCKED(map);
1609 
1610 	/*
1611 	 * If there is no object backing this entry, we might as well create
1612 	 * one now.  If we defer it, an object can get created after the map
1613 	 * is clipped, and individual objects will be created for the split-up
1614 	 * map.  This is a bit of a hack, but is also about the best place to
1615 	 * put this improvement.
1616 	 */
1617 	if (entry->object.vm_object == NULL && !map->system_map) {
1618 		vm_object_t object;
1619 		object = vm_object_allocate(OBJT_DEFAULT,
1620 				atop(entry->end - entry->start));
1621 		entry->object.vm_object = object;
1622 		entry->offset = 0;
1623 		if (entry->uip != NULL) {
1624 			object->uip = entry->uip;
1625 			object->charge = entry->end - entry->start;
1626 			entry->uip = NULL;
1627 		}
1628 	} else if (entry->object.vm_object != NULL &&
1629 		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1630 		   entry->uip != NULL) {
1631 		VM_OBJECT_LOCK(entry->object.vm_object);
1632 		KASSERT(entry->object.vm_object->uip == NULL,
1633 		    ("OVERCOMMIT: vm_entry_clip_end: both uip e %p", entry));
1634 		entry->object.vm_object->uip = entry->uip;
1635 		entry->object.vm_object->charge = entry->end - entry->start;
1636 		VM_OBJECT_UNLOCK(entry->object.vm_object);
1637 		entry->uip = NULL;
1638 	}
1639 
1640 	/*
1641 	 * Create a new entry and insert it AFTER the specified entry
1642 	 */
1643 	new_entry = vm_map_entry_create(map);
1644 	*new_entry = *entry;
1645 
1646 	new_entry->start = entry->end = end;
1647 	new_entry->offset += (end - entry->start);
1648 	if (new_entry->uip != NULL)
1649 		uihold(entry->uip);
1650 
1651 	vm_map_entry_link(map, entry, new_entry);
1652 
1653 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1654 		vm_object_reference(new_entry->object.vm_object);
1655 	}
1656 }
1657 
1658 /*
1659  *	vm_map_submap:		[ kernel use only ]
1660  *
1661  *	Mark the given range as handled by a subordinate map.
1662  *
1663  *	This range must have been created with vm_map_find,
1664  *	and no other operations may have been performed on this
1665  *	range prior to calling vm_map_submap.
1666  *
1667  *	Only a limited number of operations can be performed
1668  *	within this range after calling vm_map_submap:
1669  *		vm_fault
1670  *	[Don't try vm_map_copy!]
1671  *
1672  *	To remove a submapping, one must first remove the
1673  *	range from the superior map, and then destroy the
1674  *	submap (if desired).  [Better yet, don't try it.]
1675  */
1676 int
1677 vm_map_submap(
1678 	vm_map_t map,
1679 	vm_offset_t start,
1680 	vm_offset_t end,
1681 	vm_map_t submap)
1682 {
1683 	vm_map_entry_t entry;
1684 	int result = KERN_INVALID_ARGUMENT;
1685 
1686 	vm_map_lock(map);
1687 
1688 	VM_MAP_RANGE_CHECK(map, start, end);
1689 
1690 	if (vm_map_lookup_entry(map, start, &entry)) {
1691 		vm_map_clip_start(map, entry, start);
1692 	} else
1693 		entry = entry->next;
1694 
1695 	vm_map_clip_end(map, entry, end);
1696 
1697 	if ((entry->start == start) && (entry->end == end) &&
1698 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1699 	    (entry->object.vm_object == NULL)) {
1700 		entry->object.sub_map = submap;
1701 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1702 		result = KERN_SUCCESS;
1703 	}
1704 	vm_map_unlock(map);
1705 
1706 	return (result);
1707 }
1708 
1709 /*
1710  * The maximum number of pages to map
1711  */
1712 #define	MAX_INIT_PT	96
1713 
1714 /*
1715  *	vm_map_pmap_enter:
1716  *
1717  *	Preload read-only mappings for the given object's resident pages into
1718  *	the given map.  This eliminates the soft faults on process startup and
1719  *	immediately after an mmap(2).  Because these are speculative mappings,
1720  *	cached pages are not reactivated and mapped.
1721  */
1722 void
1723 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1724     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1725 {
1726 	vm_offset_t start;
1727 	vm_page_t p, p_start;
1728 	vm_pindex_t psize, tmpidx;
1729 
1730 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1731 		return;
1732 	VM_OBJECT_LOCK(object);
1733 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1734 		pmap_object_init_pt(map->pmap, addr, object, pindex, size);
1735 		goto unlock_return;
1736 	}
1737 
1738 	psize = atop(size);
1739 
1740 	if ((flags & MAP_PREFAULT_PARTIAL) && psize > MAX_INIT_PT &&
1741 	    object->resident_page_count > MAX_INIT_PT)
1742 		goto unlock_return;
1743 
1744 	if (psize + pindex > object->size) {
1745 		if (object->size < pindex)
1746 			goto unlock_return;
1747 		psize = object->size - pindex;
1748 	}
1749 
1750 	start = 0;
1751 	p_start = NULL;
1752 
1753 	p = vm_page_find_least(object, pindex);
1754 	/*
1755 	 * Assert: the variable p is either (1) the page with the
1756 	 * least pindex greater than or equal to the parameter pindex
1757 	 * or (2) NULL.
1758 	 */
1759 	for (;
1760 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1761 	     p = TAILQ_NEXT(p, listq)) {
1762 		/*
1763 		 * Don't allow madvise to blow away our really free
1764 		 * pages by allocating pv entries.
1765 		 */
1766 		if ((flags & MAP_PREFAULT_MADVISE) &&
1767 		    cnt.v_free_count < cnt.v_free_reserved) {
1768 			psize = tmpidx;
1769 			break;
1770 		}
1771 		if (p->valid == VM_PAGE_BITS_ALL) {
1772 			if (p_start == NULL) {
1773 				start = addr + ptoa(tmpidx);
1774 				p_start = p;
1775 			}
1776 		} else if (p_start != NULL) {
1777 			pmap_enter_object(map->pmap, start, addr +
1778 			    ptoa(tmpidx), p_start, prot);
1779 			p_start = NULL;
1780 		}
1781 	}
1782 	if (p_start != NULL)
1783 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1784 		    p_start, prot);
1785 unlock_return:
1786 	VM_OBJECT_UNLOCK(object);
1787 }
1788 
1789 /*
1790  *	vm_map_protect:
1791  *
1792  *	Sets the protection of the specified address
1793  *	region in the target map.  If "set_max" is
1794  *	specified, the maximum protection is to be set;
1795  *	otherwise, only the current protection is affected.
1796  */
1797 int
1798 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1799 	       vm_prot_t new_prot, boolean_t set_max)
1800 {
1801 	vm_map_entry_t current, entry;
1802 	vm_object_t obj;
1803 	struct uidinfo *uip;
1804 	vm_prot_t old_prot;
1805 
1806 	vm_map_lock(map);
1807 
1808 	VM_MAP_RANGE_CHECK(map, start, end);
1809 
1810 	if (vm_map_lookup_entry(map, start, &entry)) {
1811 		vm_map_clip_start(map, entry, start);
1812 	} else {
1813 		entry = entry->next;
1814 	}
1815 
1816 	/*
1817 	 * Make a first pass to check for protection violations.
1818 	 */
1819 	current = entry;
1820 	while ((current != &map->header) && (current->start < end)) {
1821 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1822 			vm_map_unlock(map);
1823 			return (KERN_INVALID_ARGUMENT);
1824 		}
1825 		if ((new_prot & current->max_protection) != new_prot) {
1826 			vm_map_unlock(map);
1827 			return (KERN_PROTECTION_FAILURE);
1828 		}
1829 		current = current->next;
1830 	}
1831 
1832 
1833 	/*
1834 	 * Do an accounting pass for private read-only mappings that
1835 	 * now will do cow due to allowed write (e.g. debugger sets
1836 	 * breakpoint on text segment)
1837 	 */
1838 	for (current = entry; (current != &map->header) &&
1839 	     (current->start < end); current = current->next) {
1840 
1841 		vm_map_clip_end(map, current, end);
1842 
1843 		if (set_max ||
1844 		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1845 		    ENTRY_CHARGED(current)) {
1846 			continue;
1847 		}
1848 
1849 		uip = curthread->td_ucred->cr_ruidinfo;
1850 		obj = current->object.vm_object;
1851 
1852 		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1853 			if (!swap_reserve(current->end - current->start)) {
1854 				vm_map_unlock(map);
1855 				return (KERN_RESOURCE_SHORTAGE);
1856 			}
1857 			uihold(uip);
1858 			current->uip = uip;
1859 			continue;
1860 		}
1861 
1862 		VM_OBJECT_LOCK(obj);
1863 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1864 			VM_OBJECT_UNLOCK(obj);
1865 			continue;
1866 		}
1867 
1868 		/*
1869 		 * Charge for the whole object allocation now, since
1870 		 * we cannot distinguish between non-charged and
1871 		 * charged clipped mapping of the same object later.
1872 		 */
1873 		KASSERT(obj->charge == 0,
1874 		    ("vm_map_protect: object %p overcharged\n", obj));
1875 		if (!swap_reserve(ptoa(obj->size))) {
1876 			VM_OBJECT_UNLOCK(obj);
1877 			vm_map_unlock(map);
1878 			return (KERN_RESOURCE_SHORTAGE);
1879 		}
1880 
1881 		uihold(uip);
1882 		obj->uip = uip;
1883 		obj->charge = ptoa(obj->size);
1884 		VM_OBJECT_UNLOCK(obj);
1885 	}
1886 
1887 	/*
1888 	 * Go back and fix up protections. [Note that clipping is not
1889 	 * necessary the second time.]
1890 	 */
1891 	current = entry;
1892 	while ((current != &map->header) && (current->start < end)) {
1893 		old_prot = current->protection;
1894 
1895 		if (set_max)
1896 			current->protection =
1897 			    (current->max_protection = new_prot) &
1898 			    old_prot;
1899 		else
1900 			current->protection = new_prot;
1901 
1902 		if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED))
1903 		     == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) &&
1904 		    (current->protection & VM_PROT_WRITE) != 0 &&
1905 		    (old_prot & VM_PROT_WRITE) == 0) {
1906 			vm_fault_copy_entry(map, map, current, current, NULL);
1907 		}
1908 
1909 		/*
1910 		 * When restricting access, update the physical map.  Mask off
1911 		 * write access for copy-on-write entries here.
1912 		 */
1913 		if ((old_prot & ~current->protection) != 0) {
1914 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1915 							VM_PROT_ALL)
1916 			pmap_protect(map->pmap, current->start,
1917 			    current->end,
1918 			    current->protection & MASK(current));
1919 #undef	MASK
1920 		}
1921 		vm_map_simplify_entry(map, current);
1922 		current = current->next;
1923 	}
1924 	vm_map_unlock(map);
1925 	return (KERN_SUCCESS);
1926 }
1927 
1928 /*
1929  *	vm_map_madvise:
1930  *
1931  *	This routine traverses a process's map handling the madvise
1932  *	system call.  Advisories are classified as either those affecting
1933  *	the vm_map_entry structure or those affecting the underlying
1934  *	objects.
1935  */
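/*
 *	For illustration, the madvise(2) system call is expected to funnel
 *	into this routine roughly as follows (a sketch only; the actual
 *	handler validates the address range first):
 *
 *		vm_map_madvise(&td->td_proc->p_vmspace->vm_map,
 *		    trunc_page(addr), round_page(addr + len), behav);
 *
 *	As the switch below shows, entry advisories (e.g. MADV_NOSYNC) take
 *	the exclusive map lock and clip entries, while object advisories
 *	(e.g. MADV_FREE) only take the read lock.
 */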
1936 int
1937 vm_map_madvise(
1938 	vm_map_t map,
1939 	vm_offset_t start,
1940 	vm_offset_t end,
1941 	int behav)
1942 {
1943 	vm_map_entry_t current, entry;
1944 	int modify_map = 0;
1945 
1946 	/*
1947 	 * Some madvise calls directly modify the vm_map_entry, in which case
1948 	 * we need to use an exclusive lock on the map and we need to perform
1949 	 * various clipping operations.  Otherwise we only need a read-lock
1950 	 * on the map.
1951 	 */
1952 	switch(behav) {
1953 	case MADV_NORMAL:
1954 	case MADV_SEQUENTIAL:
1955 	case MADV_RANDOM:
1956 	case MADV_NOSYNC:
1957 	case MADV_AUTOSYNC:
1958 	case MADV_NOCORE:
1959 	case MADV_CORE:
1960 		modify_map = 1;
1961 		vm_map_lock(map);
1962 		break;
1963 	case MADV_WILLNEED:
1964 	case MADV_DONTNEED:
1965 	case MADV_FREE:
1966 		vm_map_lock_read(map);
1967 		break;
1968 	default:
1969 		return (KERN_INVALID_ARGUMENT);
1970 	}
1971 
1972 	/*
1973 	 * Locate starting entry and clip if necessary.
1974 	 */
1975 	VM_MAP_RANGE_CHECK(map, start, end);
1976 
1977 	if (vm_map_lookup_entry(map, start, &entry)) {
1978 		if (modify_map)
1979 			vm_map_clip_start(map, entry, start);
1980 	} else {
1981 		entry = entry->next;
1982 	}
1983 
1984 	if (modify_map) {
1985 		/*
1986 		 * madvise behaviors that are implemented in the vm_map_entry.
1987 		 *
1988 		 * We clip the vm_map_entry so that behavioral changes are
1989 		 * limited to the specified address range.
1990 		 */
1991 		for (current = entry;
1992 		     (current != &map->header) && (current->start < end);
1993 		     current = current->next
1994 		) {
1995 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1996 				continue;
1997 
1998 			vm_map_clip_end(map, current, end);
1999 
2000 			switch (behav) {
2001 			case MADV_NORMAL:
2002 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2003 				break;
2004 			case MADV_SEQUENTIAL:
2005 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2006 				break;
2007 			case MADV_RANDOM:
2008 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2009 				break;
2010 			case MADV_NOSYNC:
2011 				current->eflags |= MAP_ENTRY_NOSYNC;
2012 				break;
2013 			case MADV_AUTOSYNC:
2014 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2015 				break;
2016 			case MADV_NOCORE:
2017 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2018 				break;
2019 			case MADV_CORE:
2020 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2021 				break;
2022 			default:
2023 				break;
2024 			}
2025 			vm_map_simplify_entry(map, current);
2026 		}
2027 		vm_map_unlock(map);
2028 	} else {
2029 		vm_pindex_t pindex;
2030 		int count;
2031 
2032 		/*
2033 		 * madvise behaviors that are implemented in the underlying
2034 		 * vm_object.
2035 		 *
2036 		 * Since we don't clip the vm_map_entry, we have to clip
2037 		 * the vm_object pindex and count.
2038 		 */
2039 		for (current = entry;
2040 		     (current != &map->header) && (current->start < end);
2041 		     current = current->next
2042 		) {
2043 			vm_offset_t useStart;
2044 
2045 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2046 				continue;
2047 
2048 			pindex = OFF_TO_IDX(current->offset);
2049 			count = atop(current->end - current->start);
2050 			useStart = current->start;
2051 
2052 			if (current->start < start) {
2053 				pindex += atop(start - current->start);
2054 				count -= atop(start - current->start);
2055 				useStart = start;
2056 			}
2057 			if (current->end > end)
2058 				count -= atop(current->end - end);
2059 
2060 			if (count <= 0)
2061 				continue;
2062 
2063 			vm_object_madvise(current->object.vm_object,
2064 					  pindex, count, behav);
2065 			if (behav == MADV_WILLNEED) {
2066 				vm_map_pmap_enter(map,
2067 				    useStart,
2068 				    current->protection,
2069 				    current->object.vm_object,
2070 				    pindex,
2071 				    (count << PAGE_SHIFT),
2072 				    MAP_PREFAULT_MADVISE
2073 				);
2074 			}
2075 		}
2076 		vm_map_unlock_read(map);
2077 	}
2078 	return (0);
2079 }
2080 
2081 
2082 /*
2083  *	vm_map_inherit:
2084  *
2085  *	Sets the inheritance of the specified address
2086  *	range in the target map.  Inheritance
2087  *	affects how the map will be shared with
2088  *	child maps at the time of vmspace_fork.
2089  */
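/*
 *	For illustration, the minherit(2) system call is expected to map
 *	onto this routine roughly as (a sketch, omitting argument checks):
 *
 *		vm_map_inherit(&td->td_proc->p_vmspace->vm_map,
 *		    trunc_page(addr), round_page(addr + len), inherit);
 *
 *	where inherit is one of VM_INHERIT_NONE, VM_INHERIT_COPY, or
 *	VM_INHERIT_SHARE, matching the validation switch below.
 */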
2090 int
2091 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2092 	       vm_inherit_t new_inheritance)
2093 {
2094 	vm_map_entry_t entry;
2095 	vm_map_entry_t temp_entry;
2096 
2097 	switch (new_inheritance) {
2098 	case VM_INHERIT_NONE:
2099 	case VM_INHERIT_COPY:
2100 	case VM_INHERIT_SHARE:
2101 		break;
2102 	default:
2103 		return (KERN_INVALID_ARGUMENT);
2104 	}
2105 	vm_map_lock(map);
2106 	VM_MAP_RANGE_CHECK(map, start, end);
2107 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2108 		entry = temp_entry;
2109 		vm_map_clip_start(map, entry, start);
2110 	} else
2111 		entry = temp_entry->next;
2112 	while ((entry != &map->header) && (entry->start < end)) {
2113 		vm_map_clip_end(map, entry, end);
2114 		entry->inheritance = new_inheritance;
2115 		vm_map_simplify_entry(map, entry);
2116 		entry = entry->next;
2117 	}
2118 	vm_map_unlock(map);
2119 	return (KERN_SUCCESS);
2120 }
2121 
2122 /*
2123  *	vm_map_unwire:
2124  *
2125  *	Implements both kernel and user unwiring.
2126  */
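/*
 *	For illustration, the munlock(2) path is expected to use the user
 *	variant roughly as (a sketch):
 *
 *		vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
 *		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 *	Kernel callers pass VM_MAP_WIRE_SYSTEM instead, and may add
 *	VM_MAP_WIRE_HOLESOK to tolerate unmapped gaps in the range.
 */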
2127 int
2128 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2129     int flags)
2130 {
2131 	vm_map_entry_t entry, first_entry, tmp_entry;
2132 	vm_offset_t saved_start;
2133 	unsigned int last_timestamp;
2134 	int rv;
2135 	boolean_t need_wakeup, result, user_unwire;
2136 
2137 	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2138 	vm_map_lock(map);
2139 	VM_MAP_RANGE_CHECK(map, start, end);
2140 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2141 		if (flags & VM_MAP_WIRE_HOLESOK)
2142 			first_entry = first_entry->next;
2143 		else {
2144 			vm_map_unlock(map);
2145 			return (KERN_INVALID_ADDRESS);
2146 		}
2147 	}
2148 	last_timestamp = map->timestamp;
2149 	entry = first_entry;
2150 	while (entry != &map->header && entry->start < end) {
2151 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2152 			/*
2153 			 * We have not yet clipped the entry.
2154 			 */
2155 			saved_start = (start >= entry->start) ? start :
2156 			    entry->start;
2157 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2158 			if (vm_map_unlock_and_wait(map, 0)) {
2159 				/*
2160 				 * Allow interruption of user unwiring?
2161 				 */
2162 			}
2163 			vm_map_lock(map);
2164 			if (last_timestamp+1 != map->timestamp) {
2165 				/*
2166 				 * Look again for the entry because the map was
2167 				 * modified while it was unlocked.
2168 				 * Specifically, the entry may have been
2169 				 * clipped, merged, or deleted.
2170 				 */
2171 				if (!vm_map_lookup_entry(map, saved_start,
2172 				    &tmp_entry)) {
2173 					if (flags & VM_MAP_WIRE_HOLESOK)
2174 						tmp_entry = tmp_entry->next;
2175 					else {
2176 						if (saved_start == start) {
2177 							/*
2178 							 * first_entry has been deleted.
2179 							 */
2180 							vm_map_unlock(map);
2181 							return (KERN_INVALID_ADDRESS);
2182 						}
2183 						end = saved_start;
2184 						rv = KERN_INVALID_ADDRESS;
2185 						goto done;
2186 					}
2187 				}
2188 				if (entry == first_entry)
2189 					first_entry = tmp_entry;
2190 				else
2191 					first_entry = NULL;
2192 				entry = tmp_entry;
2193 			}
2194 			last_timestamp = map->timestamp;
2195 			continue;
2196 		}
2197 		vm_map_clip_start(map, entry, start);
2198 		vm_map_clip_end(map, entry, end);
2199 		/*
2200 		 * Mark the entry in case the map lock is released.  (See
2201 		 * above.)
2202 		 */
2203 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2204 		/*
2205 		 * Check the map for holes in the specified region.
2206 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2207 		 */
2208 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2209 		    (entry->end < end && (entry->next == &map->header ||
2210 		    entry->next->start > entry->end))) {
2211 			end = entry->end;
2212 			rv = KERN_INVALID_ADDRESS;
2213 			goto done;
2214 		}
2215 		/*
2216 		 * If system unwiring, require that the entry is system wired.
2217 		 */
2218 		if (!user_unwire &&
2219 		    vm_map_entry_system_wired_count(entry) == 0) {
2220 			end = entry->end;
2221 			rv = KERN_INVALID_ARGUMENT;
2222 			goto done;
2223 		}
2224 		entry = entry->next;
2225 	}
2226 	rv = KERN_SUCCESS;
2227 done:
2228 	need_wakeup = FALSE;
2229 	if (first_entry == NULL) {
2230 		result = vm_map_lookup_entry(map, start, &first_entry);
2231 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2232 			first_entry = first_entry->next;
2233 		else
2234 			KASSERT(result, ("vm_map_unwire: lookup failed"));
2235 	}
2236 	entry = first_entry;
2237 	while (entry != &map->header && entry->start < end) {
2238 		if (rv == KERN_SUCCESS && (!user_unwire ||
2239 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2240 			if (user_unwire)
2241 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2242 			entry->wired_count--;
2243 			if (entry->wired_count == 0) {
2244 				/*
2245 				 * Retain the map lock.
2246 				 */
2247 				vm_fault_unwire(map, entry->start, entry->end,
2248 				    entry->object.vm_object != NULL &&
2249 				    (entry->object.vm_object->type == OBJT_DEVICE ||
2250 				    entry->object.vm_object->type == OBJT_SG));
2251 			}
2252 		}
2253 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2254 			("vm_map_unwire: in-transition flag missing"));
2255 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2256 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2257 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2258 			need_wakeup = TRUE;
2259 		}
2260 		vm_map_simplify_entry(map, entry);
2261 		entry = entry->next;
2262 	}
2263 	vm_map_unlock(map);
2264 	if (need_wakeup)
2265 		vm_map_wakeup(map);
2266 	return (rv);
2267 }
2268 
2269 /*
2270  *	vm_map_wire:
2271  *
2272  *	Implements both kernel and user wiring.
2273  */
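/*
 *	For illustration, the mlock(2) path is expected to call this routine
 *	roughly as (a sketch):
 *
 *		vm_map_wire(&td->td_proc->p_vmspace->vm_map, start, end,
 *		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 *	The MAP_WIREFUTURE handling in vm_map_growstack() below uses the
 *	same calling convention for newly grown stack pages.
 */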
2274 int
2275 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2276     int flags)
2277 {
2278 	vm_map_entry_t entry, first_entry, tmp_entry;
2279 	vm_offset_t saved_end, saved_start;
2280 	unsigned int last_timestamp;
2281 	int rv;
2282 	boolean_t fictitious, need_wakeup, result, user_wire;
2283 
2284 	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2285 	vm_map_lock(map);
2286 	VM_MAP_RANGE_CHECK(map, start, end);
2287 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2288 		if (flags & VM_MAP_WIRE_HOLESOK)
2289 			first_entry = first_entry->next;
2290 		else {
2291 			vm_map_unlock(map);
2292 			return (KERN_INVALID_ADDRESS);
2293 		}
2294 	}
2295 	last_timestamp = map->timestamp;
2296 	entry = first_entry;
2297 	while (entry != &map->header && entry->start < end) {
2298 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2299 			/*
2300 			 * We have not yet clipped the entry.
2301 			 */
2302 			saved_start = (start >= entry->start) ? start :
2303 			    entry->start;
2304 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2305 			if (vm_map_unlock_and_wait(map, 0)) {
2306 				/*
2307 				 * Allow interruption of user wiring?
2308 				 */
2309 			}
2310 			vm_map_lock(map);
2311 			if (last_timestamp + 1 != map->timestamp) {
2312 				/*
2313 				 * Look again for the entry because the map was
2314 				 * modified while it was unlocked.
2315 				 * Specifically, the entry may have been
2316 				 * clipped, merged, or deleted.
2317 				 */
2318 				if (!vm_map_lookup_entry(map, saved_start,
2319 				    &tmp_entry)) {
2320 					if (flags & VM_MAP_WIRE_HOLESOK)
2321 						tmp_entry = tmp_entry->next;
2322 					else {
2323 						if (saved_start == start) {
2324 							/*
2325 							 * first_entry has been deleted.
2326 							 */
2327 							vm_map_unlock(map);
2328 							return (KERN_INVALID_ADDRESS);
2329 						}
2330 						end = saved_start;
2331 						rv = KERN_INVALID_ADDRESS;
2332 						goto done;
2333 					}
2334 				}
2335 				if (entry == first_entry)
2336 					first_entry = tmp_entry;
2337 				else
2338 					first_entry = NULL;
2339 				entry = tmp_entry;
2340 			}
2341 			last_timestamp = map->timestamp;
2342 			continue;
2343 		}
2344 		vm_map_clip_start(map, entry, start);
2345 		vm_map_clip_end(map, entry, end);
2346 		/*
2347 		 * Mark the entry in case the map lock is released.  (See
2348 		 * above.)
2349 		 */
2350 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2351 		/*
2352 		 * If the entry is not yet wired, fault in and wire its pages.
2353 		 */
2354 		if (entry->wired_count == 0) {
2355 			if ((entry->protection & (VM_PROT_READ|VM_PROT_EXECUTE))
2356 			    == 0) {
2357 				entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2358 				if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2359 					end = entry->end;
2360 					rv = KERN_INVALID_ADDRESS;
2361 					goto done;
2362 				}
2363 				goto next_entry;
2364 			}
2365 			entry->wired_count++;
2366 			saved_start = entry->start;
2367 			saved_end = entry->end;
2368 			fictitious = entry->object.vm_object != NULL &&
2369 			    (entry->object.vm_object->type == OBJT_DEVICE ||
2370 			    entry->object.vm_object->type == OBJT_SG);
2371 			/*
2372 			 * Release the map lock, relying on the in-transition
2373 			 * mark.
2374 			 */
2375 			vm_map_unlock(map);
2376 			rv = vm_fault_wire(map, saved_start, saved_end,
2377 			    fictitious);
2378 			vm_map_lock(map);
2379 			if (last_timestamp + 1 != map->timestamp) {
2380 				/*
2381 				 * Look again for the entry because the map was
2382 				 * modified while it was unlocked.  The entry
2383 				 * may have been clipped, but NOT merged or
2384 				 * deleted.
2385 				 */
2386 				result = vm_map_lookup_entry(map, saved_start,
2387 				    &tmp_entry);
2388 				KASSERT(result, ("vm_map_wire: lookup failed"));
2389 				if (entry == first_entry)
2390 					first_entry = tmp_entry;
2391 				else
2392 					first_entry = NULL;
2393 				entry = tmp_entry;
2394 				while (entry->end < saved_end) {
2395 					if (rv != KERN_SUCCESS) {
2396 						KASSERT(entry->wired_count == 1,
2397 						    ("vm_map_wire: bad count"));
2398 						entry->wired_count = -1;
2399 					}
2400 					entry = entry->next;
2401 				}
2402 			}
2403 			last_timestamp = map->timestamp;
2404 			if (rv != KERN_SUCCESS) {
2405 				KASSERT(entry->wired_count == 1,
2406 				    ("vm_map_wire: bad count"));
2407 				/*
2408 				 * Assign an out-of-range value to represent
2409 				 * the failure to wire this entry.
2410 				 */
2411 				entry->wired_count = -1;
2412 				end = entry->end;
2413 				goto done;
2414 			}
2415 		} else if (!user_wire ||
2416 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2417 			entry->wired_count++;
2418 		}
2419 		/*
2420 		 * Check the map for holes in the specified region.
2421 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2422 		 */
2423 	next_entry:
2424 		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2425 		    (entry->end < end && (entry->next == &map->header ||
2426 		    entry->next->start > entry->end))) {
2427 			end = entry->end;
2428 			rv = KERN_INVALID_ADDRESS;
2429 			goto done;
2430 		}
2431 		entry = entry->next;
2432 	}
2433 	rv = KERN_SUCCESS;
2434 done:
2435 	need_wakeup = FALSE;
2436 	if (first_entry == NULL) {
2437 		result = vm_map_lookup_entry(map, start, &first_entry);
2438 		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2439 			first_entry = first_entry->next;
2440 		else
2441 			KASSERT(result, ("vm_map_wire: lookup failed"));
2442 	}
2443 	entry = first_entry;
2444 	while (entry != &map->header && entry->start < end) {
2445 		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2446 			goto next_entry_done;
2447 		if (rv == KERN_SUCCESS) {
2448 			if (user_wire)
2449 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2450 		} else if (entry->wired_count == -1) {
2451 			/*
2452 			 * Wiring failed on this entry.  Thus, unwiring is
2453 			 * unnecessary.
2454 			 */
2455 			entry->wired_count = 0;
2456 		} else {
2457 			if (!user_wire ||
2458 			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2459 				entry->wired_count--;
2460 			if (entry->wired_count == 0) {
2461 				/*
2462 				 * Retain the map lock.
2463 				 */
2464 				vm_fault_unwire(map, entry->start, entry->end,
2465 				    entry->object.vm_object != NULL &&
2466 				    (entry->object.vm_object->type == OBJT_DEVICE ||
2467 				    entry->object.vm_object->type == OBJT_SG));
2468 			}
2469 		}
2470 	next_entry_done:
2471 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2472 			("vm_map_wire: in-transition flag missing"));
2473 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION|MAP_ENTRY_WIRE_SKIPPED);
2474 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2475 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2476 			need_wakeup = TRUE;
2477 		}
2478 		vm_map_simplify_entry(map, entry);
2479 		entry = entry->next;
2480 	}
2481 	vm_map_unlock(map);
2482 	if (need_wakeup)
2483 		vm_map_wakeup(map);
2484 	return (rv);
2485 }
2486 
2487 /*
2488  * vm_map_sync
2489  *
2490  * Push any dirty cached pages in the address range to their pager.
2491  * If syncio is TRUE, dirty pages are written synchronously.
2492  * If invalidate is TRUE, any cached pages are freed as well.
2493  *
2494  * If the size of the region from start to end is zero, we are
2495  * supposed to flush all modified pages within the region containing
2496  * start.  Unfortunately, a region can be split or coalesced with
2497  * neighboring regions, making it difficult to determine what the
2498  * original region was.  Therefore, we approximate this requirement by
2499  * flushing the current region containing start.
2500  *
2501  * Returns an error if any part of the specified range is not mapped.
2502  */
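/*
 * For illustration, the msync(2) system call is expected to translate its
 * flags roughly as follows (a sketch, omitting argument validation):
 *
 *	vm_map_sync(&td->td_proc->p_vmspace->vm_map, addr, addr + size,
 *	    (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0);
 */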
2503 int
2504 vm_map_sync(
2505 	vm_map_t map,
2506 	vm_offset_t start,
2507 	vm_offset_t end,
2508 	boolean_t syncio,
2509 	boolean_t invalidate)
2510 {
2511 	vm_map_entry_t current;
2512 	vm_map_entry_t entry;
2513 	vm_size_t size;
2514 	vm_object_t object;
2515 	vm_ooffset_t offset;
2516 	unsigned int last_timestamp;
2517 
2518 	vm_map_lock_read(map);
2519 	VM_MAP_RANGE_CHECK(map, start, end);
2520 	if (!vm_map_lookup_entry(map, start, &entry)) {
2521 		vm_map_unlock_read(map);
2522 		return (KERN_INVALID_ADDRESS);
2523 	} else if (start == end) {
2524 		start = entry->start;
2525 		end = entry->end;
2526 	}
2527 	/*
2528 	 * Make a first pass to check for user-wired memory and holes.
2529 	 */
2530 	for (current = entry; current != &map->header && current->start < end;
2531 	    current = current->next) {
2532 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2533 			vm_map_unlock_read(map);
2534 			return (KERN_INVALID_ARGUMENT);
2535 		}
2536 		if (end > current->end &&
2537 		    (current->next == &map->header ||
2538 			current->end != current->next->start)) {
2539 			vm_map_unlock_read(map);
2540 			return (KERN_INVALID_ADDRESS);
2541 		}
2542 	}
2543 
2544 	if (invalidate)
2545 		pmap_remove(map->pmap, start, end);
2546 
2547 	/*
2548 	 * Make a second pass, cleaning/uncaching pages from the indicated
2549 	 * objects as we go.
2550 	 */
2551 	for (current = entry; current != &map->header && current->start < end;) {
2552 		offset = current->offset + (start - current->start);
2553 		size = (end <= current->end ? end : current->end) - start;
2554 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2555 			vm_map_t smap;
2556 			vm_map_entry_t tentry;
2557 			vm_size_t tsize;
2558 
2559 			smap = current->object.sub_map;
2560 			vm_map_lock_read(smap);
2561 			(void) vm_map_lookup_entry(smap, offset, &tentry);
2562 			tsize = tentry->end - offset;
2563 			if (tsize < size)
2564 				size = tsize;
2565 			object = tentry->object.vm_object;
2566 			offset = tentry->offset + (offset - tentry->start);
2567 			vm_map_unlock_read(smap);
2568 		} else {
2569 			object = current->object.vm_object;
2570 		}
2571 		vm_object_reference(object);
2572 		last_timestamp = map->timestamp;
2573 		vm_map_unlock_read(map);
2574 		vm_object_sync(object, offset, size, syncio, invalidate);
2575 		start += size;
2576 		vm_object_deallocate(object);
2577 		vm_map_lock_read(map);
2578 		if (last_timestamp == map->timestamp ||
2579 		    !vm_map_lookup_entry(map, start, &current))
2580 			current = current->next;
2581 	}
2582 
2583 	vm_map_unlock_read(map);
2584 	return (KERN_SUCCESS);
2585 }
2586 
2587 /*
2588  *	vm_map_entry_unwire:	[ internal use only ]
2589  *
2590  *	Make the region specified by this entry pageable.
2591  *
2592  *	The map in question should be locked.
2593  *	[This is the reason for this routine's existence.]
2594  */
2595 static void
2596 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2597 {
2598 	vm_fault_unwire(map, entry->start, entry->end,
2599 	    entry->object.vm_object != NULL &&
2600 	    (entry->object.vm_object->type == OBJT_DEVICE ||
2601 	    entry->object.vm_object->type == OBJT_SG));
2602 	entry->wired_count = 0;
2603 }
2604 
2605 /*
2606  *	vm_map_entry_delete:	[ internal use only ]
2607  *
2608  *	Deallocate the given entry from the target map.
2609  */
2610 static void
2611 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2612 {
2613 	vm_object_t object;
2614 	vm_pindex_t offidxstart, offidxend, count, size1;
2615 	vm_ooffset_t size;
2616 
2617 	vm_map_entry_unlink(map, entry);
2618 	object = entry->object.vm_object;
2619 	size = entry->end - entry->start;
2620 	map->size -= size;
2621 
2622 	if (entry->uip != NULL) {
2623 		swap_release_by_uid(size, entry->uip);
2624 		uifree(entry->uip);
2625 	}
2626 
2627 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2628 	    (object != NULL)) {
2629 		KASSERT(entry->uip == NULL || object->uip == NULL ||
2630 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2631 		    ("OVERCOMMIT vm_map_entry_delete: both uip %p", entry));
2632 		count = OFF_TO_IDX(size);
2633 		offidxstart = OFF_TO_IDX(entry->offset);
2634 		offidxend = offidxstart + count;
2635 		VM_OBJECT_LOCK(object);
2636 		if (object->ref_count != 1 &&
2637 		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2638 		    object == kernel_object || object == kmem_object)) {
2639 			vm_object_collapse(object);
2640 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2641 			if (object->type == OBJT_SWAP)
2642 				swap_pager_freespace(object, offidxstart, count);
2643 			if (offidxend >= object->size &&
2644 			    offidxstart < object->size) {
2645 				size1 = object->size;
2646 				object->size = offidxstart;
2647 				if (object->uip != NULL) {
2648 					size1 -= object->size;
2649 					KASSERT(object->charge >= ptoa(size1),
2650 					    ("vm_map_entry_delete: object->charge < 0"));
2651 					swap_release_by_uid(ptoa(size1), object->uip);
2652 					object->charge -= ptoa(size1);
2653 				}
2654 			}
2655 		}
2656 		VM_OBJECT_UNLOCK(object);
2657 	} else
2658 		entry->object.vm_object = NULL;
2659 }
2660 
2661 /*
2662  *	vm_map_delete:	[ internal use only ]
2663  *
2664  *	Deallocates the given address range from the target
2665  *	map.
2666  */
2667 int
2668 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2669 {
2670 	vm_map_entry_t entry;
2671 	vm_map_entry_t first_entry;
2672 
2673 	VM_MAP_ASSERT_LOCKED(map);
2674 
2675 	/*
2676 	 * Find the start of the region, and clip it
2677 	 */
2678 	if (!vm_map_lookup_entry(map, start, &first_entry))
2679 		entry = first_entry->next;
2680 	else {
2681 		entry = first_entry;
2682 		vm_map_clip_start(map, entry, start);
2683 	}
2684 
2685 	/*
2686 	 * Step through all entries in this region
2687 	 */
2688 	while ((entry != &map->header) && (entry->start < end)) {
2689 		vm_map_entry_t next;
2690 
2691 		/*
2692 		 * Wait for wiring or unwiring of an entry to complete.
2693 		 * Also wait for any system wirings to disappear on
2694 		 * user maps.
2695 		 */
2696 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2697 		    (vm_map_pmap(map) != kernel_pmap &&
2698 		    vm_map_entry_system_wired_count(entry) != 0)) {
2699 			unsigned int last_timestamp;
2700 			vm_offset_t saved_start;
2701 			vm_map_entry_t tmp_entry;
2702 
2703 			saved_start = entry->start;
2704 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2705 			last_timestamp = map->timestamp;
2706 			(void) vm_map_unlock_and_wait(map, 0);
2707 			vm_map_lock(map);
2708 			if (last_timestamp + 1 != map->timestamp) {
2709 				/*
2710 				 * Look again for the entry because the map was
2711 				 * modified while it was unlocked.
2712 				 * Specifically, the entry may have been
2713 				 * clipped, merged, or deleted.
2714 				 */
2715 				if (!vm_map_lookup_entry(map, saved_start,
2716 							 &tmp_entry))
2717 					entry = tmp_entry->next;
2718 				else {
2719 					entry = tmp_entry;
2720 					vm_map_clip_start(map, entry,
2721 							  saved_start);
2722 				}
2723 			}
2724 			continue;
2725 		}
2726 		vm_map_clip_end(map, entry, end);
2727 
2728 		next = entry->next;
2729 
2730 		/*
2731 		 * Unwire before removing addresses from the pmap; otherwise,
2732 		 * unwiring will put the entries back in the pmap.
2733 		 */
2734 		if (entry->wired_count != 0) {
2735 			vm_map_entry_unwire(map, entry);
2736 		}
2737 
2738 		pmap_remove(map->pmap, entry->start, entry->end);
2739 
2740 		/*
2741 		 * Delete the entry only after removing all pmap
2742 		 * entries pointing to its pages.  (Otherwise, its
2743 		 * page frames may be reallocated, and any modify bits
2744 		 * will be set in the wrong object!)
2745 		 */
2746 		vm_map_entry_delete(map, entry);
2747 		entry->next = map->deferred_freelist;
2748 		map->deferred_freelist = entry;
2749 		entry = next;
2750 	}
2751 	return (KERN_SUCCESS);
2752 }
2753 
2754 /*
2755  *	vm_map_remove:
2756  *
2757  *	Remove the given address range from the target map.
2758  *	This is the exported form of vm_map_delete.
2759  */
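/*
 *	For illustration, a kernel caller such as kmem_free() is expected to
 *	release a mapped range roughly as (a sketch):
 *
 *		(void) vm_map_remove(map, trunc_page(addr),
 *		    round_page(addr + size));
 *
 *	The lock/range-check/delete/unlock sequence below keeps the map
 *	locked only for the duration of the deletion.
 */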
2760 int
2761 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2762 {
2763 	int result;
2764 
2765 	vm_map_lock(map);
2766 	VM_MAP_RANGE_CHECK(map, start, end);
2767 	result = vm_map_delete(map, start, end);
2768 	vm_map_unlock(map);
2769 	return (result);
2770 }
2771 
2772 /*
2773  *	vm_map_check_protection:
2774  *
2775  *	Assert that the target map allows the specified privilege on the
2776  *	entire address region given.  The entire region must be allocated.
2777  *
2778  *	WARNING!  This code does not and should not check whether the
2779  *	contents of the region are accessible.  For example, a smaller file
2780  *	might be mapped into a larger address space.
2781  *
2782  *	NOTE!  This code is also called by munmap().
2783  *
2784  *	The map must be locked.  A read lock is sufficient.
2785  */
2786 boolean_t
2787 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2788 			vm_prot_t protection)
2789 {
2790 	vm_map_entry_t entry;
2791 	vm_map_entry_t tmp_entry;
2792 
2793 	if (!vm_map_lookup_entry(map, start, &tmp_entry))
2794 		return (FALSE);
2795 	entry = tmp_entry;
2796 
2797 	while (start < end) {
2798 		if (entry == &map->header)
2799 			return (FALSE);
2800 		/*
2801 		 * No holes allowed!
2802 		 */
2803 		if (start < entry->start)
2804 			return (FALSE);
2805 		/*
2806 		 * Check protection associated with entry.
2807 		 */
2808 		if ((entry->protection & protection) != protection)
2809 			return (FALSE);
2810 		/* go to next entry */
2811 		start = entry->end;
2812 		entry = entry->next;
2813 	}
2814 	return (TRUE);
2815 }
2816 
2817 /*
2818  *	vm_map_copy_entry:
2819  *
2820  *	Copies the contents of the source entry to the destination
2821  *	entry.  The entries *must* be aligned properly.
2822  */
2823 static void
2824 vm_map_copy_entry(
2825 	vm_map_t src_map,
2826 	vm_map_t dst_map,
2827 	vm_map_entry_t src_entry,
2828 	vm_map_entry_t dst_entry,
2829 	vm_ooffset_t *fork_charge)
2830 {
2831 	vm_object_t src_object;
2832 	vm_offset_t size;
2833 	struct uidinfo *uip;
2834 	int charged;
2835 
2836 	VM_MAP_ASSERT_LOCKED(dst_map);
2837 
2838 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2839 		return;
2840 
2841 	if (src_entry->wired_count == 0) {
2842 
2843 		/*
2844 		 * If the source entry is marked needs_copy, it is already
2845 		 * write-protected.
2846 		 */
2847 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2848 			pmap_protect(src_map->pmap,
2849 			    src_entry->start,
2850 			    src_entry->end,
2851 			    src_entry->protection & ~VM_PROT_WRITE);
2852 		}
2853 
2854 		/*
2855 		 * Make a copy of the object.
2856 		 */
2857 		size = src_entry->end - src_entry->start;
2858 		if ((src_object = src_entry->object.vm_object) != NULL) {
2859 			VM_OBJECT_LOCK(src_object);
2860 			charged = ENTRY_CHARGED(src_entry);
2861 			if ((src_object->handle == NULL) &&
2862 				(src_object->type == OBJT_DEFAULT ||
2863 				 src_object->type == OBJT_SWAP)) {
2864 				vm_object_collapse(src_object);
2865 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2866 					vm_object_split(src_entry);
2867 					src_object = src_entry->object.vm_object;
2868 				}
2869 			}
2870 			vm_object_reference_locked(src_object);
2871 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2872 			if (src_entry->uip != NULL &&
2873 			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
2874 				KASSERT(src_object->uip == NULL,
2875 				    ("OVERCOMMIT: vm_map_copy_entry: uip %p",
2876 				     src_object));
2877 				src_object->uip = src_entry->uip;
2878 				src_object->charge = size;
2879 			}
2880 			VM_OBJECT_UNLOCK(src_object);
2881 			dst_entry->object.vm_object = src_object;
2882 			if (charged) {
2883 				uip = curthread->td_ucred->cr_ruidinfo;
2884 				uihold(uip);
2885 				dst_entry->uip = uip;
2886 				*fork_charge += size;
2887 				if (!(src_entry->eflags &
2888 				      MAP_ENTRY_NEEDS_COPY)) {
2889 					uihold(uip);
2890 					src_entry->uip = uip;
2891 					*fork_charge += size;
2892 				}
2893 			}
2894 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2895 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2896 			dst_entry->offset = src_entry->offset;
2897 		} else {
2898 			dst_entry->object.vm_object = NULL;
2899 			dst_entry->offset = 0;
2900 			if (src_entry->uip != NULL) {
2901 				dst_entry->uip = curthread->td_ucred->cr_ruidinfo;
2902 				uihold(dst_entry->uip);
2903 				*fork_charge += size;
2904 			}
2905 		}
2906 
2907 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2908 		    dst_entry->end - dst_entry->start, src_entry->start);
2909 	} else {
2910 		/*
2911 		 * Of course, wired-down pages can't be made copy-on-write.
2912 		 * Instead, cause the wired pages to be copied into the new
2913 		 * map by simulating faults (the new pages are pageable).
2914 		 */
2915 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
2916 		    fork_charge);
2917 	}
2918 }
2919 
2920 /*
2921  * vmspace_map_entry_forked:
2922  * Update the newly-forked vmspace each time a map entry is inherited
2923  * or copied.  The values for vm_dsize and vm_tsize are approximate
2924  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
2925  */
2926 static void
2927 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
2928     vm_map_entry_t entry)
2929 {
2930 	vm_size_t entrysize;
2931 	vm_offset_t newend;
2932 
2933 	entrysize = entry->end - entry->start;
2934 	vm2->vm_map.size += entrysize;
2935 	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
2936 		vm2->vm_ssize += btoc(entrysize);
2937 	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
2938 	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
2939 		newend = MIN(entry->end,
2940 		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
2941 		vm2->vm_dsize += btoc(newend - entry->start);
2942 	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
2943 	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
2944 		newend = MIN(entry->end,
2945 		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
2946 		vm2->vm_tsize += btoc(newend - entry->start);
2947 	}
2948 }
2949 
2950 /*
2951  * vmspace_fork:
2952  * Create a new process vmspace structure and vm_map
2953  * based on those of an existing process.  The new map
2954  * is based on the old map, according to the inheritance
2955  * values on the regions in that map.
2956  *
2957  * XXX It might be worth coalescing the entries added to the new vmspace.
2958  *
2959  * The source map must not be locked.
2960  */
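/*
 * For illustration, vmspace_unshare() below uses this routine roughly as:
 *
 *	fork_charge = 0;
 *	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
 *	if (newvmspace == NULL)
 *		return (ENOMEM);
 *	if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo))
 *		... fail with ENOMEM ...
 *
 * fork_charge accumulates the swap that the caller must reserve on behalf
 * of the new vmspace for copied, charged entries.
 */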
2961 struct vmspace *
2962 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
2963 {
2964 	struct vmspace *vm2;
2965 	vm_map_t old_map = &vm1->vm_map;
2966 	vm_map_t new_map;
2967 	vm_map_entry_t old_entry;
2968 	vm_map_entry_t new_entry;
2969 	vm_object_t object;
2970 	int locked;
2971 
2972 	vm_map_lock(old_map);
2973 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2974 	if (vm2 == NULL)
2975 		goto unlock_and_return;
2976 	vm2->vm_taddr = vm1->vm_taddr;
2977 	vm2->vm_daddr = vm1->vm_daddr;
2978 	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
2979 	new_map = &vm2->vm_map;	/* XXX */
2980 	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
2981 	KASSERT(locked, ("vmspace_fork: lock failed"));
2982 	new_map->timestamp = 1;
2983 
2984 	old_entry = old_map->header.next;
2985 
2986 	while (old_entry != &old_map->header) {
2987 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2988 			panic("vm_map_fork: encountered a submap");
2989 
2990 		switch (old_entry->inheritance) {
2991 		case VM_INHERIT_NONE:
2992 			break;
2993 
2994 		case VM_INHERIT_SHARE:
2995 			/*
2996 			 * Clone the entry, creating the shared object if necessary.
2997 			 */
2998 			object = old_entry->object.vm_object;
2999 			if (object == NULL) {
3000 				object = vm_object_allocate(OBJT_DEFAULT,
3001 					atop(old_entry->end - old_entry->start));
3002 				old_entry->object.vm_object = object;
3003 				old_entry->offset = 0;
3004 				if (old_entry->uip != NULL) {
3005 					object->uip = old_entry->uip;
3006 					object->charge = old_entry->end -
3007 					    old_entry->start;
3008 					old_entry->uip = NULL;
3009 				}
3010 			}
3011 
3012 			/*
3013 			 * Add the reference before calling vm_object_shadow
3014 			 * to ensure that a shadow object is created.
3015 			 */
3016 			vm_object_reference(object);
3017 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3018 				vm_object_shadow(&old_entry->object.vm_object,
3019 					&old_entry->offset,
3020 					atop(old_entry->end - old_entry->start));
3021 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3022 				/* Transfer the second reference too. */
3023 				vm_object_reference(
3024 				    old_entry->object.vm_object);
3025 
3026 				/*
3027 				 * As in vm_map_simplify_entry(), the
3028 				 * vnode lock will not be acquired in
3029 				 * this call to vm_object_deallocate().
3030 				 */
3031 				vm_object_deallocate(object);
3032 				object = old_entry->object.vm_object;
3033 			}
3034 			VM_OBJECT_LOCK(object);
3035 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3036 			if (old_entry->uip != NULL) {
3037 				KASSERT(object->uip == NULL, ("vmspace_fork both uip"));
3038 				object->uip = old_entry->uip;
3039 				object->charge = old_entry->end - old_entry->start;
3040 				old_entry->uip = NULL;
3041 			}
3042 			VM_OBJECT_UNLOCK(object);
3043 
3044 			/*
3045 			 * Clone the entry, referencing the shared object.
3046 			 */
3047 			new_entry = vm_map_entry_create(new_map);
3048 			*new_entry = *old_entry;
3049 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3050 			    MAP_ENTRY_IN_TRANSITION);
3051 			new_entry->wired_count = 0;
3052 
3053 			/*
3054 			 * Insert the entry into the new map -- we know we're
3055 			 * inserting at the end of the new map.
3056 			 */
3057 			vm_map_entry_link(new_map, new_map->header.prev,
3058 			    new_entry);
3059 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3060 
3061 			/*
3062 			 * Update the physical map
3063 			 */
3064 			pmap_copy(new_map->pmap, old_map->pmap,
3065 			    new_entry->start,
3066 			    (old_entry->end - old_entry->start),
3067 			    old_entry->start);
3068 			break;
3069 
3070 		case VM_INHERIT_COPY:
3071 			/*
3072 			 * Clone the entry and link into the map.
3073 			 */
3074 			new_entry = vm_map_entry_create(new_map);
3075 			*new_entry = *old_entry;
3076 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3077 			    MAP_ENTRY_IN_TRANSITION);
3078 			new_entry->wired_count = 0;
3079 			new_entry->object.vm_object = NULL;
3080 			new_entry->uip = NULL;
3081 			vm_map_entry_link(new_map, new_map->header.prev,
3082 			    new_entry);
3083 			vmspace_map_entry_forked(vm1, vm2, new_entry);
3084 			vm_map_copy_entry(old_map, new_map, old_entry,
3085 			    new_entry, fork_charge);
3086 			break;
3087 		}
3088 		old_entry = old_entry->next;
3089 	}
3090 unlock_and_return:
3091 	vm_map_unlock(old_map);
3092 	if (vm2 != NULL)
3093 		vm_map_unlock(new_map);
3094 
3095 	return (vm2);
3096 }
3097 
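/*
 *	vm_map_stack:
 *
 *	Set up a stack within the range [addrbos, addrbos + max_ssize).
 *	Only an initial region of init_ssize bytes (at most sgrowsiz) is
 *	mapped, positioned according to the grow direction encoded in the
 *	cow flags; the remainder is recorded in avail_ssize and is mapped
 *	on demand by vm_map_growstack().  As noted below, the unmapped
 *	remainder is not protected against other mappings.
 */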
3098 int
3099 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3100     vm_prot_t prot, vm_prot_t max, int cow)
3101 {
3102 	vm_map_entry_t new_entry, prev_entry;
3103 	vm_offset_t bot, top;
3104 	vm_size_t init_ssize;
3105 	int orient, rv;
3106 	rlim_t vmemlim;
3107 
3108 	/*
3109 	 * The stack orientation is piggybacked with the cow argument.
3110 	 * Extract it into orient and mask the cow argument so that we
3111 	 * don't pass it around further.
3112 	 * NOTE: We explicitly allow bi-directional stacks.
3113 	 */
3114 	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3115 	cow &= ~orient;
3116 	KASSERT(orient != 0, ("No stack grow direction"));
3117 
3118 	if (addrbos < vm_map_min(map) ||
3119 	    addrbos > vm_map_max(map) ||
3120 	    addrbos + max_ssize < addrbos)
3121 		return (KERN_NO_SPACE);
3122 
3123 	init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
3124 
3125 	PROC_LOCK(curthread->td_proc);
3126 	vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
3127 	PROC_UNLOCK(curthread->td_proc);
3128 
3129 	vm_map_lock(map);
3130 
3131 	/* If addr is already mapped, no go */
3132 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3133 		vm_map_unlock(map);
3134 		return (KERN_NO_SPACE);
3135 	}
3136 
3137 	/* If we would blow our VMEM resource limit, no go */
3138 	if (map->size + init_ssize > vmemlim) {
3139 		vm_map_unlock(map);
3140 		return (KERN_NO_SPACE);
3141 	}
3142 
3143 	/*
3144 	 * If we can't accommodate max_ssize in the current mapping, no go.
3145 	 * However, we need to be aware that subsequent user mappings might
3146 	 * map into the space we have reserved for stack, and currently this
3147 	 * space is not protected.
3148 	 *
3149 	 * Hopefully we will at least detect this condition when we try to
3150 	 * grow the stack.
3151 	 */
3152 	if ((prev_entry->next != &map->header) &&
3153 	    (prev_entry->next->start < addrbos + max_ssize)) {
3154 		vm_map_unlock(map);
3155 		return (KERN_NO_SPACE);
3156 	}
3157 
3158 	/*
3159 	 * We initially map a stack of only init_ssize.  We will grow as
3160 	 * needed later.  Depending on the orientation of the stack (i.e.
3161 	 * the grow direction) we either map at the top of the range, the
3162 	 * bottom of the range or in the middle.
3163 	 *
3164 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3165 	 * and cow to be 0.  Possibly we should eliminate these as input
3166 	 * parameters, and just pass these values here in the insert call.
3167 	 */
3168 	if (orient == MAP_STACK_GROWS_DOWN)
3169 		bot = addrbos + max_ssize - init_ssize;
3170 	else if (orient == MAP_STACK_GROWS_UP)
3171 		bot = addrbos;
3172 	else
3173 		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3174 	top = bot + init_ssize;
3175 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3176 
3177 	/* Now set the avail_ssize amount. */
3178 	if (rv == KERN_SUCCESS) {
3179 		if (prev_entry != &map->header)
3180 			vm_map_clip_end(map, prev_entry, bot);
3181 		new_entry = prev_entry->next;
3182 		if (new_entry->end != top || new_entry->start != bot)
3183 			panic("Bad entry start/end for new stack entry");
3184 
3185 		new_entry->avail_ssize = max_ssize - init_ssize;
3186 		if (orient & MAP_STACK_GROWS_DOWN)
3187 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3188 		if (orient & MAP_STACK_GROWS_UP)
3189 			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3190 	}
3191 
3192 	vm_map_unlock(map);
3193 	return (rv);
3194 }
3195 
3196 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3197  * desired address is already mapped, or if we successfully grow
3198  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3199  * stack range (this is strange, but preserves compatibility with
3200  * the grow function in vm_machdep.c).
3201  */
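/*
 * A usage note: this routine is expected to be reached from the page fault
 * path when the faulting address lies just outside an existing stack entry,
 * roughly as (a sketch; the actual caller differs):
 *
 *	if (vm_map_growstack(curproc, vaddr) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *
 * Growth is bounded by the entry's avail_ssize and by the RLIMIT_STACK and
 * RLIMIT_VMEM resource limits, as checked below.
 */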
3202 int
3203 vm_map_growstack(struct proc *p, vm_offset_t addr)
3204 {
3205 	vm_map_entry_t next_entry, prev_entry;
3206 	vm_map_entry_t new_entry, stack_entry;
3207 	struct vmspace *vm = p->p_vmspace;
3208 	vm_map_t map = &vm->vm_map;
3209 	vm_offset_t end;
3210 	size_t grow_amount, max_grow;
3211 	rlim_t stacklim, vmemlim;
3212 	int is_procstack, rv;
3213 	struct uidinfo *uip;
3214 
3215 Retry:
3216 	PROC_LOCK(p);
3217 	stacklim = lim_cur(p, RLIMIT_STACK);
3218 	vmemlim = lim_cur(p, RLIMIT_VMEM);
3219 	PROC_UNLOCK(p);
3220 
3221 	vm_map_lock_read(map);
3222 
3223 	/* If addr is already in the entry range, no need to grow.*/
3224 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3225 		vm_map_unlock_read(map);
3226 		return (KERN_SUCCESS);
3227 	}
3228 
3229 	next_entry = prev_entry->next;
3230 	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3231 		/*
3232 		 * This entry does not grow upwards. Since the address lies
3233 		 * beyond this entry, the next entry (if one exists) has to
3234 		 * be a downward growable entry. The entry list header is
3235 		 * never a growable entry, so it suffices to check the flags.
3236 		 */
3237 		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3238 			vm_map_unlock_read(map);
3239 			return (KERN_SUCCESS);
3240 		}
3241 		stack_entry = next_entry;
3242 	} else {
3243 		/*
3244 		 * This entry grows upward. If the next entry does not at
3245 		 * least grow downwards, this is the entry we need to grow.
3246 		 * Otherwise we have two possible choices and we have to
3247 		 * select one.
3248 		 */
3249 		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3250 			/*
3251 			 * We have two choices: grow the entry closest to
3252 			 * the address to minimize the amount of growth.
3253 			 */
3254 			if (addr - prev_entry->end <= next_entry->start - addr)
3255 				stack_entry = prev_entry;
3256 			else
3257 				stack_entry = next_entry;
3258 		} else
3259 			stack_entry = prev_entry;
3260 	}
3261 
3262 	if (stack_entry == next_entry) {
3263 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3264 		KASSERT(addr < stack_entry->start, ("foo"));
3265 		end = (prev_entry != &map->header) ? prev_entry->end :
3266 		    stack_entry->start - stack_entry->avail_ssize;
3267 		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3268 		max_grow = stack_entry->start - end;
3269 	} else {
3270 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3271 		KASSERT(addr >= stack_entry->end, ("foo"));
3272 		end = (next_entry != &map->header) ? next_entry->start :
3273 		    stack_entry->end + stack_entry->avail_ssize;
3274 		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3275 		max_grow = end - stack_entry->end;
3276 	}
3277 
3278 	if (grow_amount > stack_entry->avail_ssize) {
3279 		vm_map_unlock_read(map);
3280 		return (KERN_NO_SPACE);
3281 	}
3282 
3283 	/*
3284 	 * If there is no longer enough space between the entries, fail and
3285 	 * adjust the available space.  Note: this should only happen if the
3286 	 * user has mapped into the stack area after the stack was created,
3287 	 * and is probably an error.
3288 	 *
3289 	 * This also effectively destroys any guard page the user might have
3290 	 * intended by limiting the stack size.
3291 	 */
3292 	if (grow_amount > max_grow) {
3293 		if (vm_map_lock_upgrade(map))
3294 			goto Retry;
3295 
3296 		stack_entry->avail_ssize = max_grow;
3297 
3298 		vm_map_unlock(map);
3299 		return (KERN_NO_SPACE);
3300 	}
3301 
3302 	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3303 
3304 	/*
3305 	 * If this is the main process stack, see if we're over the stack
3306 	 * limit.
3307 	 */
3308 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3309 		vm_map_unlock_read(map);
3310 		return (KERN_NO_SPACE);
3311 	}
3312 
3313 	/* Round the grow amount up to a multiple of sgrowsiz. */
3314 	grow_amount = roundup(grow_amount, sgrowsiz);
3315 	if (grow_amount > stack_entry->avail_ssize)
3316 		grow_amount = stack_entry->avail_ssize;
3317 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3318 		grow_amount = stacklim - ctob(vm->vm_ssize);
3319 	}
3320 
3321 	/* If we would blow our VMEM resource limit, no go */
3322 	if (map->size + grow_amount > vmemlim) {
3323 		vm_map_unlock_read(map);
3324 		return (KERN_NO_SPACE);
3325 	}
3326 
3327 	if (vm_map_lock_upgrade(map))
3328 		goto Retry;
3329 
3330 	if (stack_entry == next_entry) {
3331 		/*
3332 		 * Growing downward.
3333 		 */
3334 		/* Get the preliminary new entry start value */
3335 		addr = stack_entry->start - grow_amount;
3336 
3337 		/*
3338 		 * If this puts us into the previous entry, cut back our
3339 		 * growth to the available space. Also, see the note above.
3340 		 */
3341 		if (addr < end) {
3342 			stack_entry->avail_ssize = max_grow;
3343 			addr = end;
3344 		}
3345 
3346 		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3347 		    p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
3348 
3349 		/* Adjust the available stack space by the amount we grew. */
3350 		if (rv == KERN_SUCCESS) {
3351 			if (prev_entry != &map->header)
3352 				vm_map_clip_end(map, prev_entry, addr);
3353 			new_entry = prev_entry->next;
3354 			KASSERT(new_entry == stack_entry->prev, ("foo"));
3355 			KASSERT(new_entry->end == stack_entry->start, ("foo"));
3356 			KASSERT(new_entry->start == addr, ("foo"));
3357 			grow_amount = new_entry->end - new_entry->start;
3358 			new_entry->avail_ssize = stack_entry->avail_ssize -
3359 			    grow_amount;
3360 			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3361 			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3362 		}
3363 	} else {
3364 		/*
3365 		 * Growing upward.
3366 		 */
3367 		addr = stack_entry->end + grow_amount;
3368 
3369 		/*
3370 		 * If this puts us into the next entry, cut back our growth
3371 		 * to the available space. Also, see the note above.
3372 		 */
3373 		if (addr > end) {
3374 			stack_entry->avail_ssize = end - stack_entry->end;
3375 			addr = end;
3376 		}
3377 
3378 		grow_amount = addr - stack_entry->end;
3379 		uip = stack_entry->uip;
3380 		if (uip == NULL && stack_entry->object.vm_object != NULL)
3381 			uip = stack_entry->object.vm_object->uip;
3382 		if (uip != NULL && !swap_reserve_by_uid(grow_amount, uip))
3383 			rv = KERN_NO_SPACE;
3384 		/* Grow the underlying object if applicable. */
3385 		else if (stack_entry->object.vm_object == NULL ||
3386 			 vm_object_coalesce(stack_entry->object.vm_object,
3387 			 stack_entry->offset,
3388 			 (vm_size_t)(stack_entry->end - stack_entry->start),
3389 			 (vm_size_t)grow_amount, uip != NULL)) {
3390 			map->size += (addr - stack_entry->end);
3391 			/* Update the current entry. */
3392 			stack_entry->end = addr;
3393 			stack_entry->avail_ssize -= grow_amount;
3394 			vm_map_entry_resize_free(map, stack_entry);
3395 			rv = KERN_SUCCESS;
3396 
3397 			if (next_entry != &map->header)
3398 				vm_map_clip_start(map, next_entry, addr);
3399 		} else
3400 			rv = KERN_FAILURE;
3401 	}
3402 
3403 	if (rv == KERN_SUCCESS && is_procstack)
3404 		vm->vm_ssize += btoc(grow_amount);
3405 
3406 	vm_map_unlock(map);
3407 
3408 	/*
3409 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3410 	 */
3411 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3412 		vm_map_wire(map,
3413 		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3414 		    (stack_entry == next_entry) ? stack_entry->start : addr,
3415 		    (p->p_flag & P_SYSTEM)
3416 		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3417 		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3418 	}
3419 
3420 	return (rv);
3421 }
3422 
3423 /*
3424  * Unshare the specified VM space for exec.  If other processes are
3425  * mapped to it, then create a new one.  The new vmspace starts out empty.
3426  */
3427 int
3428 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3429 {
3430 	struct vmspace *oldvmspace = p->p_vmspace;
3431 	struct vmspace *newvmspace;
3432 
3433 	newvmspace = vmspace_alloc(minuser, maxuser);
3434 	if (newvmspace == NULL)
3435 		return (ENOMEM);
3436 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3437 	/*
3438 	 * This code is written like this for prototype purposes.  The
3439 	 * goal is to avoid running down the vmspace here, but to let the
3440 	 * other processes that are still using the vmspace finally run
3441 	 * it down.  Even though there is little or no chance of blocking
3442 	 * here, it is a good idea to keep this form for future mods.
3443 	 */
3444 	PROC_VMSPACE_LOCK(p);
3445 	p->p_vmspace = newvmspace;
3446 	PROC_VMSPACE_UNLOCK(p);
3447 	if (p == curthread->td_proc)
3448 		pmap_activate(curthread);
3449 	vmspace_free(oldvmspace);
3450 	return (0);
3451 }
3452 
3453 /*
3454  * Unshare the specified VM space for forcing COW.  This
3455  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3456  */
3457 int
3458 vmspace_unshare(struct proc *p)
3459 {
3460 	struct vmspace *oldvmspace = p->p_vmspace;
3461 	struct vmspace *newvmspace;
3462 	vm_ooffset_t fork_charge;
3463 
3464 	if (oldvmspace->vm_refcnt == 1)
3465 		return (0);
3466 	fork_charge = 0;
3467 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3468 	if (newvmspace == NULL)
3469 		return (ENOMEM);
3470 	if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) {
3471 		vmspace_free(newvmspace);
3472 		return (ENOMEM);
3473 	}
3474 	PROC_VMSPACE_LOCK(p);
3475 	p->p_vmspace = newvmspace;
3476 	PROC_VMSPACE_UNLOCK(p);
3477 	if (p == curthread->td_proc)
3478 		pmap_activate(curthread);
3479 	vmspace_free(oldvmspace);
3480 	return (0);
3481 }
3482 
3483 /*
3484  *	vm_map_lookup:
3485  *
3486  *	Finds the VM object, offset, and
3487  *	protection for a given virtual address in the
3488  *	specified map, assuming a page fault of the
3489  *	type specified.
3490  *
3491  *	Leaves the map in question locked for read; return
3492  *	values are guaranteed until a vm_map_lookup_done
3493  *	call is performed.  Note that the map argument
3494  *	is in/out; the returned map must be used in
3495  *	the call to vm_map_lookup_done.
3496  *
3497  *	A handle (out_entry) is returned for use in
3498  *	vm_map_lookup_done, to make that fast.
3499  *
3500  *	If a lookup is requested with "write protection"
3501  *	specified, the map may be changed to perform virtual
3502  *	copying operations, although the data referenced will
3503  *	remain the same.
3504  */
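/*
 *	A typical caller (e.g. the page fault handler) is expected to use
 *	this routine roughly as follows (a sketch):
 *
 *		result = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *		    &object, &pindex, &prot, &wired);
 *		if (result != KERN_SUCCESS)
 *			return (result);
 *		... resolve the fault using object and pindex ...
 *		vm_map_lookup_done(map, entry);
 */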
3505 int
3506 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3507 	      vm_offset_t vaddr,
3508 	      vm_prot_t fault_typea,
3509 	      vm_map_entry_t *out_entry,	/* OUT */
3510 	      vm_object_t *object,		/* OUT */
3511 	      vm_pindex_t *pindex,		/* OUT */
3512 	      vm_prot_t *out_prot,		/* OUT */
3513 	      boolean_t *wired)			/* OUT */
3514 {
3515 	vm_map_entry_t entry;
3516 	vm_map_t map = *var_map;
3517 	vm_prot_t prot;
3518 	vm_prot_t fault_type = fault_typea;
3519 	vm_object_t eobject;
3520 	struct uidinfo *uip;
3521 	vm_ooffset_t size;
3522 
3523 RetryLookup:;
3524 
3525 	vm_map_lock_read(map);
3526 
3527 	/*
3528 	 * Lookup the faulting address.
3529 	 */
3530 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3531 		vm_map_unlock_read(map);
3532 		return (KERN_INVALID_ADDRESS);
3533 	}
3534 
3535 	entry = *out_entry;
3536 
3537 	/*
3538 	 * Handle submaps.
3539 	 */
3540 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3541 		vm_map_t old_map = map;
3542 
3543 		*var_map = map = entry->object.sub_map;
3544 		vm_map_unlock_read(old_map);
3545 		goto RetryLookup;
3546 	}
3547 
3548 	/*
3549 	 * Check whether this task is allowed to have this page.
3550 	 */
3551 	prot = entry->protection;
3552 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3553 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3554 		vm_map_unlock_read(map);
3555 		return (KERN_PROTECTION_FAILURE);
3556 	}
3557 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3558 	    (entry->eflags & MAP_ENTRY_COW) &&
3559 	    (fault_type & VM_PROT_WRITE)) {
3560 		vm_map_unlock_read(map);
3561 		return (KERN_PROTECTION_FAILURE);
3562 	}
3563 
3564 	/*
3565 	 * If this page is not pageable, we have to get it for all possible
3566 	 * accesses.
3567 	 */
3568 	*wired = (entry->wired_count != 0);
3569 	if (*wired)
3570 		fault_type = entry->protection;
3571 	size = entry->end - entry->start;
3572 	/*
3573 	 * If the entry was copy-on-write, we either shadow it now or demote the access.
3574 	 */
3575 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3576 		/*
3577 		 * If we want to write the page, we may as well handle that
3578 		 * now since we've got the map locked.
3579 		 *
3580 		 * If we don't need to write the page, we just demote the
3581 		 * permissions allowed.
3582 		 */
3583 		if ((fault_type & VM_PROT_WRITE) != 0 ||
3584 		    (fault_typea & VM_PROT_COPY) != 0) {
3585 			/*
3586 			 * Make a new object, and place it in the object
3587 			 * chain.  Note that no new references have appeared
3588 			 * -- one just moved from the map to the new
3589 			 * object.
3590 			 */
3591 			if (vm_map_lock_upgrade(map))
3592 				goto RetryLookup;
3593 
3594 			if (entry->uip == NULL) {
3595 				/*
3596 				 * The debugger owner is charged for
3597 				 * the memory.
3598 				 */
3599 				uip = curthread->td_ucred->cr_ruidinfo;
3600 				uihold(uip);
3601 				if (!swap_reserve_by_uid(size, uip)) {
3602 					uifree(uip);
3603 					vm_map_unlock(map);
3604 					return (KERN_RESOURCE_SHORTAGE);
3605 				}
3606 				entry->uip = uip;
3607 			}
3608 			vm_object_shadow(
3609 			    &entry->object.vm_object,
3610 			    &entry->offset,
3611 			    atop(size));
3612 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3613 			eobject = entry->object.vm_object;
3614 			if (eobject->uip != NULL) {
3615 				/*
3616 				 * The object was not shadowed; release the redundant charge.
3617 				 */
3618 				swap_release_by_uid(size, entry->uip);
3619 				uifree(entry->uip);
3620 				entry->uip = NULL;
3621 			} else if (entry->uip != NULL) {
3622 				VM_OBJECT_LOCK(eobject);
3623 				eobject->uip = entry->uip;
3624 				eobject->charge = size;
3625 				VM_OBJECT_UNLOCK(eobject);
3626 				entry->uip = NULL;
3627 			}
3628 
3629 			vm_map_lock_downgrade(map);
3630 		} else {
3631 			/*
3632 			 * We're attempting to read a copy-on-write page --
3633 			 * don't allow writes.
3634 			 */
3635 			prot &= ~VM_PROT_WRITE;
3636 		}
3637 	}
3638 
3639 	/*
3640 	 * Create an object if necessary.
3641 	 */
3642 	if (entry->object.vm_object == NULL &&
3643 	    !map->system_map) {
3644 		if (vm_map_lock_upgrade(map))
3645 			goto RetryLookup;
3646 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3647 		    atop(size));
3648 		entry->offset = 0;
3649 		if (entry->uip != NULL) {
3650 			VM_OBJECT_LOCK(entry->object.vm_object);
3651 			entry->object.vm_object->uip = entry->uip;
3652 			entry->object.vm_object->charge = size;
3653 			VM_OBJECT_UNLOCK(entry->object.vm_object);
3654 			entry->uip = NULL;
3655 		}
3656 		vm_map_lock_downgrade(map);
3657 	}
3658 
3659 	/*
3660 	 * Return the object/offset from this entry.  If the entry was
3661 	 * copy-on-write or empty, it has been fixed up.
3662 	 */
3663 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3664 	*object = entry->object.vm_object;
3665 
3666 	*out_prot = prot;
3667 	return (KERN_SUCCESS);
3668 }
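
/*
 * Illustrative sketch (not part of the original source, kept disabled):
 * the usual calling pattern for vm_map_lookup()/vm_map_lookup_done().
 * The function and variable names below are hypothetical.
 */
#if 0
static int
example_resolve(vm_map_t map, vm_offset_t va)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	/*
	 * Translate "va" into (object, pindex); on success the map is
	 * returned read-locked and may have been replaced by a submap.
	 */
	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* ... use (object, pindex), e.g. look up or fault in the page ... */

	/* Release the read lock taken by vm_map_lookup(). */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif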
3669 
3670 /*
3671  *	vm_map_lookup_locked:
3672  *
3673  *	Lookup the faulting address.  A version of vm_map_lookup that returns
3674  *	KERN_FAILURE instead of blocking on map lock or memory allocation.
3675  */
3676 int
3677 vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
3678 		     vm_offset_t vaddr,
3679 		     vm_prot_t fault_typea,
3680 		     vm_map_entry_t *out_entry,	/* OUT */
3681 		     vm_object_t *object,	/* OUT */
3682 		     vm_pindex_t *pindex,	/* OUT */
3683 		     vm_prot_t *out_prot,	/* OUT */
3684 		     boolean_t *wired)		/* OUT */
3685 {
3686 	vm_map_entry_t entry;
3687 	vm_map_t map = *var_map;
3688 	vm_prot_t prot;
3689 	vm_prot_t fault_type = fault_typea;
3690 
3691 	/*
3692 	 * Lookup the faulting address.
3693 	 */
3694 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
3695 		return (KERN_INVALID_ADDRESS);
3696 
3697 	entry = *out_entry;
3698 
3699 	/*
3700 	 * Fail if the entry refers to a submap.
3701 	 */
3702 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3703 		return (KERN_FAILURE);
3704 
3705 	/*
3706 	 * Check whether this task is allowed to have this page.
3707 	 */
3708 	prot = entry->protection;
3709 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
3710 	if ((fault_type & prot) != fault_type)
3711 		return (KERN_PROTECTION_FAILURE);
3712 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3713 	    (entry->eflags & MAP_ENTRY_COW) &&
3714 	    (fault_type & VM_PROT_WRITE))
3715 		return (KERN_PROTECTION_FAILURE);
3716 
3717 	/*
3718 	 * If this page is not pageable, we have to get it for all possible
3719 	 * accesses.
3720 	 */
3721 	*wired = (entry->wired_count != 0);
3722 	if (*wired)
3723 		fault_type = entry->protection;
3724 
3725 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3726 		/*
3727 		 * Fail if the entry was copy-on-write for a write fault.
3728 		 */
3729 		if (fault_type & VM_PROT_WRITE)
3730 			return (KERN_FAILURE);
3731 		/*
3732 		 * We're attempting to read a copy-on-write page --
3733 		 * don't allow writes.
3734 		 */
3735 		prot &= ~VM_PROT_WRITE;
3736 	}
3737 
3738 	/*
3739 	 * Fail if an object should be created.
3740 	 */
3741 	if (entry->object.vm_object == NULL && !map->system_map)
3742 		return (KERN_FAILURE);
3743 
3744 	/*
3745 	 * Return the object/offset from this entry.  If the entry was
3746 	 * copy-on-write or empty, it has been fixed up.
3747 	 */
3748 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3749 	*object = entry->object.vm_object;
3750 
3751 	*out_prot = prot;
3752 	return (KERN_SUCCESS);
3753 }
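
/*
 * Illustrative sketch (not part of the original source, kept disabled):
 * a caller that already holds the map read lock can try the non-sleeping
 * lookup first and fall back to the blocking vm_map_lookup() when it
 * returns KERN_FAILURE.  example_try_lookup() is a hypothetical name.
 */
#if 0
static int
example_try_lookup(vm_map_t *mapp, vm_offset_t va,
    vm_map_entry_t *entryp, vm_object_t *objp, vm_pindex_t *pindexp,
    vm_prot_t *protp, boolean_t *wiredp)
{
	int rv;

	/* The caller is expected to hold the map read lock. */
	rv = vm_map_lookup_locked(mapp, va, VM_PROT_READ, entryp, objp,
	    pindexp, protp, wiredp);
	if (rv != KERN_FAILURE)
		return (rv);

	/*
	 * The fast path would have had to sleep; drop the lock and redo
	 * the lookup with the blocking variant, which takes the read
	 * lock itself.
	 */
	vm_map_unlock_read(*mapp);
	return (vm_map_lookup(mapp, va, VM_PROT_READ, entryp, objp,
	    pindexp, protp, wiredp));
}
#endif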
3754 
3755 /*
3756  *	vm_map_lookup_done:
3757  *
3758  *	Releases locks acquired by a vm_map_lookup
3759  *	(according to the handle returned by that lookup).
3760  */
3761 void
3762 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
3763 {
3764 	/*
3765 	 * Unlock the main-level map
3766 	 */
3767 	vm_map_unlock_read(map);
3768 }
3769 
3770 #include "opt_ddb.h"
3771 #ifdef DDB
3772 #include <sys/kernel.h>
3773 
3774 #include <ddb/ddb.h>
3775 
3776 /*
3777  *	vm_map_print:	[ debug ]
3778  */
3779 DB_SHOW_COMMAND(map, vm_map_print)
3780 {
3781 	static int nlines;
3782 	/* XXX convert args. */
3783 	vm_map_t map = (vm_map_t)addr;
3784 	boolean_t full = have_addr;
3785 
3786 	vm_map_entry_t entry;
3787 
3788 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3789 	    (void *)map,
3790 	    (void *)map->pmap, map->nentries, map->timestamp);
3791 	nlines++;
3792 
3793 	if (!full && db_indent)
3794 		return;
3795 
3796 	db_indent += 2;
3797 	for (entry = map->header.next; entry != &map->header;
3798 	    entry = entry->next) {
3799 		db_iprintf("map entry %p: start=%p, end=%p\n",
3800 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3801 		nlines++;
3802 		{
3803 			static const char * const inheritance_name[4] =
3804 			{"share", "copy", "none", "donate_copy"};
3805 
3806 			db_iprintf(" prot=%x/%x/%s",
3807 			    entry->protection,
3808 			    entry->max_protection,
3809 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3810 			if (entry->wired_count != 0)
3811 				db_printf(", wired");
3812 		}
3813 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3814 			db_printf(", share=%p, offset=0x%jx\n",
3815 			    (void *)entry->object.sub_map,
3816 			    (uintmax_t)entry->offset);
3817 			nlines++;
3818 			if ((entry->prev == &map->header) ||
3819 			    (entry->prev->object.sub_map !=
3820 				entry->object.sub_map)) {
3821 				db_indent += 2;
3822 				vm_map_print((db_expr_t)(intptr_t)
3823 					     entry->object.sub_map,
3824 					     full, 0, (char *)0);
3825 				db_indent -= 2;
3826 			}
3827 		} else {
3828 			if (entry->uip != NULL)
3829 				db_printf(", uip %d", entry->uip->ui_uid);
3830 			db_printf(", object=%p, offset=0x%jx",
3831 			    (void *)entry->object.vm_object,
3832 			    (uintmax_t)entry->offset);
3833 			if (entry->object.vm_object && entry->object.vm_object->uip)
3834 				db_printf(", obj uip %d charge %jx",
3835 				    entry->object.vm_object->uip->ui_uid,
3836 				    (uintmax_t)entry->object.vm_object->charge);
3837 			if (entry->eflags & MAP_ENTRY_COW)
3838 				db_printf(", copy (%s)",
3839 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3840 			db_printf("\n");
3841 			nlines++;
3842 
3843 			if ((entry->prev == &map->header) ||
3844 			    (entry->prev->object.vm_object !=
3845 				entry->object.vm_object)) {
3846 				db_indent += 2;
3847 				vm_object_print((db_expr_t)(intptr_t)
3848 						entry->object.vm_object,
3849 						full, 0, (char *)0);
3850 				nlines += 4;
3851 				db_indent -= 2;
3852 			}
3853 		}
3854 	}
3855 	db_indent -= 2;
3856 	if (db_indent == 0)
3857 		nlines = 0;
3858 }
3859 
3860 
3861 DB_SHOW_COMMAND(procvm, procvm)
3862 {
3863 	struct proc *p;
3864 
3865 	if (have_addr) {
3866 		p = (struct proc *) addr;
3867 	} else {
3868 		p = curproc;
3869 	}
3870 
3871 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3872 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3873 	    (void *)vmspace_pmap(p->p_vmspace));
3874 
3875 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3876 }
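
/*
 * Illustrative ddb(4) session for the commands defined above (the
 * address shown is made up):
 *
 *	db> show map 0xfffff80002a68000
 *		prints that vm_map; supplying an address also selects the
 *		"full" output, including the backing VM objects
 *	db> show procvm
 *		prints the map of curproc; "show procvm <struct proc *>"
 *		prints another process's map
 */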
3877 
3878 #endif /* DDB */
3879