xref: /freebsd/sys/vm/vm_map.c (revision cbc89bfbfe8e7f75bd743851f4890a1f1c58f4be)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
175929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
18df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
19df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
20df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
21df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
22df8bae1dSRodney W. Grimes  *    without specific prior written permission.
23df8bae1dSRodney W. Grimes  *
24df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
35df8bae1dSRodney W. Grimes  *
363c4dd356SDavid Greenman  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  *
39df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40df8bae1dSRodney W. Grimes  * All rights reserved.
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43df8bae1dSRodney W. Grimes  *
44df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
45df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
46df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
47df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
48df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
55df8bae1dSRodney W. Grimes  *
56df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57df8bae1dSRodney W. Grimes  *  School of Computer Science
58df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
59df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
60df8bae1dSRodney W. Grimes  *
61df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
62df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
633c4dd356SDavid Greenman  *
64c3aac50fSPeter Wemm  * $FreeBSD$
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes /*
68df8bae1dSRodney W. Grimes  *	Virtual memory mapping module.
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes #include <sys/param.h>
72df8bae1dSRodney W. Grimes #include <sys/systm.h>
73fb919e4dSMark Murray #include <sys/lock.h>
74fb919e4dSMark Murray #include <sys/mutex.h>
75b5e8ce9fSBruce Evans #include <sys/proc.h>
76efeaf95aSDavid Greenman #include <sys/vmmeter.h>
77867a482dSJohn Dyson #include <sys/mman.h>
781efb74fbSJohn Dyson #include <sys/vnode.h>
792267af78SJulian Elischer #include <sys/resourcevar.h>
80df8bae1dSRodney W. Grimes 
81df8bae1dSRodney W. Grimes #include <vm/vm.h>
82efeaf95aSDavid Greenman #include <vm/vm_param.h>
83efeaf95aSDavid Greenman #include <vm/pmap.h>
84efeaf95aSDavid Greenman #include <vm/vm_map.h>
85df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
86df8bae1dSRodney W. Grimes #include <vm/vm_object.h>
8747221757SJohn Dyson #include <vm/vm_pager.h>
8826f9a767SRodney W. Grimes #include <vm/vm_kern.h>
89efeaf95aSDavid Greenman #include <vm/vm_extern.h>
903075778bSJohn Dyson #include <vm/vm_zone.h>
9121cd6e62SSeigo Tanimura #include <vm/swap_pager.h>
92df8bae1dSRodney W. Grimes 
93df8bae1dSRodney W. Grimes /*
94df8bae1dSRodney W. Grimes  *	Virtual memory maps provide for the mapping, protection,
95df8bae1dSRodney W. Grimes  *	and sharing of virtual memory objects.  In addition,
96df8bae1dSRodney W. Grimes  *	this module provides for an efficient virtual copy of
97df8bae1dSRodney W. Grimes  *	memory from one map to another.
98df8bae1dSRodney W. Grimes  *
99df8bae1dSRodney W. Grimes  *	Synchronization is required prior to most operations.
100df8bae1dSRodney W. Grimes  *
101df8bae1dSRodney W. Grimes  *	Maps consist of an ordered doubly-linked list of simple
102df8bae1dSRodney W. Grimes  *	entries; a single hint is used to speed up lookups.
103df8bae1dSRodney W. Grimes  *
104956f3135SPhilippe Charnier  *	Since portions of maps are specified by start/end addresses,
105df8bae1dSRodney W. Grimes  *	which may not align with existing map entries, all
106df8bae1dSRodney W. Grimes  *	routines merely "clip" entries to these start/end values.
107df8bae1dSRodney W. Grimes  *	[That is, an entry is split into two, bordering at a
108df8bae1dSRodney W. Grimes  *	start or end value.]  Note that these clippings may not
109df8bae1dSRodney W. Grimes  *	always be necessary (as the two resulting entries are then
110df8bae1dSRodney W. Grimes  *	not changed); however, the clipping is done for convenience.
111df8bae1dSRodney W. Grimes  *
112df8bae1dSRodney W. Grimes  *	As mentioned above, virtual copy operations are performed
113ad5fca3bSAlan Cox  *	by copying VM object references from one map to
114df8bae1dSRodney W. Grimes  *	another, and then marking both regions as copy-on-write.
115df8bae1dSRodney W. Grimes  */
116df8bae1dSRodney W. Grimes 
117df8bae1dSRodney W. Grimes /*
118df8bae1dSRodney W. Grimes  *	vm_map_startup:
119df8bae1dSRodney W. Grimes  *
120df8bae1dSRodney W. Grimes  *	Initialize the vm_map module.  Must be called before
121df8bae1dSRodney W. Grimes  *	any other vm_map routines.
122df8bae1dSRodney W. Grimes  *
123df8bae1dSRodney W. Grimes  *	Map and entry structures are allocated from the general
124df8bae1dSRodney W. Grimes  *	purpose memory pool with some exceptions:
125df8bae1dSRodney W. Grimes  *
126df8bae1dSRodney W. Grimes  *	- The kernel map and kmem submap are allocated statically.
127df8bae1dSRodney W. Grimes  *	- Kernel map entries are allocated out of a static pool.
128df8bae1dSRodney W. Grimes  *
129df8bae1dSRodney W. Grimes  *	These restrictions are necessary since malloc() uses the
130df8bae1dSRodney W. Grimes  *	maps and requires map entries.
131df8bae1dSRodney W. Grimes  */
132df8bae1dSRodney W. Grimes 
1333075778bSJohn Dyson static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
1342d8acc0fSJohn Dyson static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
1353075778bSJohn Dyson static struct vm_object kmapentobj, mapentobj, mapobj;
1361fc43fd1SAlan Cox 
137303b270bSEivind Eklund static struct vm_map_entry map_entry_init[MAX_MAPENT];
138303b270bSEivind Eklund static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
139303b270bSEivind Eklund static struct vm_map map_init[MAX_KMAP];
140b18bfc3dSJohn Dyson 
1410d94caffSDavid Greenman void
1421b40f8c0SMatthew Dillon vm_map_startup(void)
143df8bae1dSRodney W. Grimes {
1443075778bSJohn Dyson 	mapzone = &mapzone_store;
1450d65e566SJohn Dyson 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
1463075778bSJohn Dyson 		map_init, MAX_KMAP);
1473075778bSJohn Dyson 	kmapentzone = &kmapentzone_store;
1480d65e566SJohn Dyson 	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
1493075778bSJohn Dyson 		kmap_entry_init, MAX_KMAPENT);
1503075778bSJohn Dyson 	mapentzone = &mapentzone_store;
1510d65e566SJohn Dyson 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
1523075778bSJohn Dyson 		map_entry_init, MAX_MAPENT);
153df8bae1dSRodney W. Grimes }
154df8bae1dSRodney W. Grimes 
155df8bae1dSRodney W. Grimes /*
156df8bae1dSRodney W. Grimes  * Allocate a vmspace structure, including a vm_map and pmap,
157df8bae1dSRodney W. Grimes  * and initialize those structures.  The refcnt is set to 1.
158df8bae1dSRodney W. Grimes  * The remaining fields must be initialized by the caller.
159df8bae1dSRodney W. Grimes  */
160df8bae1dSRodney W. Grimes struct vmspace *
1612d8acc0fSJohn Dyson vmspace_alloc(min, max)
162df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
163df8bae1dSRodney W. Grimes {
164c0877f10SJohn Dyson 	struct vmspace *vm;
1650d94caffSDavid Greenman 
1660cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
1672d8acc0fSJohn Dyson 	vm = zalloc(vmspace_zone);
16821c641b2SJohn Baldwin 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
1692d8acc0fSJohn Dyson 	vm_map_init(&vm->vm_map, min, max);
170b1028ad1SLuoqi Chen 	pmap_pinit(vmspace_pmap(vm));
171b1028ad1SLuoqi Chen 	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
172df8bae1dSRodney W. Grimes 	vm->vm_refcnt = 1;
1732d8acc0fSJohn Dyson 	vm->vm_shm = NULL;
174df8bae1dSRodney W. Grimes 	return (vm);
175df8bae1dSRodney W. Grimes }
176df8bae1dSRodney W. Grimes 
177df8bae1dSRodney W. Grimes void
1781b40f8c0SMatthew Dillon vm_init2(void)
1791b40f8c0SMatthew Dillon {
1800d65e566SJohn Dyson 	zinitna(kmapentzone, &kmapentobj,
1810a80f406SJohn Dyson 		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
1820d65e566SJohn Dyson 	zinitna(mapentzone, &mapentobj,
1830a80f406SJohn Dyson 		NULL, 0, 0, 0, 1);
1840d65e566SJohn Dyson 	zinitna(mapzone, &mapobj,
1850a80f406SJohn Dyson 		NULL, 0, 0, 0, 1);
1862d8acc0fSJohn Dyson 	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
187ba9be04cSJohn Dyson 	pmap_init2();
18899448ed1SJohn Dyson 	vm_object_init2();
1893075778bSJohn Dyson }
1903075778bSJohn Dyson 
/*
 * Drop a reference on a vmspace and tear it down when the count reaches
 * zero.  NOTE(review): the zero check and the decrement below are not
 * atomic; serialization appears to rely on Giant (GIANT_REQUIRED) --
 * confirm before calling from a finer-locked context.
 */
void
vmspace_free(struct vmspace *vm)
{
	GIANT_REQUIRED;

	/* Freeing a vmspace whose count already hit zero is a caller bug. */
	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {

		CTR1(KTR_VM, "vmspace_free: %p", vm);
		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);

		/* Release pmap state, destroy the map's lock, free storage. */
		pmap_release(vmspace_pmap(vm));
		vm_map_destroy(&vm->vm_map);
		zfree(vmspace_zone, vm);
	}
}
217df8bae1dSRodney W. Grimes 
218df8bae1dSRodney W. Grimes /*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *			  vmspace.
 *
 *	Swap usage is determined by taking the proportional swap used by
223ff2b5645SMatthew Dillon  *	VM objects backing the VM map.  To make up for fractional losses,
224ff2b5645SMatthew Dillon  *	if the VM object has any swap use at all the associated map entries
225ff2b5645SMatthew Dillon  *	count for at least 1 swap page.
226ff2b5645SMatthew Dillon  */
/*
 * Return the approximate swap use, in pages, of the given vmspace by
 * prorating each backing swap object's usage over the fraction of the
 * object that each map entry actually maps.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	int count = 0;

	/* Walk the map's ordered, doubly-linked list of entries. */
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		vm_object_t object;

		/* Consider only non-submap entries backed by swap objects. */
		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
		    (object = cur->object.vm_object) != NULL &&
		    object->type == OBJT_SWAP
		) {
			/* Number of pages spanned by this entry. */
			int n = (cur->end - cur->start) / PAGE_SIZE;

			if (object->un_pager.swp.swp_bcount) {
				/*
				 * Prorate the object's swap blocks by the
				 * portion this entry maps; the trailing "+ 1"
				 * charges at least one page to any entry
				 * whose object has any swap use at all.
				 */
				count += object->un_pager.swp.swp_bcount *
				    SWAP_META_PAGES * n / object->size + 1;
			}
		}
	}
	return(count);
}
251ff2b5645SMatthew Dillon 
2521b40f8c0SMatthew Dillon u_char
2531b40f8c0SMatthew Dillon vm_map_entry_behavior(struct vm_map_entry *entry)
2541b40f8c0SMatthew Dillon {
2551b40f8c0SMatthew Dillon 	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
2561b40f8c0SMatthew Dillon }
2571b40f8c0SMatthew Dillon 
2581b40f8c0SMatthew Dillon void
2591b40f8c0SMatthew Dillon vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
2601b40f8c0SMatthew Dillon {
2611b40f8c0SMatthew Dillon 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
2621b40f8c0SMatthew Dillon 		(behavior & MAP_ENTRY_BEHAV_MASK);
2631b40f8c0SMatthew Dillon }
2641b40f8c0SMatthew Dillon 
2651b40f8c0SMatthew Dillon void
2661b40f8c0SMatthew Dillon vm_map_lock(vm_map_t map)
2671b40f8c0SMatthew Dillon {
2681b40f8c0SMatthew Dillon 	vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
269b40ce416SJulian Elischer 	if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
2701b40f8c0SMatthew Dillon 		panic("vm_map_lock: failed to get lock");
2711b40f8c0SMatthew Dillon 	map->timestamp++;
2721b40f8c0SMatthew Dillon }
2731b40f8c0SMatthew Dillon 
2741b40f8c0SMatthew Dillon void
2751b40f8c0SMatthew Dillon vm_map_unlock(vm_map_t map)
2761b40f8c0SMatthew Dillon {
2771b40f8c0SMatthew Dillon 	vm_map_printf("locking map LK_RELEASE: %p\n", map);
278b40ce416SJulian Elischer 	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
2791b40f8c0SMatthew Dillon }
2801b40f8c0SMatthew Dillon 
2811b40f8c0SMatthew Dillon void
2821b40f8c0SMatthew Dillon vm_map_lock_read(vm_map_t map)
2831b40f8c0SMatthew Dillon {
2841b40f8c0SMatthew Dillon 	vm_map_printf("locking map LK_SHARED: %p\n", map);
285b40ce416SJulian Elischer 	lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
2861b40f8c0SMatthew Dillon }
2871b40f8c0SMatthew Dillon 
2881b40f8c0SMatthew Dillon void
2891b40f8c0SMatthew Dillon vm_map_unlock_read(vm_map_t map)
2901b40f8c0SMatthew Dillon {
2911b40f8c0SMatthew Dillon 	vm_map_printf("locking map LK_RELEASE: %p\n", map);
292b40ce416SJulian Elischer 	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
2931b40f8c0SMatthew Dillon }
2941b40f8c0SMatthew Dillon 
2951b40f8c0SMatthew Dillon static __inline__ int
296b40ce416SJulian Elischer _vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
2971b40f8c0SMatthew Dillon 	int error;
2981b40f8c0SMatthew Dillon 
2991b40f8c0SMatthew Dillon 	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
300b40ce416SJulian Elischer 	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
3011b40f8c0SMatthew Dillon 	if (error == 0)
3021b40f8c0SMatthew Dillon 		map->timestamp++;
3031b40f8c0SMatthew Dillon 	return error;
3041b40f8c0SMatthew Dillon }
3051b40f8c0SMatthew Dillon 
3061b40f8c0SMatthew Dillon int
3071b40f8c0SMatthew Dillon vm_map_lock_upgrade(vm_map_t map)
3081b40f8c0SMatthew Dillon {
309b40ce416SJulian Elischer     return(_vm_map_lock_upgrade(map, curthread));
3101b40f8c0SMatthew Dillon }
3111b40f8c0SMatthew Dillon 
3121b40f8c0SMatthew Dillon void
3131b40f8c0SMatthew Dillon vm_map_lock_downgrade(vm_map_t map)
3141b40f8c0SMatthew Dillon {
3151b40f8c0SMatthew Dillon 	vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
316b40ce416SJulian Elischer 	lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
3171b40f8c0SMatthew Dillon }
3181b40f8c0SMatthew Dillon 
3191b40f8c0SMatthew Dillon void
3201b40f8c0SMatthew Dillon vm_map_set_recursive(vm_map_t map)
3211b40f8c0SMatthew Dillon {
3221b40f8c0SMatthew Dillon 	mtx_lock((map)->lock.lk_interlock);
3231b40f8c0SMatthew Dillon 	map->lock.lk_flags |= LK_CANRECURSE;
3241b40f8c0SMatthew Dillon 	mtx_unlock((map)->lock.lk_interlock);
3251b40f8c0SMatthew Dillon }
3261b40f8c0SMatthew Dillon 
3271b40f8c0SMatthew Dillon void
3281b40f8c0SMatthew Dillon vm_map_clear_recursive(vm_map_t map)
3291b40f8c0SMatthew Dillon {
3301b40f8c0SMatthew Dillon 	mtx_lock((map)->lock.lk_interlock);
3311b40f8c0SMatthew Dillon 	map->lock.lk_flags &= ~LK_CANRECURSE;
3321b40f8c0SMatthew Dillon 	mtx_unlock((map)->lock.lk_interlock);
3331b40f8c0SMatthew Dillon }
3341b40f8c0SMatthew Dillon 
3351b40f8c0SMatthew Dillon vm_offset_t
3361b40f8c0SMatthew Dillon vm_map_min(vm_map_t map)
3371b40f8c0SMatthew Dillon {
3381b40f8c0SMatthew Dillon 	return(map->min_offset);
3391b40f8c0SMatthew Dillon }
3401b40f8c0SMatthew Dillon 
3411b40f8c0SMatthew Dillon vm_offset_t
3421b40f8c0SMatthew Dillon vm_map_max(vm_map_t map)
3431b40f8c0SMatthew Dillon {
3441b40f8c0SMatthew Dillon 	return(map->max_offset);
3451b40f8c0SMatthew Dillon }
3461b40f8c0SMatthew Dillon 
3471b40f8c0SMatthew Dillon struct pmap *
3481b40f8c0SMatthew Dillon vm_map_pmap(vm_map_t map)
3491b40f8c0SMatthew Dillon {
3501b40f8c0SMatthew Dillon 	return(map->pmap);
3511b40f8c0SMatthew Dillon }
3521b40f8c0SMatthew Dillon 
3531b40f8c0SMatthew Dillon struct pmap *
3541b40f8c0SMatthew Dillon vmspace_pmap(struct vmspace *vmspace)
3551b40f8c0SMatthew Dillon {
3561b40f8c0SMatthew Dillon 	return &vmspace->vm_pmap;
3571b40f8c0SMatthew Dillon }
3581b40f8c0SMatthew Dillon 
/*
 *	vmspace_resident_count:
 *
 *	Return the resident page count of the vmspace's pmap.
 */
long
vmspace_resident_count(struct vmspace *vmspace)
{
	return (pmap_resident_count(vmspace_pmap(vmspace)));
}
3641b40f8c0SMatthew Dillon 
365ff2b5645SMatthew Dillon /*
366df8bae1dSRodney W. Grimes  *	vm_map_create:
367df8bae1dSRodney W. Grimes  *
368df8bae1dSRodney W. Grimes  *	Creates and returns a new empty VM map with
369df8bae1dSRodney W. Grimes  *	the given physical map structure, and having
370df8bae1dSRodney W. Grimes  *	the given lower and upper address bounds.
371df8bae1dSRodney W. Grimes  */
3720d94caffSDavid Greenman vm_map_t
3731b40f8c0SMatthew Dillon vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
374df8bae1dSRodney W. Grimes {
375c0877f10SJohn Dyson 	vm_map_t result;
376df8bae1dSRodney W. Grimes 
3770cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
3780cddd8f0SMatthew Dillon 
3793075778bSJohn Dyson 	result = zalloc(mapzone);
38021c641b2SJohn Baldwin 	CTR1(KTR_VM, "vm_map_create: %p", result);
3812d8acc0fSJohn Dyson 	vm_map_init(result, min, max);
382df8bae1dSRodney W. Grimes 	result->pmap = pmap;
383df8bae1dSRodney W. Grimes 	return (result);
384df8bae1dSRodney W. Grimes }
385df8bae1dSRodney W. Grimes 
386df8bae1dSRodney W. Grimes /*
387df8bae1dSRodney W. Grimes  * Initialize an existing vm_map structure
388df8bae1dSRodney W. Grimes  * such as that in the vmspace structure.
389df8bae1dSRodney W. Grimes  * The pmap is set elsewhere.
390df8bae1dSRodney W. Grimes  */
391df8bae1dSRodney W. Grimes void
3921b40f8c0SMatthew Dillon vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
393df8bae1dSRodney W. Grimes {
3940cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
39521c641b2SJohn Baldwin 
396df8bae1dSRodney W. Grimes 	map->header.next = map->header.prev = &map->header;
397df8bae1dSRodney W. Grimes 	map->nentries = 0;
398df8bae1dSRodney W. Grimes 	map->size = 0;
3993075778bSJohn Dyson 	map->system_map = 0;
400b823bbd6SMatthew Dillon 	map->infork = 0;
401df8bae1dSRodney W. Grimes 	map->min_offset = min;
402df8bae1dSRodney W. Grimes 	map->max_offset = max;
403df8bae1dSRodney W. Grimes 	map->first_free = &map->header;
404df8bae1dSRodney W. Grimes 	map->hint = &map->header;
405df8bae1dSRodney W. Grimes 	map->timestamp = 0;
4068f9110f6SJohn Dyson 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
407df8bae1dSRodney W. Grimes }
408df8bae1dSRodney W. Grimes 
409a18b1f1dSJason Evans void
410a18b1f1dSJason Evans vm_map_destroy(map)
411a18b1f1dSJason Evans 	struct vm_map *map;
412a18b1f1dSJason Evans {
4130cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
414a18b1f1dSJason Evans 	lockdestroy(&map->lock);
415a18b1f1dSJason Evans }
416a18b1f1dSJason Evans 
417df8bae1dSRodney W. Grimes /*
418b18bfc3dSJohn Dyson  *	vm_map_entry_dispose:	[ internal use only ]
419b18bfc3dSJohn Dyson  *
420b18bfc3dSJohn Dyson  *	Inverse of vm_map_entry_create.
421b18bfc3dSJohn Dyson  */
42262487bb4SJohn Dyson static void
4231b40f8c0SMatthew Dillon vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
424b18bfc3dSJohn Dyson {
425b79933ebSJohn Dyson 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
426b18bfc3dSJohn Dyson }
427b18bfc3dSJohn Dyson 
428b18bfc3dSJohn Dyson /*
429df8bae1dSRodney W. Grimes  *	vm_map_entry_create:	[ internal use only ]
430df8bae1dSRodney W. Grimes  *
431df8bae1dSRodney W. Grimes  *	Allocates a VM map entry for insertion.
432b28cb1caSAlfred Perlstein  *	No entry fields are filled in.
433df8bae1dSRodney W. Grimes  */
434f708ef1bSPoul-Henning Kamp static vm_map_entry_t
4351b40f8c0SMatthew Dillon vm_map_entry_create(vm_map_t map)
436df8bae1dSRodney W. Grimes {
4371f6889a1SMatthew Dillon 	vm_map_entry_t new_entry;
4381f6889a1SMatthew Dillon 
4391f6889a1SMatthew Dillon 	new_entry = zalloc((map->system_map || !mapentzone) ?
4401f6889a1SMatthew Dillon 		kmapentzone : mapentzone);
4411f6889a1SMatthew Dillon 	if (new_entry == NULL)
4421f6889a1SMatthew Dillon 	    panic("vm_map_entry_create: kernel resources exhausted");
4431f6889a1SMatthew Dillon 	return(new_entry);
444df8bae1dSRodney W. Grimes }
445df8bae1dSRodney W. Grimes 
446df8bae1dSRodney W. Grimes /*
447df8bae1dSRodney W. Grimes  *	vm_map_entry_{un,}link:
448df8bae1dSRodney W. Grimes  *
449df8bae1dSRodney W. Grimes  *	Insert/remove entries from maps.
450df8bae1dSRodney W. Grimes  */
45199c81ca9SAlan Cox static __inline void
45299c81ca9SAlan Cox vm_map_entry_link(vm_map_t map,
45399c81ca9SAlan Cox 		  vm_map_entry_t after_where,
45499c81ca9SAlan Cox 		  vm_map_entry_t entry)
45599c81ca9SAlan Cox {
45621c641b2SJohn Baldwin 
45721c641b2SJohn Baldwin 	CTR4(KTR_VM,
45821c641b2SJohn Baldwin 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
45921c641b2SJohn Baldwin 	    map->nentries, entry, after_where);
46099c81ca9SAlan Cox 	map->nentries++;
46199c81ca9SAlan Cox 	entry->prev = after_where;
46299c81ca9SAlan Cox 	entry->next = after_where->next;
46399c81ca9SAlan Cox 	entry->next->prev = entry;
46499c81ca9SAlan Cox 	after_where->next = entry;
465df8bae1dSRodney W. Grimes }
46699c81ca9SAlan Cox 
46799c81ca9SAlan Cox static __inline void
46899c81ca9SAlan Cox vm_map_entry_unlink(vm_map_t map,
46999c81ca9SAlan Cox 		    vm_map_entry_t entry)
47099c81ca9SAlan Cox {
47199c81ca9SAlan Cox 	vm_map_entry_t prev = entry->prev;
47299c81ca9SAlan Cox 	vm_map_entry_t next = entry->next;
47399c81ca9SAlan Cox 
47499c81ca9SAlan Cox 	next->prev = prev;
47599c81ca9SAlan Cox 	prev->next = next;
47699c81ca9SAlan Cox 	map->nentries--;
47721c641b2SJohn Baldwin 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
47821c641b2SJohn Baldwin 	    map->nentries, entry);
479df8bae1dSRodney W. Grimes }
480df8bae1dSRodney W. Grimes 
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.
 *
 *	Wrapped in do { } while (0) so the macro expands to exactly one
 *	statement; the old form carried its own trailing ';', which left
 *	an empty statement at every call site and would misbehave in an
 *	unbraced if/else body (CERT PRE10-C).
 */
#define	SAVE_HINT(map,value) \
		do { (map)->hint = (value); } while (0)
489df8bae1dSRodney W. Grimes 
490df8bae1dSRodney W. Grimes /*
491df8bae1dSRodney W. Grimes  *	vm_map_lookup_entry:	[ internal use only ]
492df8bae1dSRodney W. Grimes  *
493df8bae1dSRodney W. Grimes  *	Finds the map entry containing (or
494df8bae1dSRodney W. Grimes  *	immediately preceding) the specified address
495df8bae1dSRodney W. Grimes  *	in the given map; the entry is returned
496df8bae1dSRodney W. Grimes  *	in the "entry" parameter.  The boolean
497df8bae1dSRodney W. Grimes  *	result indicates whether the address is
498df8bae1dSRodney W. Grimes  *	actually contained in the map.
499df8bae1dSRodney W. Grimes  */
/*
 * Find the entry containing "address", or the entry immediately
 * preceding it.  *entry receives the result in either case; the return
 * value says whether the address was actually inside an entry.  Relies
 * on the entry list being sorted by start address.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	vm_map_entry_t last;

	GIANT_REQUIRED;
	/*
	 * Start looking either from the head of the list, or from the hint.
	 */

	cur = map->hint;

	/* The header is a sentinel, not a real entry; step past it. */
	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already looking
		 * at the entry we want (which is usually the case). Note also
		 * that we don't need to save the hint here... it is the same
		 * hint (unless we are at the header, in which case the hint
		 * didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			/* First entry ending past address starts after it:
			 * the address falls in a gap; stop here. */
			break;
		}
		cur = cur->next;
	}
	/* Not contained: report the predecessor (possibly the header). */
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}
566df8bae1dSRodney W. Grimes 
567df8bae1dSRodney W. Grimes /*
56830dcfc09SJohn Dyson  *	vm_map_insert:
56930dcfc09SJohn Dyson  *
57030dcfc09SJohn Dyson  *	Inserts the given whole VM object into the target
57130dcfc09SJohn Dyson  *	map at the specified address range.  The object's
57230dcfc09SJohn Dyson  *	size should match that of the address range.
57330dcfc09SJohn Dyson  *
57430dcfc09SJohn Dyson  *	Requires that the map be locked, and leaves it so.
5752aaeadf8SMatthew Dillon  *
5762aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
5772aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
57830dcfc09SJohn Dyson  */
57930dcfc09SJohn Dyson int
580b9dcd593SBruce Evans vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
581b9dcd593SBruce Evans 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
582b9dcd593SBruce Evans 	      int cow)
58330dcfc09SJohn Dyson {
584c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
585c0877f10SJohn Dyson 	vm_map_entry_t prev_entry;
58630dcfc09SJohn Dyson 	vm_map_entry_t temp_entry;
5879730a5daSPaul Saab 	vm_eflags_t protoeflags;
58830dcfc09SJohn Dyson 
5890cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
5900cddd8f0SMatthew Dillon 
59130dcfc09SJohn Dyson 	/*
59230dcfc09SJohn Dyson 	 * Check that the start and end points are not bogus.
59330dcfc09SJohn Dyson 	 */
59430dcfc09SJohn Dyson 
59530dcfc09SJohn Dyson 	if ((start < map->min_offset) || (end > map->max_offset) ||
59630dcfc09SJohn Dyson 	    (start >= end))
59730dcfc09SJohn Dyson 		return (KERN_INVALID_ADDRESS);
59830dcfc09SJohn Dyson 
59930dcfc09SJohn Dyson 	/*
60030dcfc09SJohn Dyson 	 * Find the entry prior to the proposed starting address; if it's part
60130dcfc09SJohn Dyson 	 * of an existing entry, this range is bogus.
60230dcfc09SJohn Dyson 	 */
60330dcfc09SJohn Dyson 
60430dcfc09SJohn Dyson 	if (vm_map_lookup_entry(map, start, &temp_entry))
60530dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
60630dcfc09SJohn Dyson 
60730dcfc09SJohn Dyson 	prev_entry = temp_entry;
60830dcfc09SJohn Dyson 
60930dcfc09SJohn Dyson 	/*
61030dcfc09SJohn Dyson 	 * Assert that the next entry doesn't overlap the end point.
61130dcfc09SJohn Dyson 	 */
61230dcfc09SJohn Dyson 
61330dcfc09SJohn Dyson 	if ((prev_entry->next != &map->header) &&
61430dcfc09SJohn Dyson 	    (prev_entry->next->start < end))
61530dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
61630dcfc09SJohn Dyson 
	/*
	 * Translate the caller's MAP_* cow request flags into the
	 * per-entry MAP_ENTRY_* eflags stored on the new entry.
	 */
617afa07f7eSJohn Dyson 	protoeflags = 0;
618afa07f7eSJohn Dyson 
619afa07f7eSJohn Dyson 	if (cow & MAP_COPY_ON_WRITE)
620e5f13bddSAlan Cox 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
621afa07f7eSJohn Dyson 
6224e045f93SAlan Cox 	if (cow & MAP_NOFAULT) {
623afa07f7eSJohn Dyson 		protoeflags |= MAP_ENTRY_NOFAULT;
624afa07f7eSJohn Dyson 
		/* A NOFAULT mapping can never have a backing object. */
6254e045f93SAlan Cox 		KASSERT(object == NULL,
6264e045f93SAlan Cox 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
6274e045f93SAlan Cox 	}
6284f79d873SMatthew Dillon 	if (cow & MAP_DISABLE_SYNCER)
6294f79d873SMatthew Dillon 		protoeflags |= MAP_ENTRY_NOSYNC;
6309730a5daSPaul Saab 	if (cow & MAP_DISABLE_COREDUMP)
6319730a5daSPaul Saab 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
6324f79d873SMatthew Dillon 
6332aaeadf8SMatthew Dillon 	if (object) {
63430dcfc09SJohn Dyson 		/*
6352aaeadf8SMatthew Dillon 		 * When object is non-NULL, it could be shared with another
6362aaeadf8SMatthew Dillon 		 * process.  We have to set or clear OBJ_ONEMAPPING
6372aaeadf8SMatthew Dillon 		 * appropriately.
63830dcfc09SJohn Dyson 		 */
6392aaeadf8SMatthew Dillon 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
6402aaeadf8SMatthew Dillon 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
6412aaeadf8SMatthew Dillon 		}
6424e045f93SAlan Cox 	}
	/*
	 * No object was supplied: if the previous entry abuts the new
	 * range with compatible attributes, try to grow its backing
	 * object (vm_object_coalesce) so the ranges can share it.
	 */
6434e045f93SAlan Cox 	else if ((prev_entry != &map->header) &&
6444e045f93SAlan Cox 		 (prev_entry->eflags == protoeflags) &&
6458cc7e047SJohn Dyson 		 (prev_entry->end == start) &&
6464e045f93SAlan Cox 		 (prev_entry->wired_count == 0) &&
6474e045f93SAlan Cox 		 ((prev_entry->object.vm_object == NULL) ||
6488cc7e047SJohn Dyson 		  vm_object_coalesce(prev_entry->object.vm_object,
64930dcfc09SJohn Dyson 				     OFF_TO_IDX(prev_entry->offset),
6508cc7e047SJohn Dyson 				     (vm_size_t)(prev_entry->end - prev_entry->start),
651cdc2c291SJohn Dyson 				     (vm_size_t)(end - prev_entry->end)))) {
65230dcfc09SJohn Dyson 		/*
6532aaeadf8SMatthew Dillon 		 * We were able to extend the object.  Determine if we
6542aaeadf8SMatthew Dillon 		 * can extend the previous map entry to include the
6552aaeadf8SMatthew Dillon 		 * new range as well.
65630dcfc09SJohn Dyson 		 */
6578cc7e047SJohn Dyson 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
6588cc7e047SJohn Dyson 		    (prev_entry->protection == prot) &&
6598cc7e047SJohn Dyson 		    (prev_entry->max_protection == max)) {
66030dcfc09SJohn Dyson 			map->size += (end - prev_entry->end);
66130dcfc09SJohn Dyson 			prev_entry->end = end;
6624e71e795SMatthew Dillon 			vm_map_simplify_entry(map, prev_entry);
66330dcfc09SJohn Dyson 			return (KERN_SUCCESS);
66430dcfc09SJohn Dyson 		}
6658cc7e047SJohn Dyson 
6662aaeadf8SMatthew Dillon 		/*
6672aaeadf8SMatthew Dillon 		 * If we can extend the object but cannot extend the
6682aaeadf8SMatthew Dillon 		 * map entry, we have to create a new map entry.  We
6692aaeadf8SMatthew Dillon 		 * must bump the ref count on the extended object to
6704e71e795SMatthew Dillon 		 * account for it.  object may be NULL.
6712aaeadf8SMatthew Dillon 		 */
6722aaeadf8SMatthew Dillon 		object = prev_entry->object.vm_object;
6732aaeadf8SMatthew Dillon 		offset = prev_entry->offset +
6742aaeadf8SMatthew Dillon 			(prev_entry->end - prev_entry->start);
6758cc7e047SJohn Dyson 		vm_object_reference(object);
676b18bfc3dSJohn Dyson 	}
6772aaeadf8SMatthew Dillon 
6782aaeadf8SMatthew Dillon 	/*
6792aaeadf8SMatthew Dillon 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
6802aaeadf8SMatthew Dillon 	 * in things like the buffer map where we manage kva but do not manage
6812aaeadf8SMatthew Dillon 	 * backing objects.
6822aaeadf8SMatthew Dillon 	 */
6838cc7e047SJohn Dyson 
68430dcfc09SJohn Dyson 	/*
68530dcfc09SJohn Dyson 	 * Create a new entry
68630dcfc09SJohn Dyson 	 */
68730dcfc09SJohn Dyson 
68830dcfc09SJohn Dyson 	new_entry = vm_map_entry_create(map);
68930dcfc09SJohn Dyson 	new_entry->start = start;
69030dcfc09SJohn Dyson 	new_entry->end = end;
69130dcfc09SJohn Dyson 
692afa07f7eSJohn Dyson 	new_entry->eflags = protoeflags;
69330dcfc09SJohn Dyson 	new_entry->object.vm_object = object;
69430dcfc09SJohn Dyson 	new_entry->offset = offset;
	/*
	 * NOTE(review): avail_ssize appears to be grow-down stack
	 * bookkeeping (see the MAP_STACK note below); 0 for an
	 * ordinary entry -- confirm against the MAP_STACK code.
	 */
6952267af78SJulian Elischer 	new_entry->avail_ssize = 0;
6962267af78SJulian Elischer 
69730dcfc09SJohn Dyson 	new_entry->inheritance = VM_INHERIT_DEFAULT;
69830dcfc09SJohn Dyson 	new_entry->protection = prot;
69930dcfc09SJohn Dyson 	new_entry->max_protection = max;
70030dcfc09SJohn Dyson 	new_entry->wired_count = 0;
701e5f251d2SAlan Cox 
70230dcfc09SJohn Dyson 	/*
70330dcfc09SJohn Dyson 	 * Insert the new entry into the list
70430dcfc09SJohn Dyson 	 */
70530dcfc09SJohn Dyson 
70630dcfc09SJohn Dyson 	vm_map_entry_link(map, prev_entry, new_entry);
70730dcfc09SJohn Dyson 	map->size += new_entry->end - new_entry->start;
70830dcfc09SJohn Dyson 
70930dcfc09SJohn Dyson 	/*
71030dcfc09SJohn Dyson 	 * Update the free space hint
71130dcfc09SJohn Dyson 	 */
71267bf6868SJohn Dyson 	if ((map->first_free == prev_entry) &&
7134f79d873SMatthew Dillon 	    (prev_entry->end >= new_entry->start)) {
71430dcfc09SJohn Dyson 		map->first_free = new_entry;
7154f79d873SMatthew Dillon 	}
71630dcfc09SJohn Dyson 
7171a484d28SMatthew Dillon #if 0
7181a484d28SMatthew Dillon 	/*
7191a484d28SMatthew Dillon 	 * Temporarily removed to avoid MAP_STACK panic, due to
7201a484d28SMatthew Dillon 	 * MAP_STACK being a huge hack.  Will be added back in
7211a484d28SMatthew Dillon 	 * when MAP_STACK (and the user stack mapping) is fixed.
7221a484d28SMatthew Dillon 	 */
7234e71e795SMatthew Dillon 	/*
7244e71e795SMatthew Dillon 	 * It may be possible to simplify the entry
7254e71e795SMatthew Dillon 	 */
7264e71e795SMatthew Dillon 	vm_map_simplify_entry(map, new_entry);
7271a484d28SMatthew Dillon #endif
7284e71e795SMatthew Dillon 
	/*
	 * Optionally pre-populate the pmap with the object's resident
	 * pages so the first touches do not fault.
	 */
7294f79d873SMatthew Dillon 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
730e972780aSAlan Cox 		pmap_object_init_pt(map->pmap, start,
731e972780aSAlan Cox 				    object, OFF_TO_IDX(offset), end - start,
732e972780aSAlan Cox 				    cow & MAP_PREFAULT_PARTIAL);
7334f79d873SMatthew Dillon 	}
734e972780aSAlan Cox 
73530dcfc09SJohn Dyson 	return (KERN_SUCCESS);
73630dcfc09SJohn Dyson }
73730dcfc09SJohn Dyson 
73830dcfc09SJohn Dyson /*
739df8bae1dSRodney W. Grimes  * Find sufficient space for `length' bytes in the given map, starting at
740df8bae1dSRodney W. Grimes  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
741df8bae1dSRodney W. Grimes  */
742df8bae1dSRodney W. Grimes int
7431b40f8c0SMatthew Dillon vm_map_findspace(
7441b40f8c0SMatthew Dillon 	vm_map_t map,
7451b40f8c0SMatthew Dillon 	vm_offset_t start,
7461b40f8c0SMatthew Dillon 	vm_size_t length,
7471b40f8c0SMatthew Dillon 	vm_offset_t *addr)
748df8bae1dSRodney W. Grimes {
749c0877f10SJohn Dyson 	vm_map_entry_t entry, next;
750c0877f10SJohn Dyson 	vm_offset_t end;
751df8bae1dSRodney W. Grimes 
7520cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
	/* Clamp the requested start into the map's valid range. */
753df8bae1dSRodney W. Grimes 	if (start < map->min_offset)
754df8bae1dSRodney W. Grimes 		start = map->min_offset;
755df8bae1dSRodney W. Grimes 	if (start > map->max_offset)
756df8bae1dSRodney W. Grimes 		return (1);
757df8bae1dSRodney W. Grimes 
758df8bae1dSRodney W. Grimes 	/*
7590d94caffSDavid Greenman 	 * Look for the first possible address; if there's already something
7600d94caffSDavid Greenman 	 * at this address, we have to start after it.
761df8bae1dSRodney W. Grimes 	 */
762df8bae1dSRodney W. Grimes 	if (start == map->min_offset) {
		/* Searching from the bottom: use the free-space hint. */
76367bf6868SJohn Dyson 		if ((entry = map->first_free) != &map->header)
764df8bae1dSRodney W. Grimes 			start = entry->end;
765df8bae1dSRodney W. Grimes 	} else {
766df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp;
7670d94caffSDavid Greenman 
768df8bae1dSRodney W. Grimes 		if (vm_map_lookup_entry(map, start, &tmp))
769df8bae1dSRodney W. Grimes 			start = tmp->end;
770df8bae1dSRodney W. Grimes 		entry = tmp;
771df8bae1dSRodney W. Grimes 	}
772df8bae1dSRodney W. Grimes 
773df8bae1dSRodney W. Grimes 	/*
7740d94caffSDavid Greenman 	 * Look through the rest of the map, trying to fit a new region in the
7750d94caffSDavid Greenman 	 * gap between existing regions, or after the very last region.
776df8bae1dSRodney W. Grimes 	 */
777df8bae1dSRodney W. Grimes 	for (;; start = (entry = next)->end) {
778df8bae1dSRodney W. Grimes 		/*
779df8bae1dSRodney W. Grimes 		 * Find the end of the proposed new region.  Be sure we didn't
780df8bae1dSRodney W. Grimes 		 * go beyond the end of the map, or wrap around the address;
781df8bae1dSRodney W. Grimes 		 * if so, we lose.  Otherwise, if this is the last entry, or
782df8bae1dSRodney W. Grimes 		 * if the proposed new region fits before the next entry, we
783df8bae1dSRodney W. Grimes 		 * win.
784df8bae1dSRodney W. Grimes 		 */
785df8bae1dSRodney W. Grimes 		end = start + length;
786df8bae1dSRodney W. Grimes 		if (end > map->max_offset || end < start)
787df8bae1dSRodney W. Grimes 			return (1);
788df8bae1dSRodney W. Grimes 		next = entry->next;
789df8bae1dSRodney W. Grimes 		if (next == &map->header || next->start >= end)
790df8bae1dSRodney W. Grimes 			break;
791df8bae1dSRodney W. Grimes 	}
792df8bae1dSRodney W. Grimes 	SAVE_HINT(map, entry);
793df8bae1dSRodney W. Grimes 	*addr = start;
	/*
	 * For the kernel map, grow the kernel page tables if the found
	 * region extends past the currently mapped kernel virtual space.
	 */
79499448ed1SJohn Dyson 	if (map == kernel_map) {
79599448ed1SJohn Dyson 		vm_offset_t ksize;
79699448ed1SJohn Dyson 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
79799448ed1SJohn Dyson 			pmap_growkernel(ksize);
79899448ed1SJohn Dyson 		}
79999448ed1SJohn Dyson 	}
800df8bae1dSRodney W. Grimes 	return (0);
801df8bae1dSRodney W. Grimes }
802df8bae1dSRodney W. Grimes 
803df8bae1dSRodney W. Grimes /*
804df8bae1dSRodney W. Grimes  *	vm_map_find finds an unallocated region in the target address
805df8bae1dSRodney W. Grimes  *	map with the given length.  The search is defined to be
806df8bae1dSRodney W. Grimes  *	first-fit from the specified address; the region found is
807df8bae1dSRodney W. Grimes  *	returned in the same parameter.
808df8bae1dSRodney W. Grimes  *
8092aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
8102aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
811df8bae1dSRodney W. Grimes  */
812df8bae1dSRodney W. Grimes int
813b9dcd593SBruce Evans vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
814b9dcd593SBruce Evans 	    vm_offset_t *addr,	/* IN/OUT */
815b9dcd593SBruce Evans 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
816b9dcd593SBruce Evans 	    vm_prot_t max, int cow)
817df8bae1dSRodney W. Grimes {
818c0877f10SJohn Dyson 	vm_offset_t start;
8198d6e8edeSDavid Greenman 	int result, s = 0;
820df8bae1dSRodney W. Grimes 
8210cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
8220cddd8f0SMatthew Dillon 
823df8bae1dSRodney W. Grimes 	start = *addr;
8248d6e8edeSDavid Greenman 
	/*
	 * NOTE(review): kmem_map is manipulated at raised spl --
	 * presumably to keep interrupt-time kernel memory allocation
	 * from racing us; confirm against kmem_malloc().
	 */
82508442f8aSBosko Milekic 	if (map == kmem_map)
826b18bfc3dSJohn Dyson 		s = splvm();
8278d6e8edeSDavid Greenman 
828bea41bcfSDavid Greenman 	vm_map_lock(map);
829df8bae1dSRodney W. Grimes 	if (find_space) {
		/* Caller wants us to pick the address (first fit from start). */
830df8bae1dSRodney W. Grimes 		if (vm_map_findspace(map, start, length, addr)) {
831df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
83208442f8aSBosko Milekic 			if (map == kmem_map)
8338d6e8edeSDavid Greenman 				splx(s);
834df8bae1dSRodney W. Grimes 			return (KERN_NO_SPACE);
835df8bae1dSRodney W. Grimes 		}
836df8bae1dSRodney W. Grimes 		start = *addr;
837df8bae1dSRodney W. Grimes 	}
838bd7e5f99SJohn Dyson 	result = vm_map_insert(map, object, offset,
839bd7e5f99SJohn Dyson 		start, start + length, prot, max, cow);
840df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
8418d6e8edeSDavid Greenman 
84208442f8aSBosko Milekic 	if (map == kmem_map)
8438d6e8edeSDavid Greenman 		splx(s);
8448d6e8edeSDavid Greenman 
845df8bae1dSRodney W. Grimes 	return (result);
846df8bae1dSRodney W. Grimes }
847df8bae1dSRodney W. Grimes 
848df8bae1dSRodney W. Grimes /*
849b7b2aac2SJohn Dyson  *	vm_map_simplify_entry:
85067bf6868SJohn Dyson  *
8514e71e795SMatthew Dillon  *	Simplify the given map entry by merging with either neighbor.  This
8524e71e795SMatthew Dillon  *	routine also has the ability to merge with both neighbors.
8534e71e795SMatthew Dillon  *
8544e71e795SMatthew Dillon  *	The map must be locked.
8554e71e795SMatthew Dillon  *
8564e71e795SMatthew Dillon  *	This routine guarentees that the passed entry remains valid (though
8574e71e795SMatthew Dillon  *	possibly extended).  When merging, this routine may delete one or
8584e71e795SMatthew Dillon  *	both neighbors.
859df8bae1dSRodney W. Grimes  */
860b7b2aac2SJohn Dyson void
8611b40f8c0SMatthew Dillon vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
862df8bae1dSRodney W. Grimes {
863308c24baSJohn Dyson 	vm_map_entry_t next, prev;
864b7b2aac2SJohn Dyson 	vm_size_t prevsize, esize;
865df8bae1dSRodney W. Grimes 
8660cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
8670cddd8f0SMatthew Dillon 
	/* Submap entries are never merged. */
8689fdfe602SMatthew Dillon 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
869df8bae1dSRodney W. Grimes 		return;
870308c24baSJohn Dyson 
	/*
	 * Try to absorb the previous entry: ranges must abut, and the
	 * object, offset continuity, eflags, protections, inheritance
	 * and wiring must all match.
	 */
871308c24baSJohn Dyson 	prev = entry->prev;
872308c24baSJohn Dyson 	if (prev != &map->header) {
87367bf6868SJohn Dyson 		prevsize = prev->end - prev->start;
87467bf6868SJohn Dyson 		if ( (prev->end == entry->start) &&
87567bf6868SJohn Dyson 		     (prev->object.vm_object == entry->object.vm_object) &&
87695e5e988SJohn Dyson 		     (!prev->object.vm_object ||
87767bf6868SJohn Dyson 			(prev->offset + prevsize == entry->offset)) &&
878afa07f7eSJohn Dyson 		     (prev->eflags == entry->eflags) &&
87967bf6868SJohn Dyson 		     (prev->protection == entry->protection) &&
88067bf6868SJohn Dyson 		     (prev->max_protection == entry->max_protection) &&
88167bf6868SJohn Dyson 		     (prev->inheritance == entry->inheritance) &&
882b7b2aac2SJohn Dyson 		     (prev->wired_count == entry->wired_count)) {
			/* Repoint map hints away from the entry being freed. */
883308c24baSJohn Dyson 			if (map->first_free == prev)
884308c24baSJohn Dyson 				map->first_free = entry;
885b18bfc3dSJohn Dyson 			if (map->hint == prev)
886b18bfc3dSJohn Dyson 				map->hint = entry;
887308c24baSJohn Dyson 			vm_map_entry_unlink(map, prev);
888308c24baSJohn Dyson 			entry->start = prev->start;
889308c24baSJohn Dyson 			entry->offset = prev->offset;
			/* Drop the absorbed entry's object reference. */
890b18bfc3dSJohn Dyson 			if (prev->object.vm_object)
891308c24baSJohn Dyson 				vm_object_deallocate(prev->object.vm_object);
892308c24baSJohn Dyson 			vm_map_entry_dispose(map, prev);
893308c24baSJohn Dyson 		}
894308c24baSJohn Dyson 	}
895de5f6a77SJohn Dyson 
	/* Same merge test in the other direction, absorbing the next entry. */
896de5f6a77SJohn Dyson 	next = entry->next;
897308c24baSJohn Dyson 	if (next != &map->header) {
89867bf6868SJohn Dyson 		esize = entry->end - entry->start;
89967bf6868SJohn Dyson 		if ((entry->end == next->start) &&
90067bf6868SJohn Dyson 		    (next->object.vm_object == entry->object.vm_object) &&
90167bf6868SJohn Dyson 		     (!entry->object.vm_object ||
90267bf6868SJohn Dyson 			(entry->offset + esize == next->offset)) &&
903afa07f7eSJohn Dyson 		    (next->eflags == entry->eflags) &&
90467bf6868SJohn Dyson 		    (next->protection == entry->protection) &&
90567bf6868SJohn Dyson 		    (next->max_protection == entry->max_protection) &&
90667bf6868SJohn Dyson 		    (next->inheritance == entry->inheritance) &&
907b7b2aac2SJohn Dyson 		    (next->wired_count == entry->wired_count)) {
908308c24baSJohn Dyson 			if (map->first_free == next)
909308c24baSJohn Dyson 				map->first_free = entry;
910b18bfc3dSJohn Dyson 			if (map->hint == next)
911b18bfc3dSJohn Dyson 				map->hint = entry;
912de5f6a77SJohn Dyson 			vm_map_entry_unlink(map, next);
913de5f6a77SJohn Dyson 			entry->end = next->end;
914b18bfc3dSJohn Dyson 			if (next->object.vm_object)
915de5f6a77SJohn Dyson 				vm_object_deallocate(next->object.vm_object);
916de5f6a77SJohn Dyson 			vm_map_entry_dispose(map, next);
917df8bae1dSRodney W. Grimes 	        }
918df8bae1dSRodney W. Grimes 	}
919de5f6a77SJohn Dyson }
920df8bae1dSRodney W. Grimes /*
921df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
922df8bae1dSRodney W. Grimes  *
923df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
924df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
925df8bae1dSRodney W. Grimes  *	it splits the entry into two.
926df8bae1dSRodney W. Grimes  */
/* Clip only when the split point actually falls inside the entry. */
927df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \
928df8bae1dSRodney W. Grimes { \
929df8bae1dSRodney W. Grimes 	if (startaddr > entry->start) \
930df8bae1dSRodney W. Grimes 		_vm_map_clip_start(map, entry, startaddr); \
931df8bae1dSRodney W. Grimes }
932df8bae1dSRodney W. Grimes 
933df8bae1dSRodney W. Grimes /*
934df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
935df8bae1dSRodney W. Grimes  *	the entry must be split.
936df8bae1dSRodney W. Grimes  */
9370d94caffSDavid Greenman static void
9381b40f8c0SMatthew Dillon _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
939df8bae1dSRodney W. Grimes {
940c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
941df8bae1dSRodney W. Grimes 
942df8bae1dSRodney W. Grimes 	/*
9430d94caffSDavid Greenman 	 * Split off the front portion -- note that we must insert the new
9440d94caffSDavid Greenman 	 * entry BEFORE this one, so that this entry has the specified
9450d94caffSDavid Greenman 	 * starting address.
946df8bae1dSRodney W. Grimes 	 */
947df8bae1dSRodney W. Grimes 
	/* Merge first so we do not split an entry that could coalesce. */
948f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, entry);
949f32dbbeeSJohn Dyson 
95011cccda1SJohn Dyson 	/*
95111cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
95211cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
95311cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
95411cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
95511cccda1SJohn Dyson 	 * put this improvement.
95611cccda1SJohn Dyson 	 */
95711cccda1SJohn Dyson 
9584e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL && !map->system_map) {
95911cccda1SJohn Dyson 		vm_object_t object;
96011cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
961c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
96211cccda1SJohn Dyson 		entry->object.vm_object = object;
96311cccda1SJohn Dyson 		entry->offset = 0;
96411cccda1SJohn Dyson 	}
96511cccda1SJohn Dyson 
	/* Copy the entry, shrink it to [start, end), keep the front part. */
966df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
967df8bae1dSRodney W. Grimes 	*new_entry = *entry;
968df8bae1dSRodney W. Grimes 
969df8bae1dSRodney W. Grimes 	new_entry->end = start;
970df8bae1dSRodney W. Grimes 	entry->offset += (start - entry->start);
971df8bae1dSRodney W. Grimes 	entry->start = start;
972df8bae1dSRodney W. Grimes 
973df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry->prev, new_entry);
974df8bae1dSRodney W. Grimes 
	/* Both halves now reference the object: take an extra reference. */
9759fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
976df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
977df8bae1dSRodney W. Grimes 	}
978c0877f10SJohn Dyson }
979df8bae1dSRodney W. Grimes 
980df8bae1dSRodney W. Grimes /*
981df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
982df8bae1dSRodney W. Grimes  *
983df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
984df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
985df8bae1dSRodney W. Grimes  *	it splits the entry into two.
986df8bae1dSRodney W. Grimes  */
987df8bae1dSRodney W. Grimes 
/* Clip only when the split point actually falls inside the entry. */
988df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \
989df8bae1dSRodney W. Grimes { \
990df8bae1dSRodney W. Grimes 	if (endaddr < entry->end) \
991df8bae1dSRodney W. Grimes 		_vm_map_clip_end(map, entry, endaddr); \
992df8bae1dSRodney W. Grimes }
993df8bae1dSRodney W. Grimes 
994df8bae1dSRodney W. Grimes /*
995df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
996df8bae1dSRodney W. Grimes  *	the entry must be split.
997df8bae1dSRodney W. Grimes  */
9980d94caffSDavid Greenman static void
9991b40f8c0SMatthew Dillon _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1000df8bae1dSRodney W. Grimes {
1001c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
1002df8bae1dSRodney W. Grimes 
1003df8bae1dSRodney W. Grimes 	/*
100411cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
100511cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
100611cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
100711cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
100811cccda1SJohn Dyson 	 * put this improvement.
100911cccda1SJohn Dyson 	 */
101011cccda1SJohn Dyson 
10114e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL && !map->system_map) {
101211cccda1SJohn Dyson 		vm_object_t object;
101311cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
1014c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
101511cccda1SJohn Dyson 		entry->object.vm_object = object;
101611cccda1SJohn Dyson 		entry->offset = 0;
101711cccda1SJohn Dyson 	}
101811cccda1SJohn Dyson 
101911cccda1SJohn Dyson 	/*
10200d94caffSDavid Greenman 	 * Create a new entry and insert it AFTER the specified entry
1021df8bae1dSRodney W. Grimes 	 */
1022df8bae1dSRodney W. Grimes 
1023df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
1024df8bae1dSRodney W. Grimes 	*new_entry = *entry;
1025df8bae1dSRodney W. Grimes 
	/* Tail gets [end, old end); its offset advances past the head. */
1026df8bae1dSRodney W. Grimes 	new_entry->start = entry->end = end;
1027df8bae1dSRodney W. Grimes 	new_entry->offset += (end - entry->start);
1028df8bae1dSRodney W. Grimes 
1029df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry, new_entry);
1030df8bae1dSRodney W. Grimes 
	/* Both halves now reference the object: take an extra reference. */
10319fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1032df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
1033df8bae1dSRodney W. Grimes 	}
1034c0877f10SJohn Dyson }
1035df8bae1dSRodney W. Grimes 
1036df8bae1dSRodney W. Grimes /*
1037df8bae1dSRodney W. Grimes  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1038df8bae1dSRodney W. Grimes  *
1039df8bae1dSRodney W. Grimes  *	Asserts that the starting and ending region
1040df8bae1dSRodney W. Grimes  *	addresses fall within the valid range of the map.
1041df8bae1dSRodney W. Grimes  */
/* Clamp [start, end) into the map's valid range; may yield start == end. */
1042df8bae1dSRodney W. Grimes #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1043df8bae1dSRodney W. Grimes 		{					\
1044df8bae1dSRodney W. Grimes 		if (start < vm_map_min(map))		\
1045df8bae1dSRodney W. Grimes 			start = vm_map_min(map);	\
1046df8bae1dSRodney W. Grimes 		if (end > vm_map_max(map))		\
1047df8bae1dSRodney W. Grimes 			end = vm_map_max(map);		\
1048df8bae1dSRodney W. Grimes 		if (start > end)			\
1049df8bae1dSRodney W. Grimes 			start = end;			\
1050df8bae1dSRodney W. Grimes 		}
1051df8bae1dSRodney W. Grimes 
1052df8bae1dSRodney W. Grimes /*
1053df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
1054df8bae1dSRodney W. Grimes  *
1055df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
1056df8bae1dSRodney W. Grimes  *
1057df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
1058df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
1059df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
1060df8bae1dSRodney W. Grimes  *
1061df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
1062df8bae1dSRodney W. Grimes  *	within this rage after calling vm_map_submap:
1063df8bae1dSRodney W. Grimes  *		vm_fault
1064df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
1065df8bae1dSRodney W. Grimes  *
1066df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
1067df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
1068df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
1069df8bae1dSRodney W. Grimes  */
1070df8bae1dSRodney W. Grimes int
10711b40f8c0SMatthew Dillon vm_map_submap(
10721b40f8c0SMatthew Dillon 	vm_map_t map,
10731b40f8c0SMatthew Dillon 	vm_offset_t start,
10741b40f8c0SMatthew Dillon 	vm_offset_t end,
10751b40f8c0SMatthew Dillon 	vm_map_t submap)
1076df8bae1dSRodney W. Grimes {
1077df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1078c0877f10SJohn Dyson 	int result = KERN_INVALID_ARGUMENT;
1079df8bae1dSRodney W. Grimes 
10800cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
10810cddd8f0SMatthew Dillon 
1082df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1083df8bae1dSRodney W. Grimes 
1084df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1085df8bae1dSRodney W. Grimes 
1086df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1087df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
10880d94caffSDavid Greenman 	} else
1089df8bae1dSRodney W. Grimes 		entry = entry->next;
1090df8bae1dSRodney W. Grimes 
1091df8bae1dSRodney W. Grimes 	vm_map_clip_end(map, entry, end);
1092df8bae1dSRodney W. Grimes 
	/*
	 * Install the submap only if exactly one entry covers the whole
	 * range and it is a plain, objectless, non-COW entry; otherwise
	 * fail with KERN_INVALID_ARGUMENT.  Note no reference is taken
	 * on submap here -- the caller's reference is consumed.
	 */
1093df8bae1dSRodney W. Grimes 	if ((entry->start == start) && (entry->end == end) &&
10949fdfe602SMatthew Dillon 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1095afa07f7eSJohn Dyson 	    (entry->object.vm_object == NULL)) {
10962d8acc0fSJohn Dyson 		entry->object.sub_map = submap;
1097afa07f7eSJohn Dyson 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1098df8bae1dSRodney W. Grimes 		result = KERN_SUCCESS;
1099df8bae1dSRodney W. Grimes 	}
1100df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1101df8bae1dSRodney W. Grimes 
1102df8bae1dSRodney W. Grimes 	return (result);
1103df8bae1dSRodney W. Grimes }
1104df8bae1dSRodney W. Grimes 
1105df8bae1dSRodney W. Grimes /*
1106df8bae1dSRodney W. Grimes  *	vm_map_protect:
1107df8bae1dSRodney W. Grimes  *
1108df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
1109df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
1110df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
1111df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
1112df8bae1dSRodney W. Grimes  */
1113df8bae1dSRodney W. Grimes int
1114b9dcd593SBruce Evans vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1115b9dcd593SBruce Evans 	       vm_prot_t new_prot, boolean_t set_max)
1116df8bae1dSRodney W. Grimes {
1117c0877f10SJohn Dyson 	vm_map_entry_t current;
1118df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1119df8bae1dSRodney W. Grimes 
11200cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
1121df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1122df8bae1dSRodney W. Grimes 
1123df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1124df8bae1dSRodney W. Grimes 
1125df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1126df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1127b7b2aac2SJohn Dyson 	} else {
1128df8bae1dSRodney W. Grimes 		entry = entry->next;
1129b7b2aac2SJohn Dyson 	}
1130df8bae1dSRodney W. Grimes 
1131df8bae1dSRodney W. Grimes 	/*
11320d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
1133df8bae1dSRodney W. Grimes 	 */
1134df8bae1dSRodney W. Grimes 
	/*
	 * Pass 1: verify the whole range is eligible before touching
	 * anything, so the operation is all-or-nothing.
	 */
1135df8bae1dSRodney W. Grimes 	current = entry;
1136df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1137afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1138a1f6d91cSDavid Greenman 			vm_map_unlock(map);
1139df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1140a1f6d91cSDavid Greenman 		}
1141df8bae1dSRodney W. Grimes 		if ((new_prot & current->max_protection) != new_prot) {
1142df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1143df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
1144df8bae1dSRodney W. Grimes 		}
1145df8bae1dSRodney W. Grimes 		current = current->next;
1146df8bae1dSRodney W. Grimes 	}
1147df8bae1dSRodney W. Grimes 
1148df8bae1dSRodney W. Grimes 	/*
11490d94caffSDavid Greenman 	 * Go back and fix up protections. [Note that clipping is not
11500d94caffSDavid Greenman 	 * necessary the second time.]
1151df8bae1dSRodney W. Grimes 	 */
1152df8bae1dSRodney W. Grimes 
1153df8bae1dSRodney W. Grimes 	current = entry;
1154df8bae1dSRodney W. Grimes 
1155df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1156df8bae1dSRodney W. Grimes 		vm_prot_t old_prot;
1157df8bae1dSRodney W. Grimes 
1158df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, current, end);
1159df8bae1dSRodney W. Grimes 
1160df8bae1dSRodney W. Grimes 		old_prot = current->protection;
1161df8bae1dSRodney W. Grimes 		if (set_max)
			/* New max; current protection is reduced to fit. */
1162df8bae1dSRodney W. Grimes 			current->protection =
1163df8bae1dSRodney W. Grimes 			    (current->max_protection = new_prot) &
1164df8bae1dSRodney W. Grimes 			    old_prot;
1165df8bae1dSRodney W. Grimes 		else
1166df8bae1dSRodney W. Grimes 			current->protection = new_prot;
1167df8bae1dSRodney W. Grimes 
1168df8bae1dSRodney W. Grimes 		/*
11690d94caffSDavid Greenman 		 * Update physical map if necessary. Worry about copy-on-write
11700d94caffSDavid Greenman 		 * here -- CHECK THIS XXX
1171df8bae1dSRodney W. Grimes 		 */
1172df8bae1dSRodney W. Grimes 
1173df8bae1dSRodney W. Grimes 		if (current->protection != old_prot) {
			/*
			 * MASK strips VM_PROT_WRITE from COW entries so the
			 * pmap keeps faulting on write until the copy is made.
			 */
1174afa07f7eSJohn Dyson #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1175df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
1176df8bae1dSRodney W. Grimes 
1177df8bae1dSRodney W. Grimes 			pmap_protect(map->pmap, current->start,
1178df8bae1dSRodney W. Grimes 			    current->end,
11791c85e3dfSAlan Cox 			    current->protection & MASK(current));
1180df8bae1dSRodney W. Grimes #undef	MASK
1181df8bae1dSRodney W. Grimes 		}
11827d78abc9SJohn Dyson 
11837d78abc9SJohn Dyson 		vm_map_simplify_entry(map, current);
11847d78abc9SJohn Dyson 
1185df8bae1dSRodney W. Grimes 		current = current->next;
1186df8bae1dSRodney W. Grimes 	}
1187df8bae1dSRodney W. Grimes 
1188df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1189df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1190df8bae1dSRodney W. Grimes }
1191df8bae1dSRodney W. Grimes 
1192df8bae1dSRodney W. Grimes /*
1193867a482dSJohn Dyson  *	vm_map_madvise:
1194867a482dSJohn Dyson  *
1195867a482dSJohn Dyson  * 	This routine traverses a processes map handling the madvise
1196f7fc307aSAlan Cox  *	system call.  Advisories are classified as either those effecting
1197f7fc307aSAlan Cox  *	the vm_map_entry structure, or those effecting the underlying
1198f7fc307aSAlan Cox  *	objects.
1199867a482dSJohn Dyson  */
1200b4309055SMatthew Dillon 
int
vm_map_madvise(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;	/* 1 if behav alters the map entries themselves */

	GIANT_REQUIRED;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */

	switch(behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
		/* Entry-level advice: needs the exclusive map lock. */
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		/* Object-level advice: a shared (read) lock suffices. */
		vm_map_lock_read(map);
		break;
	default:
		/* Unknown advice is rejected before any lock is taken. */
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Locate starting entry and clip if necessary.
	 */

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		if (modify_map)
			vm_map_clip_start(map, entry, start);
	} else {
		/* start lies in a hole; begin with the following entry. */
		entry = entry->next;
	}

	if (modify_map) {
		/*
		 * madvise behaviors that are implemented in the vm_map_entry.
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			/* Submap entries are skipped entirely. */
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				/* Filtered by the switch above; nothing to do. */
				break;
			}
			/* Merge back entries the clipping may have split. */
			vm_map_simplify_entry(map, current);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pindex;
		int count;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			vm_offset_t useStart;

			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			/* Full extent of this entry in object pages ... */
			pindex = OFF_TO_IDX(current->offset);
			count = atop(current->end - current->start);
			useStart = current->start;

			/* ... trimmed to the [start, end) request. */
			if (current->start < start) {
				pindex += atop(start - current->start);
				count -= atop(start - current->start);
				useStart = start;
			}
			if (current->end > end)
				count -= atop(current->end - end);

			if (count <= 0)
				continue;

			vm_object_madvise(current->object.vm_object,
					  pindex, count, behav);
			if (behav == MADV_WILLNEED) {
				/* Pre-populate mappings for the advised pages. */
				pmap_object_init_pt(
				    map->pmap,
				    useStart,
				    current->object.vm_object,
				    pindex,
				    (count << PAGE_SHIFT),
				    0
				);
			}
		}
		vm_map_unlock_read(map);
	}
	return(0);
}
1349867a482dSJohn Dyson 
1350867a482dSJohn Dyson 
1351867a482dSJohn Dyson /*
1352df8bae1dSRodney W. Grimes  *	vm_map_inherit:
1353df8bae1dSRodney W. Grimes  *
1354df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
1355df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
1356df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
1357df8bae1dSRodney W. Grimes  *	child maps at the time of vm_map_fork.
1358df8bae1dSRodney W. Grimes  */
1359df8bae1dSRodney W. Grimes int
1360b9dcd593SBruce Evans vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1361b9dcd593SBruce Evans 	       vm_inherit_t new_inheritance)
1362df8bae1dSRodney W. Grimes {
1363c0877f10SJohn Dyson 	vm_map_entry_t entry;
1364df8bae1dSRodney W. Grimes 	vm_map_entry_t temp_entry;
1365df8bae1dSRodney W. Grimes 
13660cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
13670cddd8f0SMatthew Dillon 
1368df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
1369df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
1370df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
1371df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
1372df8bae1dSRodney W. Grimes 		break;
1373df8bae1dSRodney W. Grimes 	default:
1374df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
1375df8bae1dSRodney W. Grimes 	}
1376df8bae1dSRodney W. Grimes 
1377df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1378df8bae1dSRodney W. Grimes 
1379df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1380df8bae1dSRodney W. Grimes 
1381df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1382df8bae1dSRodney W. Grimes 		entry = temp_entry;
1383df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
13840d94caffSDavid Greenman 	} else
1385df8bae1dSRodney W. Grimes 		entry = temp_entry->next;
1386df8bae1dSRodney W. Grimes 
1387df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1388df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1389df8bae1dSRodney W. Grimes 
1390df8bae1dSRodney W. Grimes 		entry->inheritance = new_inheritance;
1391df8bae1dSRodney W. Grimes 
139244428f62SAlan Cox 		vm_map_simplify_entry(map, entry);
139344428f62SAlan Cox 
1394df8bae1dSRodney W. Grimes 		entry = entry->next;
1395df8bae1dSRodney W. Grimes 	}
1396df8bae1dSRodney W. Grimes 
1397df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1398df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1399df8bae1dSRodney W. Grimes }
1400df8bae1dSRodney W. Grimes 
1401df8bae1dSRodney W. Grimes /*
14027aaaa4fdSJohn Dyson  * Implement the semantics of mlock
14037aaaa4fdSJohn Dyson  */
int
vm_map_user_pageable(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t new_pageable)
{
	vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	vm_offset_t estart;	/* restart point after a lock-upgrade race */
	int rv;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}

	if (new_pageable) {
		/* munlock path: drop user wirings in [start, end). */

		entry = start_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				vm_map_clip_end(map, entry, end);
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
				entry->wired_count--;
				/* Last wiring gone: release the pages. */
				if (entry->wired_count == 0)
					vm_fault_unwire(map, entry->start, entry->end);
			}
			vm_map_simplify_entry(map,entry);
			entry = entry->next;
		}
	} else {
		/* mlock path: user-wire every entry in [start, end). */

		entry = start_entry;

		while ((entry != &map->header) && (entry->start < end)) {

			/* Already user-wired: nothing more to do here. */
			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				entry = entry->next;
				continue;
			}

			/*
			 * Already wired (e.g. by the kernel): just add our
			 * reference and tag it as user-wired.
			 */
			if (entry->wired_count != 0) {
				entry->wired_count++;
				entry->eflags |= MAP_ENTRY_USER_WIRED;
				entry = entry->next;
				continue;
			}

			/* Here on entry being newly wired */

			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {

					/*
					 * Resolve copy-on-write up front so the
					 * wired pages belong to this map.
					 */
					vm_object_shadow(&entry->object.vm_object,
					    &entry->offset,
					    atop(entry->end - entry->start));
					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

				} else if (entry->object.vm_object == NULL &&
					   !map->system_map) {

					/* Zero-fill range: give it a backing object. */
					entry->object.vm_object =
					    vm_object_allocate(OBJT_DEFAULT,
						atop(entry->end - entry->start));
					entry->offset = (vm_offset_t) 0;

				}
			}

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			entry->wired_count++;
			entry->eflags |= MAP_ENTRY_USER_WIRED;
			estart = entry->start;

			/* First we need to allow map modifications */
			vm_map_set_recursive(map);
			vm_map_lock_downgrade(map);
			map->timestamp++;

			/* Fault the pages in while holding only the read lock. */
			rv = vm_fault_user_wire(map, entry->start, entry->end);
			if (rv) {

				/* Undo this entry's wiring ... */
				entry->wired_count--;
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;

				vm_map_clear_recursive(map);
				vm_map_unlock(map);

				/*
				 * ... and recursively unwire everything wired
				 * so far, [start, entry->start).
				 */
				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
				return rv;
			}

			vm_map_clear_recursive(map);
			if (vm_map_lock_upgrade(map)) {
				/*
				 * Lost the lock during upgrade; reacquire and
				 * re-validate our position.  If the entry at
				 * estart vanished, unwind and fail.
				 */
				vm_map_lock(map);
				if (vm_map_lookup_entry(map, estart, &entry)
				    == FALSE) {
					vm_map_unlock(map);
					(void) vm_map_user_pageable(map,
								    start,
								    estart,
								    TRUE);
					return (KERN_INVALID_ADDRESS);
				}
			}
			vm_map_simplify_entry(map,entry);
		}
	}
	map->timestamp++;
	vm_map_unlock(map);
	return KERN_SUCCESS;
}
15307aaaa4fdSJohn Dyson 
15317aaaa4fdSJohn Dyson /*
1532df8bae1dSRodney W. Grimes  *	vm_map_pageable:
1533df8bae1dSRodney W. Grimes  *
1534df8bae1dSRodney W. Grimes  *	Sets the pageability of the specified address
1535df8bae1dSRodney W. Grimes  *	range in the target map.  Regions specified
1536df8bae1dSRodney W. Grimes  *	as not pageable require locked-down physical
1537df8bae1dSRodney W. Grimes  *	memory and physical page maps.
1538df8bae1dSRodney W. Grimes  *
1539df8bae1dSRodney W. Grimes  *	The map must not be locked, but a reference
1540df8bae1dSRodney W. Grimes  *	must remain to the map throughout the call.
1541df8bae1dSRodney W. Grimes  */
int
vm_map_pageable(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t new_pageable)
{
	vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	vm_offset_t failed = 0;	/* first address whose wiring fault failed */
	int rv;

	GIANT_REQUIRED;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * Only one pageability change may take place at one time, since
	 * vm_fault assumes it will be called only once for each
	 * wiring/unwiring.  Therefore, we have to make sure we're actually
	 * changing the pageability for the entire region.  We do so before
	 * making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * Actions are rather different for wiring and unwiring, so we have
	 * two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 * Unwiring.  First ensure that the range to be unwired is
		 * really wired down and that there are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
				(entry->next == &map->header ||
				    entry->next->start > entry->end))) {
				/* Unwired entry or a gap: reject the request. */
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			/* Re-merge entries that clipping may have split. */
			vm_map_simplify_entry(map, entry);

			entry = entry->next;
		}
	} else {
		/*
		 * Wiring.  We must do this in two passes:
		 *
		 * 1.  Holding the write lock, we create any shadow or zero-fill
		 * objects that need to be created. Then we clip each map
		 * entry to the region to be wired and increment its wiring
		 * count.  We create objects before clipping the map entries
		 * to avoid object proliferation.
		 *
		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
		 * fault in the pages for any newly wired area (wired_count is
		 * 1).
		 *
		 * Downgrading to a read lock for vm_fault_wire avoids a possible
		 * deadlock with another process that may have faulted on one
		 * of the pages to be wired (it would mark the page busy,
		 * blocking us, then in turn block on the map lock that we
		 * hold).  Because of problems in the recursive lock package,
		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
		 * any actions that require the write lock must be done
		 * beforehand.  Because we keep the read lock on the map, the
		 * copy-on-write status of the entries we modify here cannot
		 * change.
		 */

		/*
		 * Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {

				/*
				 * Perform actions of vm_map_lookup that need
				 * the write lock on the map: create a shadow
				 * object for a copy-on-write region, or an
				 * object for a zero-fill region.
				 *
				 * We don't have to do this for entries that
				 * point to sub maps, because we won't
				 * hold the lock on the sub map.
				 */
				if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
					if (copyflag &&
					    ((entry->protection & VM_PROT_WRITE) != 0)) {

						vm_object_shadow(&entry->object.vm_object,
						    &entry->offset,
						    atop(entry->end - entry->start));
						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
					} else if (entry->object.vm_object == NULL &&
						   !map->system_map) {
						entry->object.vm_object =
						    vm_object_allocate(OBJT_DEFAULT,
							atop(entry->end - entry->start));
						entry->offset = (vm_offset_t) 0;
					}
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 * Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
				entry->next->start > entry->end)) {
				/*
				 * Found one.  Object creation actions do not
				 * need to be undone, but the wired counts
				 * need to be restored.
				 */
				while (entry != &map->header && entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel is well-behaved, and therefore will not do
		 * anything destructive to this region of the map while
		 * we have it unlocked.  We cannot trust user processes
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			vm_map_lock_downgrade(map);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				/* wired_count == 1: newly wired by pass 1. */
				rv = vm_fault_wire(map, entry->start, entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		/* Re-take the exclusive lock dropped by the HACK above. */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		}
		if (rv) {
			/* Unwire everything successfully wired before 'failed'. */
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
		vm_map_simplify_entry(map, start_entry);
	}

	vm_map_unlock(map);

	return (KERN_SUCCESS);
}
1762df8bae1dSRodney W. Grimes 
1763df8bae1dSRodney W. Grimes /*
1764df8bae1dSRodney W. Grimes  * vm_map_clean
1765df8bae1dSRodney W. Grimes  *
1766df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
1767df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
1768df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
1769df8bae1dSRodney W. Grimes  *
1770df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
1771df8bae1dSRodney W. Grimes  */
1772df8bae1dSRodney W. Grimes int
17731b40f8c0SMatthew Dillon vm_map_clean(
17741b40f8c0SMatthew Dillon 	vm_map_t map,
17751b40f8c0SMatthew Dillon 	vm_offset_t start,
17761b40f8c0SMatthew Dillon 	vm_offset_t end,
17771b40f8c0SMatthew Dillon 	boolean_t syncio,
17781b40f8c0SMatthew Dillon 	boolean_t invalidate)
1779df8bae1dSRodney W. Grimes {
1780c0877f10SJohn Dyson 	vm_map_entry_t current;
1781df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1782df8bae1dSRodney W. Grimes 	vm_size_t size;
1783df8bae1dSRodney W. Grimes 	vm_object_t object;
1784a316d390SJohn Dyson 	vm_ooffset_t offset;
1785df8bae1dSRodney W. Grimes 
17860cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
17870cddd8f0SMatthew Dillon 
1788df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
1789df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1790df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &entry)) {
1791df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
1792df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1793df8bae1dSRodney W. Grimes 	}
1794df8bae1dSRodney W. Grimes 	/*
1795df8bae1dSRodney W. Grimes 	 * Make a first pass to check for holes.
1796df8bae1dSRodney W. Grimes 	 */
1797df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1798afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1799df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1800df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1801df8bae1dSRodney W. Grimes 		}
1802df8bae1dSRodney W. Grimes 		if (end > current->end &&
1803df8bae1dSRodney W. Grimes 		    (current->next == &map->header ||
1804df8bae1dSRodney W. Grimes 			current->end != current->next->start)) {
1805df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1806df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ADDRESS);
1807df8bae1dSRodney W. Grimes 		}
1808df8bae1dSRodney W. Grimes 	}
1809df8bae1dSRodney W. Grimes 
1810cf2819ccSJohn Dyson 	if (invalidate)
1811cf2819ccSJohn Dyson 		pmap_remove(vm_map_pmap(map), start, end);
1812df8bae1dSRodney W. Grimes 	/*
1813df8bae1dSRodney W. Grimes 	 * Make a second pass, cleaning/uncaching pages from the indicated
1814df8bae1dSRodney W. Grimes 	 * objects as we go.
1815df8bae1dSRodney W. Grimes 	 */
1816df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1817df8bae1dSRodney W. Grimes 		offset = current->offset + (start - current->start);
1818df8bae1dSRodney W. Grimes 		size = (end <= current->end ? end : current->end) - start;
18199fdfe602SMatthew Dillon 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1820c0877f10SJohn Dyson 			vm_map_t smap;
1821df8bae1dSRodney W. Grimes 			vm_map_entry_t tentry;
1822df8bae1dSRodney W. Grimes 			vm_size_t tsize;
1823df8bae1dSRodney W. Grimes 
18249fdfe602SMatthew Dillon 			smap = current->object.sub_map;
1825df8bae1dSRodney W. Grimes 			vm_map_lock_read(smap);
1826df8bae1dSRodney W. Grimes 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1827df8bae1dSRodney W. Grimes 			tsize = tentry->end - offset;
1828df8bae1dSRodney W. Grimes 			if (tsize < size)
1829df8bae1dSRodney W. Grimes 				size = tsize;
1830df8bae1dSRodney W. Grimes 			object = tentry->object.vm_object;
1831df8bae1dSRodney W. Grimes 			offset = tentry->offset + (offset - tentry->start);
1832df8bae1dSRodney W. Grimes 			vm_map_unlock_read(smap);
1833df8bae1dSRodney W. Grimes 		} else {
1834df8bae1dSRodney W. Grimes 			object = current->object.vm_object;
1835df8bae1dSRodney W. Grimes 		}
18368a02c104SJohn Dyson 		/*
18378a02c104SJohn Dyson 		 * Note that there is absolutely no sense in writing out
18388a02c104SJohn Dyson 		 * anonymous objects, so we track down the vnode object
18398a02c104SJohn Dyson 		 * to write out.
18408a02c104SJohn Dyson 		 * We invalidate (remove) all pages from the address space
18418a02c104SJohn Dyson 		 * anyway, for semantic correctness.
18428a02c104SJohn Dyson 		 */
18438a02c104SJohn Dyson 		while (object->backing_object) {
18448a02c104SJohn Dyson 			object = object->backing_object;
18458a02c104SJohn Dyson 			offset += object->backing_object_offset;
18468a02c104SJohn Dyson 			if (object->size < OFF_TO_IDX( offset + size))
18478a02c104SJohn Dyson 				size = IDX_TO_OFF(object->size) - offset;
18488a02c104SJohn Dyson 		}
1849ff359f84SMatthew Dillon 		if (object && (object->type == OBJT_VNODE) &&
1850ff359f84SMatthew Dillon 		    (current->protection & VM_PROT_WRITE)) {
1851df8bae1dSRodney W. Grimes 			/*
1852ff359f84SMatthew Dillon 			 * Flush pages if writing is allowed, invalidate them
1853ff359f84SMatthew Dillon 			 * if invalidation requested.  Pages undergoing I/O
1854ff359f84SMatthew Dillon 			 * will be ignored by vm_object_page_remove().
1855f5cf85d4SDavid Greenman 			 *
1856ff359f84SMatthew Dillon 			 * We cannot lock the vnode and then wait for paging
1857ff359f84SMatthew Dillon 			 * to complete without deadlocking against vm_fault.
1858ff359f84SMatthew Dillon 			 * Instead we simply call vm_object_page_remove() and
1859ff359f84SMatthew Dillon 			 * allow it to block internally on a page-by-page
1860ff359f84SMatthew Dillon 			 * basis when it encounters pages undergoing async
1861ff359f84SMatthew Dillon 			 * I/O.
1862df8bae1dSRodney W. Grimes 			 */
18638f9110f6SJohn Dyson 			int flags;
1864ff359f84SMatthew Dillon 
1865ff359f84SMatthew Dillon 			vm_object_reference(object);
1866b40ce416SJulian Elischer 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
18678f9110f6SJohn Dyson 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
18688f9110f6SJohn Dyson 			flags |= invalidate ? OBJPC_INVAL : 0;
1869a316d390SJohn Dyson 			vm_object_page_clean(object,
1870a316d390SJohn Dyson 			    OFF_TO_IDX(offset),
18712be70f79SJohn Dyson 			    OFF_TO_IDX(offset + size + PAGE_MASK),
18728f9110f6SJohn Dyson 			    flags);
1873cf2819ccSJohn Dyson 			if (invalidate) {
1874ff359f84SMatthew Dillon 				/*vm_object_pip_wait(object, "objmcl");*/
1875a316d390SJohn Dyson 				vm_object_page_remove(object,
1876a316d390SJohn Dyson 				    OFF_TO_IDX(offset),
18772be70f79SJohn Dyson 				    OFF_TO_IDX(offset + size + PAGE_MASK),
1878a316d390SJohn Dyson 				    FALSE);
1879cf2819ccSJohn Dyson 			}
1880b40ce416SJulian Elischer 			VOP_UNLOCK(object->handle, 0, curthread);
1881ff359f84SMatthew Dillon 			vm_object_deallocate(object);
1882a02051c3SJohn Dyson 		}
1883df8bae1dSRodney W. Grimes 		start += size;
1884df8bae1dSRodney W. Grimes 	}
1885df8bae1dSRodney W. Grimes 
1886df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
1887df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1888df8bae1dSRodney W. Grimes }
1889df8bae1dSRodney W. Grimes 
1890df8bae1dSRodney W. Grimes /*
1891df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
1892df8bae1dSRodney W. Grimes  *
1893df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
1894df8bae1dSRodney W. Grimes  *
1895df8bae1dSRodney W. Grimes  *	The map in question should be locked.
1896df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
1897df8bae1dSRodney W. Grimes  */
18980362d7d7SJohn Dyson static void
18991b40f8c0SMatthew Dillon vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
1900df8bae1dSRodney W. Grimes {
	/*
	 * Drop the wirings for every page in [entry->start, entry->end)
	 * and then mark the entry pageable by zeroing its wired count.
	 * Per the comment above, the map must be locked by the caller.
	 */
1901df8bae1dSRodney W. Grimes 	vm_fault_unwire(map, entry->start, entry->end);
1902df8bae1dSRodney W. Grimes 	entry->wired_count = 0;
1903df8bae1dSRodney W. Grimes }
1904df8bae1dSRodney W. Grimes 
1905df8bae1dSRodney W. Grimes /*
1906df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
1907df8bae1dSRodney W. Grimes  *
1908df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
1909df8bae1dSRodney W. Grimes  */
19100362d7d7SJohn Dyson static void
19111b40f8c0SMatthew Dillon vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
1912df8bae1dSRodney W. Grimes {
	/* Unlink the entry from the map and shrink the map's total size. */
1913df8bae1dSRodney W. Grimes 	vm_map_entry_unlink(map, entry);
1914df8bae1dSRodney W. Grimes 	map->size -= entry->end - entry->start;
1915df8bae1dSRodney W. Grimes 
	/*
	 * A normal entry holds a reference on a VM object; drop it.
	 * A submap entry references another vm_map instead, so no
	 * object reference is released for it here.
	 */
19169fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1917df8bae1dSRodney W. Grimes 		vm_object_deallocate(entry->object.vm_object);
1918b5b40fa6SJohn Dyson 	}
1919df8bae1dSRodney W. Grimes 
	/* Return the entry structure itself to the map's entry pool. */
1920df8bae1dSRodney W. Grimes 	vm_map_entry_dispose(map, entry);
1921df8bae1dSRodney W. Grimes }
1922df8bae1dSRodney W. Grimes 
1923df8bae1dSRodney W. Grimes /*
1924df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
1925df8bae1dSRodney W. Grimes  *
1926df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
1927df8bae1dSRodney W. Grimes  *	map.
1928df8bae1dSRodney W. Grimes  */
1929df8bae1dSRodney W. Grimes int
19301b40f8c0SMatthew Dillon vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
1931df8bae1dSRodney W. Grimes {
1932cbd8ec09SJohn Dyson 	vm_object_t object;
1933c0877f10SJohn Dyson 	vm_map_entry_t entry;
1934df8bae1dSRodney W. Grimes 	vm_map_entry_t first_entry;
1935df8bae1dSRodney W. Grimes 
19360cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
19370cddd8f0SMatthew Dillon 
1938df8bae1dSRodney W. Grimes 	/*
1939df8bae1dSRodney W. Grimes 	 * Find the start of the region, and clip it
1940df8bae1dSRodney W. Grimes 	 */
1941df8bae1dSRodney W. Grimes 
	/*
	 * If "start" falls inside an existing entry, split that entry at
	 * "start" so deletion begins on an entry boundary; otherwise begin
	 * with the first entry after the lookup point.
	 */
1942876318ecSAlan Cox 	if (!vm_map_lookup_entry(map, start, &first_entry))
1943df8bae1dSRodney W. Grimes 		entry = first_entry->next;
1944876318ecSAlan Cox 	else {
1945df8bae1dSRodney W. Grimes 		entry = first_entry;
1946df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1947df8bae1dSRodney W. Grimes 		/*
19480d94caffSDavid Greenman 		 * Fix the lookup hint now, rather than each time though the
19490d94caffSDavid Greenman 		 * loop.
1950df8bae1dSRodney W. Grimes 		 */
1951df8bae1dSRodney W. Grimes 		SAVE_HINT(map, entry->prev);
1952df8bae1dSRodney W. Grimes 	}
1953df8bae1dSRodney W. Grimes 
1954df8bae1dSRodney W. Grimes 	/*
1955df8bae1dSRodney W. Grimes 	 * Save the free space hint
1956df8bae1dSRodney W. Grimes 	 */
1957df8bae1dSRodney W. Grimes 
1958b18bfc3dSJohn Dyson 	if (entry == &map->header) {
1959b18bfc3dSJohn Dyson 		map->first_free = &map->header;
19602dbea5d2SJohn Dyson 	} else if (map->first_free->start >= start) {
1961df8bae1dSRodney W. Grimes 		map->first_free = entry->prev;
19622dbea5d2SJohn Dyson 	}
1963df8bae1dSRodney W. Grimes 
1964df8bae1dSRodney W. Grimes 	/*
1965df8bae1dSRodney W. Grimes 	 * Step through all entries in this region
1966df8bae1dSRodney W. Grimes 	 */
1967df8bae1dSRodney W. Grimes 
1968df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1969df8bae1dSRodney W. Grimes 		vm_map_entry_t next;
1970b18bfc3dSJohn Dyson 		vm_offset_t s, e;
1971cbd8ec09SJohn Dyson 		vm_pindex_t offidxstart, offidxend, count;
1972df8bae1dSRodney W. Grimes 
		/* Split the entry at "end" so we never delete past the range. */
1973df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1974df8bae1dSRodney W. Grimes 
1975df8bae1dSRodney W. Grimes 		s = entry->start;
1976df8bae1dSRodney W. Grimes 		e = entry->end;
1977c0877f10SJohn Dyson 		next = entry->next;
1978df8bae1dSRodney W. Grimes 
		/* Page-index range of this entry within its backing object. */
1979cbd8ec09SJohn Dyson 		offidxstart = OFF_TO_IDX(entry->offset);
1980cbd8ec09SJohn Dyson 		count = OFF_TO_IDX(e - s);
1981cbd8ec09SJohn Dyson 		object = entry->object.vm_object;
19822dbea5d2SJohn Dyson 
1983df8bae1dSRodney W. Grimes 		/*
19840d94caffSDavid Greenman 		 * Unwire before removing addresses from the pmap; otherwise,
19850d94caffSDavid Greenman 		 * unwiring will put the entries back in the pmap.
1986df8bae1dSRodney W. Grimes 		 */
1987c0877f10SJohn Dyson 		if (entry->wired_count != 0) {
1988df8bae1dSRodney W. Grimes 			vm_map_entry_unwire(map, entry);
1989c0877f10SJohn Dyson 		}
1990df8bae1dSRodney W. Grimes 
1991cbd8ec09SJohn Dyson 		offidxend = offidxstart + count;
1992df8bae1dSRodney W. Grimes 
		/*
		 * kernel_object/kmem_object mappings: purge the pages from
		 * the object directly.  For everything else, first remove
		 * the pmap translations, then — for anonymous (default or
		 * swap) objects that are the object's sole mapping but still
		 * carry extra references — collapse the object and free its
		 * pages and swap space for this range eagerly.
		 */
1993c0877f10SJohn Dyson 		if ((object == kernel_object) || (object == kmem_object)) {
19942dbea5d2SJohn Dyson 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
1995b18bfc3dSJohn Dyson 		} else {
1996df8bae1dSRodney W. Grimes 			pmap_remove(map->pmap, s, e);
1997876318ecSAlan Cox 			if (object != NULL &&
1998876318ecSAlan Cox 			    object->ref_count != 1 &&
1999876318ecSAlan Cox 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2000876318ecSAlan Cox 			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
20012dbea5d2SJohn Dyson 				vm_object_collapse(object);
20022dbea5d2SJohn Dyson 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
20032dbea5d2SJohn Dyson 				if (object->type == OBJT_SWAP) {
2004cbd8ec09SJohn Dyson 					swap_pager_freespace(object, offidxstart, count);
20052dbea5d2SJohn Dyson 				}
				/*
				 * If the deleted range reaches the end of the
				 * object, shrink the object accordingly.
				 */
2006876318ecSAlan Cox 				if (offidxend >= object->size &&
2007876318ecSAlan Cox 				    offidxstart < object->size) {
2008c0877f10SJohn Dyson 					object->size = offidxstart;
2009c0877f10SJohn Dyson 				}
20102dbea5d2SJohn Dyson 			}
2011b18bfc3dSJohn Dyson 		}
2012df8bae1dSRodney W. Grimes 
2013df8bae1dSRodney W. Grimes 		/*
20140d94caffSDavid Greenman 		 * Delete the entry (which may delete the object) only after
20150d94caffSDavid Greenman 		 * removing all pmap entries pointing to its pages.
20160d94caffSDavid Greenman 		 * (Otherwise, its page frames may be reallocated, and any
20170d94caffSDavid Greenman 		 * modify bits will be set in the wrong object!)
2018df8bae1dSRodney W. Grimes 		 */
2019df8bae1dSRodney W. Grimes 		vm_map_entry_delete(map, entry);
2020df8bae1dSRodney W. Grimes 		entry = next;
2021df8bae1dSRodney W. Grimes 	}
2022df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2023df8bae1dSRodney W. Grimes }
2024df8bae1dSRodney W. Grimes 
2025df8bae1dSRodney W. Grimes /*
2026df8bae1dSRodney W. Grimes  *	vm_map_remove:
2027df8bae1dSRodney W. Grimes  *
2028df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
2029df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
2030df8bae1dSRodney W. Grimes  */
2031df8bae1dSRodney W. Grimes int
20321b40f8c0SMatthew Dillon vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2033df8bae1dSRodney W. Grimes {
2034c0877f10SJohn Dyson 	int result, s = 0;
20358d6e8edeSDavid Greenman 
20360cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
20370cddd8f0SMatthew Dillon 
	/*
	 * kmem_map can be entered from interrupt context, so block
	 * VM interrupts around the operation.
	 */
203808442f8aSBosko Milekic 	if (map == kmem_map)
2039b18bfc3dSJohn Dyson 		s = splvm();
2040df8bae1dSRodney W. Grimes 
	/* Lock, clamp the range to the map's bounds, and do the delete. */
2041df8bae1dSRodney W. Grimes 	vm_map_lock(map);
2042df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
2043df8bae1dSRodney W. Grimes 	result = vm_map_delete(map, start, end);
2044df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
2045df8bae1dSRodney W. Grimes 
204608442f8aSBosko Milekic 	if (map == kmem_map)
20478d6e8edeSDavid Greenman 		splx(s);
20488d6e8edeSDavid Greenman 
2049df8bae1dSRodney W. Grimes 	return (result);
2050df8bae1dSRodney W. Grimes }
2051df8bae1dSRodney W. Grimes 
2052df8bae1dSRodney W. Grimes /*
2053df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
2054df8bae1dSRodney W. Grimes  *
2055df8bae1dSRodney W. Grimes  *	Assert that the target map allows the specified
2056df8bae1dSRodney W. Grimes  *	privilege on the entire address region given.
2057df8bae1dSRodney W. Grimes  *	The entire region must be allocated.
2058df8bae1dSRodney W. Grimes  */
20590d94caffSDavid Greenman boolean_t
2060b9dcd593SBruce Evans vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2061b9dcd593SBruce Evans 			vm_prot_t protection)
2062df8bae1dSRodney W. Grimes {
2063c0877f10SJohn Dyson 	vm_map_entry_t entry;
2064df8bae1dSRodney W. Grimes 	vm_map_entry_t tmp_entry;
2065df8bae1dSRodney W. Grimes 
20660cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
20670cddd8f0SMatthew Dillon 
	/*
	 * NOTE(review): no map lock is taken here; presumably the caller
	 * must hold (at least) a read lock on the map — confirm.
	 */
	/* The start address must itself be mapped. */
2068df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2069df8bae1dSRodney W. Grimes 		return (FALSE);
2070df8bae1dSRodney W. Grimes 	}
2071df8bae1dSRodney W. Grimes 	entry = tmp_entry;
2072df8bae1dSRodney W. Grimes 
	/* Walk successive entries; each must cover the range contiguously. */
2073df8bae1dSRodney W. Grimes 	while (start < end) {
2074df8bae1dSRodney W. Grimes 		if (entry == &map->header) {
2075df8bae1dSRodney W. Grimes 			return (FALSE);
2076df8bae1dSRodney W. Grimes 		}
2077df8bae1dSRodney W. Grimes 		/*
2078df8bae1dSRodney W. Grimes 		 * No holes allowed!
2079df8bae1dSRodney W. Grimes 		 */
2080df8bae1dSRodney W. Grimes 
2081df8bae1dSRodney W. Grimes 		if (start < entry->start) {
2082df8bae1dSRodney W. Grimes 			return (FALSE);
2083df8bae1dSRodney W. Grimes 		}
2084df8bae1dSRodney W. Grimes 		/*
2085df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
2086df8bae1dSRodney W. Grimes 		 */
2087df8bae1dSRodney W. Grimes 
		/* Every requested protection bit must be present. */
2088df8bae1dSRodney W. Grimes 		if ((entry->protection & protection) != protection) {
2089df8bae1dSRodney W. Grimes 			return (FALSE);
2090df8bae1dSRodney W. Grimes 		}
2091df8bae1dSRodney W. Grimes 		/* go to next entry */
2092df8bae1dSRodney W. Grimes 
2093df8bae1dSRodney W. Grimes 		start = entry->end;
2094df8bae1dSRodney W. Grimes 		entry = entry->next;
2095df8bae1dSRodney W. Grimes 	}
2096df8bae1dSRodney W. Grimes 	return (TRUE);
2097df8bae1dSRodney W. Grimes }
2098df8bae1dSRodney W. Grimes 
209986524867SJohn Dyson /*
210086524867SJohn Dyson  * Split the pages in a map entry into a new object.  This affords
210186524867SJohn Dyson  * easier removal of unused pages, and keeps object inheritance from
210286524867SJohn Dyson  * being a negative impact on memory usage.
210386524867SJohn Dyson  */
2104c0877f10SJohn Dyson static void
21051b40f8c0SMatthew Dillon vm_map_split(vm_map_entry_t entry)
2106c0877f10SJohn Dyson {
210786524867SJohn Dyson 	vm_page_t m;
2108bd6be915SJohn Dyson 	vm_object_t orig_object, new_object, source;
2109c0877f10SJohn Dyson 	vm_offset_t s, e;
2110c0877f10SJohn Dyson 	vm_pindex_t offidxstart, offidxend, idx;
2111c0877f10SJohn Dyson 	vm_size_t size;
2112c0877f10SJohn Dyson 	vm_ooffset_t offset;
2113c0877f10SJohn Dyson 
21140cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
21150cddd8f0SMatthew Dillon 
	/*
	 * Only anonymous (default/swap) objects are split, and only when
	 * the object is shared (more than one reference).
	 */
2116c0877f10SJohn Dyson 	orig_object = entry->object.vm_object;
2117c0877f10SJohn Dyson 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2118c0877f10SJohn Dyson 		return;
2119c0877f10SJohn Dyson 	if (orig_object->ref_count <= 1)
2120c0877f10SJohn Dyson 		return;
2121c0877f10SJohn Dyson 
2122c0877f10SJohn Dyson 	offset = entry->offset;
2123c0877f10SJohn Dyson 	s = entry->start;
2124c0877f10SJohn Dyson 	e = entry->end;
2125c0877f10SJohn Dyson 
	/* Page-index span of this entry within orig_object. */
2126c0877f10SJohn Dyson 	offidxstart = OFF_TO_IDX(offset);
2127c0877f10SJohn Dyson 	offidxend = offidxstart + OFF_TO_IDX(e - s);
2128c0877f10SJohn Dyson 	size = offidxend - offidxstart;
2129c0877f10SJohn Dyson 
	/* Create a new object of the same pager type to receive the pages. */
2130c0877f10SJohn Dyson 	new_object = vm_pager_allocate(orig_object->type,
21316cde7a16SDavid Greenman 		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2132c0877f10SJohn Dyson 	if (new_object == NULL)
2133c0877f10SJohn Dyson 		return;
2134c0877f10SJohn Dyson 
	/*
	 * If orig_object shadows another object, make new_object shadow the
	 * same source at the adjusted offset, and account for the extra
	 * shadow reference on the source.
	 */
2135bd6be915SJohn Dyson 	source = orig_object->backing_object;
2136bd6be915SJohn Dyson 	if (source != NULL) {
2137bd6be915SJohn Dyson 		vm_object_reference(source);	/* Referenced by new_object */
2138bd6be915SJohn Dyson 		TAILQ_INSERT_TAIL(&source->shadow_head,
2139bd6be915SJohn Dyson 				  new_object, shadow_list);
2140069e9bc1SDoug Rabson 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
2141bd6be915SJohn Dyson 		new_object->backing_object_offset =
2142a0fce827SJohn Polstra 			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2143bd6be915SJohn Dyson 		new_object->backing_object = source;
2144bd6be915SJohn Dyson 		source->shadow_count++;
2145bd6be915SJohn Dyson 		source->generation++;
2146bd6be915SJohn Dyson 	}
2147bd6be915SJohn Dyson 
	/*
	 * Move the resident pages in the range into new_object.
	 * NOTE(review): the inner "vm_page_t m" shadows the declaration at
	 * the top of the function.
	 */
2148c0877f10SJohn Dyson 	for (idx = 0; idx < size; idx++) {
2149c0877f10SJohn Dyson 		vm_page_t m;
2150c0877f10SJohn Dyson 
2151c0877f10SJohn Dyson 	retry:
2152c0877f10SJohn Dyson 		m = vm_page_lookup(orig_object, offidxstart + idx);
2153c0877f10SJohn Dyson 		if (m == NULL)
2154c0877f10SJohn Dyson 			continue;
21551c7c3c6aSMatthew Dillon 
21561c7c3c6aSMatthew Dillon 		/*
21571c7c3c6aSMatthew Dillon 		 * We must wait for pending I/O to complete before we can
21581c7c3c6aSMatthew Dillon 		 * rename the page.
2159d1bf5d56SMatthew Dillon 		 *
2160d1bf5d56SMatthew Dillon 		 * We do not have to VM_PROT_NONE the page as mappings should
2161d1bf5d56SMatthew Dillon 		 * not be changed by this operation.
21621c7c3c6aSMatthew Dillon 		 */
21631c7c3c6aSMatthew Dillon 		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2164c0877f10SJohn Dyson 			goto retry;
2165c0877f10SJohn Dyson 
		/*
		 * NOTE(review): vm_page_busy() is called both before and
		 * after the rename; the pages are woken up in the loop
		 * further below.  The second busy call looks redundant —
		 * confirm against the vm_page KPI before changing.
		 */
2166e69763a3SDoug Rabson 		vm_page_busy(m);
2167c0877f10SJohn Dyson 		vm_page_rename(m, new_object, idx);
21687dbf82dcSMatthew Dillon 		/* page automatically made dirty by rename and cache handled */
2169e69763a3SDoug Rabson 		vm_page_busy(m);
2170c0877f10SJohn Dyson 	}
2171c0877f10SJohn Dyson 
	/* For swap-backed objects, move the swap space for the range too. */
2172c0877f10SJohn Dyson 	if (orig_object->type == OBJT_SWAP) {
2173d474eaaaSDoug Rabson 		vm_object_pip_add(orig_object, 1);
2174c0877f10SJohn Dyson 		/*
2175c0877f10SJohn Dyson 		 * copy orig_object pages into new_object
2176c0877f10SJohn Dyson 		 * and destroy unneeded pages in
2177c0877f10SJohn Dyson 		 * shadow object.
2178c0877f10SJohn Dyson 		 */
21791c7c3c6aSMatthew Dillon 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
2180c0877f10SJohn Dyson 		vm_object_pip_wakeup(orig_object);
2181c0877f10SJohn Dyson 	}
2182c0877f10SJohn Dyson 
	/* Wake up the pages that were busied during the move. */
218386524867SJohn Dyson 	for (idx = 0; idx < size; idx++) {
218486524867SJohn Dyson 		m = vm_page_lookup(new_object, idx);
218586524867SJohn Dyson 		if (m) {
2186e69763a3SDoug Rabson 			vm_page_wakeup(m);
218786524867SJohn Dyson 		}
218886524867SJohn Dyson 	}
218986524867SJohn Dyson 
	/*
	 * Point the entry at the new object (which now starts at offset 0
	 * for this entry) and drop the entry's reference on the original.
	 */
2190c0877f10SJohn Dyson 	entry->object.vm_object = new_object;
2191c0877f10SJohn Dyson 	entry->offset = 0LL;
2192c0877f10SJohn Dyson 	vm_object_deallocate(orig_object);
2193c0877f10SJohn Dyson }
2194c0877f10SJohn Dyson 
2195df8bae1dSRodney W. Grimes /*
2196df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
2197df8bae1dSRodney W. Grimes  *
2198df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
2199df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
2200df8bae1dSRodney W. Grimes  */
2201f708ef1bSPoul-Henning Kamp static void
22021b40f8c0SMatthew Dillon vm_map_copy_entry(
22031b40f8c0SMatthew Dillon 	vm_map_t src_map,
22041b40f8c0SMatthew Dillon 	vm_map_t dst_map,
22051b40f8c0SMatthew Dillon 	vm_map_entry_t src_entry,
22061b40f8c0SMatthew Dillon 	vm_map_entry_t dst_entry)
2207df8bae1dSRodney W. Grimes {
2208c0877f10SJohn Dyson 	vm_object_t src_object;
2209c0877f10SJohn Dyson 
	/* Submap entries are not copied. */
22109fdfe602SMatthew Dillon 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2211df8bae1dSRodney W. Grimes 		return;
2212df8bae1dSRodney W. Grimes 
2213df8bae1dSRodney W. Grimes 	if (src_entry->wired_count == 0) {
2214df8bae1dSRodney W. Grimes 
2215df8bae1dSRodney W. Grimes 		/*
22160d94caffSDavid Greenman 		 * If the source entry is marked needs_copy, it is already
22170d94caffSDavid Greenman 		 * write-protected.
2218df8bae1dSRodney W. Grimes 		 */
		/* Otherwise write-protect the source range to arm COW. */
2219afa07f7eSJohn Dyson 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2220df8bae1dSRodney W. Grimes 			pmap_protect(src_map->pmap,
2221df8bae1dSRodney W. Grimes 			    src_entry->start,
2222df8bae1dSRodney W. Grimes 			    src_entry->end,
2223df8bae1dSRodney W. Grimes 			    src_entry->protection & ~VM_PROT_WRITE);
2224df8bae1dSRodney W. Grimes 		}
2225b18bfc3dSJohn Dyson 
2226df8bae1dSRodney W. Grimes 		/*
2227df8bae1dSRodney W. Grimes 		 * Make a copy of the object.
2228df8bae1dSRodney W. Grimes 		 */
22298aef1712SMatthew Dillon 		if ((src_object = src_entry->object.vm_object) != NULL) {
2230c0877f10SJohn Dyson 
			/*
			 * For unnamed anonymous objects, try to collapse
			 * first, and split off this entry's range if the
			 * object is otherwise singly mapped.
			 */
2231c0877f10SJohn Dyson 			if ((src_object->handle == NULL) &&
2232c0877f10SJohn Dyson 				(src_object->type == OBJT_DEFAULT ||
2233c0877f10SJohn Dyson 				 src_object->type == OBJT_SWAP)) {
2234c0877f10SJohn Dyson 				vm_object_collapse(src_object);
223596fb8cf2SJohn Dyson 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2236c0877f10SJohn Dyson 					vm_map_split(src_entry);
2237c0877f10SJohn Dyson 					src_object = src_entry->object.vm_object;
2238c0877f10SJohn Dyson 				}
2239c0877f10SJohn Dyson 			}
2240c0877f10SJohn Dyson 
			/*
			 * Share the object between both entries and mark
			 * both copy-on-write.
			 */
2241c0877f10SJohn Dyson 			vm_object_reference(src_object);
2242069e9bc1SDoug Rabson 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2243c0877f10SJohn Dyson 			dst_entry->object.vm_object = src_object;
2244afa07f7eSJohn Dyson 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2245afa07f7eSJohn Dyson 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2246b18bfc3dSJohn Dyson 			dst_entry->offset = src_entry->offset;
2247b18bfc3dSJohn Dyson 		} else {
2248b18bfc3dSJohn Dyson 			dst_entry->object.vm_object = NULL;
2249b18bfc3dSJohn Dyson 			dst_entry->offset = 0;
2250b18bfc3dSJohn Dyson 		}
2251df8bae1dSRodney W. Grimes 
		/* Let the pmap copy any translations it can share cheaply. */
2252df8bae1dSRodney W. Grimes 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2253df8bae1dSRodney W. Grimes 		    dst_entry->end - dst_entry->start, src_entry->start);
22540d94caffSDavid Greenman 	} else {
2255df8bae1dSRodney W. Grimes 		/*
2256df8bae1dSRodney W. Grimes 		 * Of course, wired down pages can't be set copy-on-write.
22570d94caffSDavid Greenman 		 * Cause wired pages to be copied into the new map by
22580d94caffSDavid Greenman 		 * simulating faults (the new pages are pageable)
2259df8bae1dSRodney W. Grimes 		 */
2260df8bae1dSRodney W. Grimes 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2261df8bae1dSRodney W. Grimes 	}
2262df8bae1dSRodney W. Grimes }
2263df8bae1dSRodney W. Grimes 
2264df8bae1dSRodney W. Grimes /*
2265df8bae1dSRodney W. Grimes  * vmspace_fork:
2266df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
2267df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
2268df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
2269df8bae1dSRodney W. Grimes  * values on the regions in that map.
2270df8bae1dSRodney W. Grimes  *
2271df8bae1dSRodney W. Grimes  * The source map must not be locked.
2272df8bae1dSRodney W. Grimes  */
2273df8bae1dSRodney W. Grimes struct vmspace *
22741b40f8c0SMatthew Dillon vmspace_fork(struct vmspace *vm1)
2275df8bae1dSRodney W. Grimes {
2276c0877f10SJohn Dyson 	struct vmspace *vm2;
2277df8bae1dSRodney W. Grimes 	vm_map_t old_map = &vm1->vm_map;
2278df8bae1dSRodney W. Grimes 	vm_map_t new_map;
2279df8bae1dSRodney W. Grimes 	vm_map_entry_t old_entry;
2280df8bae1dSRodney W. Grimes 	vm_map_entry_t new_entry;
2281de5f6a77SJohn Dyson 	vm_object_t object;
2282df8bae1dSRodney W. Grimes 
22830cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
22840cddd8f0SMatthew Dillon 
	/* Lock the parent map and flag it as being forked. */
2285df8bae1dSRodney W. Grimes 	vm_map_lock(old_map);
2286b823bbd6SMatthew Dillon 	old_map->infork = 1;
2287df8bae1dSRodney W. Grimes 
	/*
	 * Allocate the child vmspace over the same address range and copy
	 * the statistics portion of the parent vmspace (vm_startcopy up to
	 * the end of the structure).
	 */
22882d8acc0fSJohn Dyson 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2289df8bae1dSRodney W. Grimes 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2290df8bae1dSRodney W. Grimes 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2291df8bae1dSRodney W. Grimes 	new_map = &vm2->vm_map;	/* XXX */
229247221757SJohn Dyson 	new_map->timestamp = 1;
2293df8bae1dSRodney W. Grimes 
	/* Walk every entry in the parent map, honoring its inheritance. */
2294df8bae1dSRodney W. Grimes 	old_entry = old_map->header.next;
2295df8bae1dSRodney W. Grimes 
2296df8bae1dSRodney W. Grimes 	while (old_entry != &old_map->header) {
2297afa07f7eSJohn Dyson 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2298df8bae1dSRodney W. Grimes 			panic("vm_map_fork: encountered a submap");
2299df8bae1dSRodney W. Grimes 
2300df8bae1dSRodney W. Grimes 		switch (old_entry->inheritance) {
2301df8bae1dSRodney W. Grimes 		case VM_INHERIT_NONE:
			/* Not inherited: the child simply skips this entry. */
2302df8bae1dSRodney W. Grimes 			break;
2303df8bae1dSRodney W. Grimes 
2304df8bae1dSRodney W. Grimes 		case VM_INHERIT_SHARE:
2305df8bae1dSRodney W. Grimes 			/*
2306fed9a903SJohn Dyson 			 * Clone the entry, creating the shared object if necessary.
2307fed9a903SJohn Dyson 			 */
2308fed9a903SJohn Dyson 			object = old_entry->object.vm_object;
2309fed9a903SJohn Dyson 			if (object == NULL) {
2310fed9a903SJohn Dyson 				object = vm_object_allocate(OBJT_DEFAULT,
2311c2e11a03SJohn Dyson 					atop(old_entry->end - old_entry->start));
2312fed9a903SJohn Dyson 				old_entry->object.vm_object = object;
2313fed9a903SJohn Dyson 				old_entry->offset = (vm_offset_t) 0;
23149a2f6362SAlan Cox 			}
23159a2f6362SAlan Cox 
23169a2f6362SAlan Cox 			/*
23179a2f6362SAlan Cox 			 * Add the reference before calling vm_object_shadow
23189a2f6362SAlan Cox 			 * to insure that a shadow object is created.
23199a2f6362SAlan Cox 			 */
23209a2f6362SAlan Cox 			vm_object_reference(object);
23219a2f6362SAlan Cox 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				/*
				 * Resolve a pending COW now so parent and
				 * child genuinely share one object.
				 */
23225069bf57SJohn Dyson 				vm_object_shadow(&old_entry->object.vm_object,
23235069bf57SJohn Dyson 					&old_entry->offset,
2324c2e11a03SJohn Dyson 					atop(old_entry->end - old_entry->start));
23255069bf57SJohn Dyson 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2326d30344bdSIan Dowse 				/* Transfer the second reference too. */
2327d30344bdSIan Dowse 				vm_object_reference(
2328d30344bdSIan Dowse 				    old_entry->object.vm_object);
2329d30344bdSIan Dowse 				vm_object_deallocate(object);
23305069bf57SJohn Dyson 				object = old_entry->object.vm_object;
2331fed9a903SJohn Dyson 			}
2332069e9bc1SDoug Rabson 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2333fed9a903SJohn Dyson 
2334fed9a903SJohn Dyson 			/*
2335ad5fca3bSAlan Cox 			 * Clone the entry, referencing the shared object.
2336df8bae1dSRodney W. Grimes 			 */
2337df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2338df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
			/* User wiring does not carry over to the child. */
2339028fe6ecSTor Egge 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2340df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
2341df8bae1dSRodney W. Grimes 
2342df8bae1dSRodney W. Grimes 			/*
23430d94caffSDavid Greenman 			 * Insert the entry into the new map -- we know we're
23440d94caffSDavid Greenman 			 * inserting at the end of the new map.
2345df8bae1dSRodney W. Grimes 			 */
2346df8bae1dSRodney W. Grimes 
2347df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2348df8bae1dSRodney W. Grimes 			    new_entry);
2349df8bae1dSRodney W. Grimes 
2350df8bae1dSRodney W. Grimes 			/*
2351df8bae1dSRodney W. Grimes 			 * Update the physical map
2352df8bae1dSRodney W. Grimes 			 */
2353df8bae1dSRodney W. Grimes 
2354df8bae1dSRodney W. Grimes 			pmap_copy(new_map->pmap, old_map->pmap,
2355df8bae1dSRodney W. Grimes 			    new_entry->start,
2356df8bae1dSRodney W. Grimes 			    (old_entry->end - old_entry->start),
2357df8bae1dSRodney W. Grimes 			    old_entry->start);
2358df8bae1dSRodney W. Grimes 			break;
2359df8bae1dSRodney W. Grimes 
2360df8bae1dSRodney W. Grimes 		case VM_INHERIT_COPY:
2361df8bae1dSRodney W. Grimes 			/*
2362df8bae1dSRodney W. Grimes 			 * Clone the entry and link into the map.
2363df8bae1dSRodney W. Grimes 			 */
2364df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2365df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
2366028fe6ecSTor Egge 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2367df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
2368df8bae1dSRodney W. Grimes 			new_entry->object.vm_object = NULL;
2369df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2370df8bae1dSRodney W. Grimes 			    new_entry);
			/* Set up COW sharing of the backing object. */
2371bd7e5f99SJohn Dyson 			vm_map_copy_entry(old_map, new_map, old_entry,
2372bd7e5f99SJohn Dyson 			    new_entry);
2373df8bae1dSRodney W. Grimes 			break;
2374df8bae1dSRodney W. Grimes 		}
2375df8bae1dSRodney W. Grimes 		old_entry = old_entry->next;
2376df8bae1dSRodney W. Grimes 	}
2377df8bae1dSRodney W. Grimes 
2378df8bae1dSRodney W. Grimes 	new_map->size = old_map->size;
2379b823bbd6SMatthew Dillon 	old_map->infork = 0;
2380df8bae1dSRodney W. Grimes 	vm_map_unlock(old_map);
2381df8bae1dSRodney W. Grimes 
2382df8bae1dSRodney W. Grimes 	return (vm2);
2383df8bae1dSRodney W. Grimes }
2384df8bae1dSRodney W. Grimes 
238594f7e29aSAlan Cox int
238694f7e29aSAlan Cox vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
238794f7e29aSAlan Cox 	      vm_prot_t prot, vm_prot_t max, int cow)
238894f7e29aSAlan Cox {
238994f7e29aSAlan Cox 	vm_map_entry_t prev_entry;
239094f7e29aSAlan Cox 	vm_map_entry_t new_stack_entry;
239194f7e29aSAlan Cox 	vm_size_t      init_ssize;
239294f7e29aSAlan Cox 	int            rv;
239394f7e29aSAlan Cox 
23940cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
23950cddd8f0SMatthew Dillon 
	/* Reject a bottom-of-stack address below the map's minimum. */
239694f7e29aSAlan Cox 	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
239794f7e29aSAlan Cox 		return (KERN_NO_SPACE);
239894f7e29aSAlan Cox 
	/* Initial mapping is at most sgrowsiz; the rest grows on demand. */
2399cbc89bfbSPaul Saab 	if (max_ssize < sgrowsiz)
240094f7e29aSAlan Cox 		init_ssize = max_ssize;
240194f7e29aSAlan Cox 	else
2402cbc89bfbSPaul Saab 		init_ssize = sgrowsiz;
240394f7e29aSAlan Cox 
240494f7e29aSAlan Cox 	vm_map_lock(map);
240594f7e29aSAlan Cox 
240694f7e29aSAlan Cox 	/* If addr is already mapped, no go */
240794f7e29aSAlan Cox 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
240894f7e29aSAlan Cox 		vm_map_unlock(map);
240994f7e29aSAlan Cox 		return (KERN_NO_SPACE);
241094f7e29aSAlan Cox 	}
241194f7e29aSAlan Cox 
241294f7e29aSAlan Cox 	/* If we can't accomodate max_ssize in the current mapping,
241394f7e29aSAlan Cox 	 * no go.  However, we need to be aware that subsequent user
241494f7e29aSAlan Cox 	 * mappings might map into the space we have reserved for
241594f7e29aSAlan Cox 	 * stack, and currently this space is not protected.
241694f7e29aSAlan Cox 	 *
241794f7e29aSAlan Cox 	 * Hopefully we will at least detect this condition
241894f7e29aSAlan Cox 	 * when we try to grow the stack.
241994f7e29aSAlan Cox 	 */
242094f7e29aSAlan Cox 	if ((prev_entry->next != &map->header) &&
242194f7e29aSAlan Cox 	    (prev_entry->next->start < addrbos + max_ssize)) {
242294f7e29aSAlan Cox 		vm_map_unlock(map);
242394f7e29aSAlan Cox 		return (KERN_NO_SPACE);
242494f7e29aSAlan Cox 	}
242594f7e29aSAlan Cox 
242694f7e29aSAlan Cox 	/* We initially map a stack of only init_ssize.  We will
242794f7e29aSAlan Cox 	 * grow as needed later.  Since this is to be a grow
242894f7e29aSAlan Cox 	 * down stack, we map at the top of the range.
242994f7e29aSAlan Cox 	 *
243094f7e29aSAlan Cox 	 * Note: we would normally expect prot and max to be
243194f7e29aSAlan Cox 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
243294f7e29aSAlan Cox 	 * eliminate these as input parameters, and just
243394f7e29aSAlan Cox 	 * pass these values here in the insert call.
243494f7e29aSAlan Cox 	 */
243594f7e29aSAlan Cox 	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
243694f7e29aSAlan Cox 	                   addrbos + max_ssize, prot, max, cow);
243794f7e29aSAlan Cox 
243894f7e29aSAlan Cox 	/* Now set the avail_ssize amount */
243994f7e29aSAlan Cox 	if (rv == KERN_SUCCESS){
		/*
		 * Clip the previous entry so the new stack entry stands
		 * alone, then record how much room remains for growth and
		 * sanity-check the inserted entry's bounds.
		 */
244029b45e9eSAlan Cox 		if (prev_entry != &map->header)
244129b45e9eSAlan Cox 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
244294f7e29aSAlan Cox 		new_stack_entry = prev_entry->next;
244394f7e29aSAlan Cox 		if (new_stack_entry->end   != addrbos + max_ssize ||
244494f7e29aSAlan Cox 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
244594f7e29aSAlan Cox 			panic ("Bad entry start/end for new stack entry");
244694f7e29aSAlan Cox 		else
244794f7e29aSAlan Cox 			new_stack_entry->avail_ssize = max_ssize - init_ssize;
244894f7e29aSAlan Cox 	}
244994f7e29aSAlan Cox 
245094f7e29aSAlan Cox 	vm_map_unlock(map);
245194f7e29aSAlan Cox 	return (rv);
245294f7e29aSAlan Cox }
245394f7e29aSAlan Cox 
245494f7e29aSAlan Cox /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
245594f7e29aSAlan Cox  * desired address is already mapped, or if we successfully grow
245694f7e29aSAlan Cox  * the stack.  Also returns KERN_SUCCESS if addr is outside the
245794f7e29aSAlan Cox  * stack range (this is strange, but preserves compatibility with
245894f7e29aSAlan Cox  * the grow function in vm_machdep.c).
245994f7e29aSAlan Cox  */
246094f7e29aSAlan Cox int
246194f7e29aSAlan Cox vm_map_growstack (struct proc *p, vm_offset_t addr)
246294f7e29aSAlan Cox {
246394f7e29aSAlan Cox 	vm_map_entry_t prev_entry;
246494f7e29aSAlan Cox 	vm_map_entry_t stack_entry;
246594f7e29aSAlan Cox 	vm_map_entry_t new_stack_entry;
246694f7e29aSAlan Cox 	struct vmspace *vm = p->p_vmspace;
246794f7e29aSAlan Cox 	vm_map_t map = &vm->vm_map;
246894f7e29aSAlan Cox 	vm_offset_t    end;
246994f7e29aSAlan Cox 	int      grow_amount;
247094f7e29aSAlan Cox 	int      rv;
247194f7e29aSAlan Cox 	int      is_procstack;
247223955314SAlfred Perlstein 
24730cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
247423955314SAlfred Perlstein 
247594f7e29aSAlan Cox Retry:
247694f7e29aSAlan Cox 	vm_map_lock_read(map);
247794f7e29aSAlan Cox 
247894f7e29aSAlan Cox 	/* If addr is already in the entry range, no need to grow.*/
247994f7e29aSAlan Cox 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
248094f7e29aSAlan Cox 		vm_map_unlock_read(map);
24810cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
248294f7e29aSAlan Cox 	}
248394f7e29aSAlan Cox 
248494f7e29aSAlan Cox 	if ((stack_entry = prev_entry->next) == &map->header) {
248594f7e29aSAlan Cox 		vm_map_unlock_read(map);
24860cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
248794f7e29aSAlan Cox 	}
248894f7e29aSAlan Cox 	if (prev_entry == &map->header)
248994f7e29aSAlan Cox 		end = stack_entry->start - stack_entry->avail_ssize;
249094f7e29aSAlan Cox 	else
249194f7e29aSAlan Cox 		end = prev_entry->end;
249294f7e29aSAlan Cox 
249394f7e29aSAlan Cox 	/* This next test mimics the old grow function in vm_machdep.c.
249494f7e29aSAlan Cox 	 * It really doesn't quite make sense, but we do it anyway
249594f7e29aSAlan Cox 	 * for compatibility.
249694f7e29aSAlan Cox 	 *
249794f7e29aSAlan Cox 	 * If not growable stack, return success.  This signals the
249894f7e29aSAlan Cox 	 * caller to proceed as he would normally with normal vm.
249994f7e29aSAlan Cox 	 */
250094f7e29aSAlan Cox 	if (stack_entry->avail_ssize < 1 ||
250194f7e29aSAlan Cox 	    addr >= stack_entry->start ||
250294f7e29aSAlan Cox 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
250394f7e29aSAlan Cox 		vm_map_unlock_read(map);
25040cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
250594f7e29aSAlan Cox 	}
250694f7e29aSAlan Cox 
250794f7e29aSAlan Cox 	/* Find the minimum grow amount */
250894f7e29aSAlan Cox 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
250994f7e29aSAlan Cox 	if (grow_amount > stack_entry->avail_ssize) {
251094f7e29aSAlan Cox 		vm_map_unlock_read(map);
25110cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
251294f7e29aSAlan Cox 	}
251394f7e29aSAlan Cox 
251494f7e29aSAlan Cox 	/* If there is no longer enough space between the entries
251594f7e29aSAlan Cox 	 * nogo, and adjust the available space.  Note: this
251694f7e29aSAlan Cox 	 * should only happen if the user has mapped into the
251794f7e29aSAlan Cox 	 * stack area after the stack was created, and is
251894f7e29aSAlan Cox 	 * probably an error.
251994f7e29aSAlan Cox 	 *
252094f7e29aSAlan Cox 	 * This also effectively destroys any guard page the user
252194f7e29aSAlan Cox 	 * might have intended by limiting the stack size.
252294f7e29aSAlan Cox 	 */
252394f7e29aSAlan Cox 	if (grow_amount > stack_entry->start - end) {
252494f7e29aSAlan Cox 		if (vm_map_lock_upgrade(map))
252594f7e29aSAlan Cox 			goto Retry;
252694f7e29aSAlan Cox 
252794f7e29aSAlan Cox 		stack_entry->avail_ssize = stack_entry->start - end;
252894f7e29aSAlan Cox 
252994f7e29aSAlan Cox 		vm_map_unlock(map);
25300cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
253194f7e29aSAlan Cox 	}
253294f7e29aSAlan Cox 
253394f7e29aSAlan Cox 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
253494f7e29aSAlan Cox 
253594f7e29aSAlan Cox 	/* If this is the main process stack, see if we're over the
253694f7e29aSAlan Cox 	 * stack limit.
253794f7e29aSAlan Cox 	 */
25386389da78SAlan Cox 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
253994f7e29aSAlan Cox 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
254094f7e29aSAlan Cox 		vm_map_unlock_read(map);
25410cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
254294f7e29aSAlan Cox 	}
254394f7e29aSAlan Cox 
254494f7e29aSAlan Cox 	/* Round up the grow amount modulo SGROWSIZ */
2545cbc89bfbSPaul Saab 	grow_amount = roundup (grow_amount, sgrowsiz);
254694f7e29aSAlan Cox 	if (grow_amount > stack_entry->avail_ssize) {
254794f7e29aSAlan Cox 		grow_amount = stack_entry->avail_ssize;
254894f7e29aSAlan Cox 	}
25496389da78SAlan Cox 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
255094f7e29aSAlan Cox 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
255194f7e29aSAlan Cox 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
25526389da78SAlan Cox 		              ctob(vm->vm_ssize);
255394f7e29aSAlan Cox 	}
255494f7e29aSAlan Cox 
255594f7e29aSAlan Cox 	if (vm_map_lock_upgrade(map))
255694f7e29aSAlan Cox 		goto Retry;
255794f7e29aSAlan Cox 
255894f7e29aSAlan Cox 	/* Get the preliminary new entry start value */
255994f7e29aSAlan Cox 	addr = stack_entry->start - grow_amount;
256094f7e29aSAlan Cox 
256194f7e29aSAlan Cox 	/* If this puts us into the previous entry, cut back our growth
256294f7e29aSAlan Cox 	 * to the available space.  Also, see the note above.
256394f7e29aSAlan Cox 	 */
256494f7e29aSAlan Cox 	if (addr < end) {
256594f7e29aSAlan Cox 		stack_entry->avail_ssize = stack_entry->start - end;
256694f7e29aSAlan Cox 		addr = end;
256794f7e29aSAlan Cox 	}
256894f7e29aSAlan Cox 
256994f7e29aSAlan Cox 	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
257029b45e9eSAlan Cox 			   VM_PROT_ALL,
257129b45e9eSAlan Cox 			   VM_PROT_ALL,
257294f7e29aSAlan Cox 			   0);
257394f7e29aSAlan Cox 
257494f7e29aSAlan Cox 	/* Adjust the available stack space by the amount we grew. */
257594f7e29aSAlan Cox 	if (rv == KERN_SUCCESS) {
257629b45e9eSAlan Cox 		if (prev_entry != &map->header)
257729b45e9eSAlan Cox 			vm_map_clip_end(map, prev_entry, addr);
257894f7e29aSAlan Cox 		new_stack_entry = prev_entry->next;
257994f7e29aSAlan Cox 		if (new_stack_entry->end   != stack_entry->start  ||
258094f7e29aSAlan Cox 		    new_stack_entry->start != addr)
258194f7e29aSAlan Cox 			panic ("Bad stack grow start/end in new stack entry");
258294f7e29aSAlan Cox 		else {
258394f7e29aSAlan Cox 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
258494f7e29aSAlan Cox 							(new_stack_entry->end -
258594f7e29aSAlan Cox 							 new_stack_entry->start);
258694f7e29aSAlan Cox 			if (is_procstack)
25876389da78SAlan Cox 				vm->vm_ssize += btoc(new_stack_entry->end -
25886389da78SAlan Cox 						     new_stack_entry->start);
258994f7e29aSAlan Cox 		}
259094f7e29aSAlan Cox 	}
259194f7e29aSAlan Cox 
259294f7e29aSAlan Cox 	vm_map_unlock(map);
25930cddd8f0SMatthew Dillon 	return (rv);
259494f7e29aSAlan Cox }
259594f7e29aSAlan Cox 
2596df8bae1dSRodney W. Grimes /*
25975856e12eSJohn Dyson  * Unshare the specified VM space for exec.  If other processes are
25985856e12eSJohn Dyson  * mapped to it, then create a new one.  The new vmspace is null.
25995856e12eSJohn Dyson  */
26005856e12eSJohn Dyson 
26015856e12eSJohn Dyson void
26021b40f8c0SMatthew Dillon vmspace_exec(struct proc *p)
26031b40f8c0SMatthew Dillon {
26045856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
26055856e12eSJohn Dyson 	struct vmspace *newvmspace;
26065856e12eSJohn Dyson 	vm_map_t map = &p->p_vmspace->vm_map;
26075856e12eSJohn Dyson 
26080cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
26092d8acc0fSJohn Dyson 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
26105856e12eSJohn Dyson 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
26115856e12eSJohn Dyson 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
26125856e12eSJohn Dyson 	/*
26135856e12eSJohn Dyson 	 * This code is written like this for prototype purposes.  The
26145856e12eSJohn Dyson 	 * goal is to avoid running down the vmspace here, but let the
26155856e12eSJohn Dyson 	 * other process's that are still using the vmspace to finally
26165856e12eSJohn Dyson 	 * run it down.  Even though there is little or no chance of blocking
26175856e12eSJohn Dyson 	 * here, it is a good idea to keep this form for future mods.
26185856e12eSJohn Dyson 	 */
26195856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
2620d4da2dbaSAlan Cox 	pmap_pinit2(vmspace_pmap(newvmspace));
262121c641b2SJohn Baldwin 	vmspace_free(oldvmspace);
2622b40ce416SJulian Elischer 	if (p == curthread->td_proc)		/* XXXKSE ? */
2623b40ce416SJulian Elischer 		pmap_activate(curthread);
26245856e12eSJohn Dyson }
26255856e12eSJohn Dyson 
26265856e12eSJohn Dyson /*
26275856e12eSJohn Dyson  * Unshare the specified VM space for forcing COW.  This
26285856e12eSJohn Dyson  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
26295856e12eSJohn Dyson  */
26305856e12eSJohn Dyson 
26315856e12eSJohn Dyson void
26321b40f8c0SMatthew Dillon vmspace_unshare(struct proc *p)
26331b40f8c0SMatthew Dillon {
26345856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
26355856e12eSJohn Dyson 	struct vmspace *newvmspace;
26365856e12eSJohn Dyson 
26370cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
26385856e12eSJohn Dyson 	if (oldvmspace->vm_refcnt == 1)
26395856e12eSJohn Dyson 		return;
26405856e12eSJohn Dyson 	newvmspace = vmspace_fork(oldvmspace);
26415856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
2642d4da2dbaSAlan Cox 	pmap_pinit2(vmspace_pmap(newvmspace));
264321c641b2SJohn Baldwin 	vmspace_free(oldvmspace);
2644b40ce416SJulian Elischer 	if (p == curthread->td_proc)		/* XXXKSE ? */
2645b40ce416SJulian Elischer 		pmap_activate(curthread);
26465856e12eSJohn Dyson }
26475856e12eSJohn Dyson 
26485856e12eSJohn Dyson 
26495856e12eSJohn Dyson /*
2650df8bae1dSRodney W. Grimes  *	vm_map_lookup:
2651df8bae1dSRodney W. Grimes  *
2652df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
2653df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
2654df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
2655df8bae1dSRodney W. Grimes  *	type specified.
2656df8bae1dSRodney W. Grimes  *
2657df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
2658df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
2659df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
2660df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
2661df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
2662df8bae1dSRodney W. Grimes  *
2663df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
2664df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
2665df8bae1dSRodney W. Grimes  *
2666df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
2667df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
2668df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
2669df8bae1dSRodney W. Grimes  *	remain the same.
2670df8bae1dSRodney W. Grimes  */
2671df8bae1dSRodney W. Grimes int
2672b9dcd593SBruce Evans vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2673b9dcd593SBruce Evans 	      vm_offset_t vaddr,
267447221757SJohn Dyson 	      vm_prot_t fault_typea,
2675b9dcd593SBruce Evans 	      vm_map_entry_t *out_entry,	/* OUT */
2676b9dcd593SBruce Evans 	      vm_object_t *object,		/* OUT */
2677b9dcd593SBruce Evans 	      vm_pindex_t *pindex,		/* OUT */
2678b9dcd593SBruce Evans 	      vm_prot_t *out_prot,		/* OUT */
26792d8acc0fSJohn Dyson 	      boolean_t *wired)			/* OUT */
2680df8bae1dSRodney W. Grimes {
2681c0877f10SJohn Dyson 	vm_map_entry_t entry;
2682c0877f10SJohn Dyson 	vm_map_t map = *var_map;
2683c0877f10SJohn Dyson 	vm_prot_t prot;
268447221757SJohn Dyson 	vm_prot_t fault_type = fault_typea;
2685df8bae1dSRodney W. Grimes 
26860cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
2687df8bae1dSRodney W. Grimes RetryLookup:;
2688df8bae1dSRodney W. Grimes 
2689df8bae1dSRodney W. Grimes 	/*
2690df8bae1dSRodney W. Grimes 	 * Lookup the faulting address.
2691df8bae1dSRodney W. Grimes 	 */
2692df8bae1dSRodney W. Grimes 
2693df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
2694df8bae1dSRodney W. Grimes 
2695df8bae1dSRodney W. Grimes #define	RETURN(why) \
2696df8bae1dSRodney W. Grimes 		{ \
2697df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map); \
2698df8bae1dSRodney W. Grimes 		return(why); \
2699df8bae1dSRodney W. Grimes 		}
2700df8bae1dSRodney W. Grimes 
2701df8bae1dSRodney W. Grimes 	/*
27020d94caffSDavid Greenman 	 * If the map has an interesting hint, try it before calling full
27030d94caffSDavid Greenman 	 * blown lookup routine.
2704df8bae1dSRodney W. Grimes 	 */
2705df8bae1dSRodney W. Grimes 
2706df8bae1dSRodney W. Grimes 	entry = map->hint;
2707df8bae1dSRodney W. Grimes 
2708df8bae1dSRodney W. Grimes 	*out_entry = entry;
2709df8bae1dSRodney W. Grimes 
2710df8bae1dSRodney W. Grimes 	if ((entry == &map->header) ||
2711df8bae1dSRodney W. Grimes 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2712df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp_entry;
2713df8bae1dSRodney W. Grimes 
2714df8bae1dSRodney W. Grimes 		/*
27150d94caffSDavid Greenman 		 * Entry was either not a valid hint, or the vaddr was not
27160d94caffSDavid Greenman 		 * contained in the entry, so do a full lookup.
2717df8bae1dSRodney W. Grimes 		 */
2718df8bae1dSRodney W. Grimes 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2719df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2720df8bae1dSRodney W. Grimes 
2721df8bae1dSRodney W. Grimes 		entry = tmp_entry;
2722df8bae1dSRodney W. Grimes 		*out_entry = entry;
2723df8bae1dSRodney W. Grimes 	}
2724b7b2aac2SJohn Dyson 
2725df8bae1dSRodney W. Grimes 	/*
2726df8bae1dSRodney W. Grimes 	 * Handle submaps.
2727df8bae1dSRodney W. Grimes 	 */
2728df8bae1dSRodney W. Grimes 
2729afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2730df8bae1dSRodney W. Grimes 		vm_map_t old_map = map;
2731df8bae1dSRodney W. Grimes 
2732df8bae1dSRodney W. Grimes 		*var_map = map = entry->object.sub_map;
2733df8bae1dSRodney W. Grimes 		vm_map_unlock_read(old_map);
2734df8bae1dSRodney W. Grimes 		goto RetryLookup;
2735df8bae1dSRodney W. Grimes 	}
2736a04c970aSJohn Dyson 
2737df8bae1dSRodney W. Grimes 	/*
27380d94caffSDavid Greenman 	 * Check whether this task is allowed to have this page.
2739a04c970aSJohn Dyson 	 * Note the special case for MAP_ENTRY_COW
2740a04c970aSJohn Dyson 	 * pages with an override.  This is to implement a forced
2741a04c970aSJohn Dyson 	 * COW for debuggers.
2742df8bae1dSRodney W. Grimes 	 */
2743df8bae1dSRodney W. Grimes 
2744480ba2f5SJohn Dyson 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2745480ba2f5SJohn Dyson 		prot = entry->max_protection;
2746480ba2f5SJohn Dyson 	else
2747df8bae1dSRodney W. Grimes 		prot = entry->protection;
274847221757SJohn Dyson 
274947221757SJohn Dyson 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
275047221757SJohn Dyson 	if ((fault_type & prot) != fault_type) {
275147221757SJohn Dyson 			RETURN(KERN_PROTECTION_FAILURE);
275247221757SJohn Dyson 	}
275347221757SJohn Dyson 
27542ed14a92SAlan Cox 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
275547221757SJohn Dyson 	    (entry->eflags & MAP_ENTRY_COW) &&
27562ed14a92SAlan Cox 	    (fault_type & VM_PROT_WRITE) &&
275747221757SJohn Dyson 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2758df8bae1dSRodney W. Grimes 		RETURN(KERN_PROTECTION_FAILURE);
2759a04c970aSJohn Dyson 	}
2760df8bae1dSRodney W. Grimes 
2761df8bae1dSRodney W. Grimes 	/*
27620d94caffSDavid Greenman 	 * If this page is not pageable, we have to get it for all possible
27630d94caffSDavid Greenman 	 * accesses.
2764df8bae1dSRodney W. Grimes 	 */
2765df8bae1dSRodney W. Grimes 
276605f0fdd2SPoul-Henning Kamp 	*wired = (entry->wired_count != 0);
276705f0fdd2SPoul-Henning Kamp 	if (*wired)
2768df8bae1dSRodney W. Grimes 		prot = fault_type = entry->protection;
2769df8bae1dSRodney W. Grimes 
2770df8bae1dSRodney W. Grimes 	/*
2771df8bae1dSRodney W. Grimes 	 * If the entry was copy-on-write, we either ...
2772df8bae1dSRodney W. Grimes 	 */
2773df8bae1dSRodney W. Grimes 
2774afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2775df8bae1dSRodney W. Grimes 		/*
27760d94caffSDavid Greenman 		 * If we want to write the page, we may as well handle that
2777ad5fca3bSAlan Cox 		 * now since we've got the map locked.
2778df8bae1dSRodney W. Grimes 		 *
27790d94caffSDavid Greenman 		 * If we don't need to write the page, we just demote the
27800d94caffSDavid Greenman 		 * permissions allowed.
2781df8bae1dSRodney W. Grimes 		 */
2782df8bae1dSRodney W. Grimes 
2783df8bae1dSRodney W. Grimes 		if (fault_type & VM_PROT_WRITE) {
2784df8bae1dSRodney W. Grimes 			/*
27850d94caffSDavid Greenman 			 * Make a new object, and place it in the object
27860d94caffSDavid Greenman 			 * chain.  Note that no new references have appeared
2787ad5fca3bSAlan Cox 			 * -- one just moved from the map to the new
27880d94caffSDavid Greenman 			 * object.
2789df8bae1dSRodney W. Grimes 			 */
2790df8bae1dSRodney W. Grimes 
27919b09b6c7SMatthew Dillon 			if (vm_map_lock_upgrade(map))
2792df8bae1dSRodney W. Grimes 				goto RetryLookup;
27939b09b6c7SMatthew Dillon 
2794df8bae1dSRodney W. Grimes 			vm_object_shadow(
2795df8bae1dSRodney W. Grimes 			    &entry->object.vm_object,
2796df8bae1dSRodney W. Grimes 			    &entry->offset,
2797c2e11a03SJohn Dyson 			    atop(entry->end - entry->start));
2798df8bae1dSRodney W. Grimes 
2799afa07f7eSJohn Dyson 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
28009b09b6c7SMatthew Dillon 			vm_map_lock_downgrade(map);
28010d94caffSDavid Greenman 		} else {
2802df8bae1dSRodney W. Grimes 			/*
28030d94caffSDavid Greenman 			 * We're attempting to read a copy-on-write page --
28040d94caffSDavid Greenman 			 * don't allow writes.
2805df8bae1dSRodney W. Grimes 			 */
2806df8bae1dSRodney W. Grimes 
28072d8acc0fSJohn Dyson 			prot &= ~VM_PROT_WRITE;
2808df8bae1dSRodney W. Grimes 		}
2809df8bae1dSRodney W. Grimes 	}
28102d8acc0fSJohn Dyson 
2811df8bae1dSRodney W. Grimes 	/*
2812df8bae1dSRodney W. Grimes 	 * Create an object if necessary.
2813df8bae1dSRodney W. Grimes 	 */
28144e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL &&
28154e71e795SMatthew Dillon 	    !map->system_map) {
28169b09b6c7SMatthew Dillon 		if (vm_map_lock_upgrade(map))
2817df8bae1dSRodney W. Grimes 			goto RetryLookup;
28189b09b6c7SMatthew Dillon 
281924a1cce3SDavid Greenman 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2820c2e11a03SJohn Dyson 		    atop(entry->end - entry->start));
2821df8bae1dSRodney W. Grimes 		entry->offset = 0;
28229b09b6c7SMatthew Dillon 		vm_map_lock_downgrade(map);
2823df8bae1dSRodney W. Grimes 	}
2824b5b40fa6SJohn Dyson 
2825df8bae1dSRodney W. Grimes 	/*
28260d94caffSDavid Greenman 	 * Return the object/offset from this entry.  If the entry was
28270d94caffSDavid Greenman 	 * copy-on-write or empty, it has been fixed up.
2828df8bae1dSRodney W. Grimes 	 */
2829df8bae1dSRodney W. Grimes 
28309b09b6c7SMatthew Dillon 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
2831df8bae1dSRodney W. Grimes 	*object = entry->object.vm_object;
2832df8bae1dSRodney W. Grimes 
2833df8bae1dSRodney W. Grimes 	/*
2834df8bae1dSRodney W. Grimes 	 * Return whether this is the only map sharing this data.
2835df8bae1dSRodney W. Grimes 	 */
2836df8bae1dSRodney W. Grimes 
2837df8bae1dSRodney W. Grimes 	*out_prot = prot;
2838df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2839df8bae1dSRodney W. Grimes 
2840df8bae1dSRodney W. Grimes #undef	RETURN
2841df8bae1dSRodney W. Grimes }
2842df8bae1dSRodney W. Grimes 
2843df8bae1dSRodney W. Grimes /*
2844df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
2845df8bae1dSRodney W. Grimes  *
2846df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
2847df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
2848df8bae1dSRodney W. Grimes  */
2849df8bae1dSRodney W. Grimes 
28500d94caffSDavid Greenman void
28511b40f8c0SMatthew Dillon vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
2852df8bae1dSRodney W. Grimes {
2853df8bae1dSRodney W. Grimes 	/*
2854df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
2855df8bae1dSRodney W. Grimes 	 */
28560cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
2857df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
2858df8bae1dSRodney W. Grimes }
2859df8bae1dSRodney W. Grimes 
28601efb74fbSJohn Dyson /*
28611efb74fbSJohn Dyson  * Implement uiomove with VM operations.  This handles (and collateral changes)
28621efb74fbSJohn Dyson  * support every combination of source object modification, and COW type
28631efb74fbSJohn Dyson  * operations.
28641efb74fbSJohn Dyson  */
28651efb74fbSJohn Dyson int
28661b40f8c0SMatthew Dillon vm_uiomove(
28671b40f8c0SMatthew Dillon 	vm_map_t mapa,
28681b40f8c0SMatthew Dillon 	vm_object_t srcobject,
28691b40f8c0SMatthew Dillon 	off_t cp,
28701b40f8c0SMatthew Dillon 	int cnta,
28711b40f8c0SMatthew Dillon 	vm_offset_t uaddra,
28721b40f8c0SMatthew Dillon 	int *npages)
28731efb74fbSJohn Dyson {
28741efb74fbSJohn Dyson 	vm_map_t map;
287547221757SJohn Dyson 	vm_object_t first_object, oldobject, object;
28762d8acc0fSJohn Dyson 	vm_map_entry_t entry;
28771efb74fbSJohn Dyson 	vm_prot_t prot;
28782d8acc0fSJohn Dyson 	boolean_t wired;
28791efb74fbSJohn Dyson 	int tcnt, rv;
28802d8acc0fSJohn Dyson 	vm_offset_t uaddr, start, end, tend;
28811efb74fbSJohn Dyson 	vm_pindex_t first_pindex, osize, oindex;
28821efb74fbSJohn Dyson 	off_t ooffset;
288347221757SJohn Dyson 	int cnt;
28841efb74fbSJohn Dyson 
28850cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
28860cddd8f0SMatthew Dillon 
288795e5e988SJohn Dyson 	if (npages)
288895e5e988SJohn Dyson 		*npages = 0;
288995e5e988SJohn Dyson 
289047221757SJohn Dyson 	cnt = cnta;
28912d8acc0fSJohn Dyson 	uaddr = uaddra;
28922d8acc0fSJohn Dyson 
28931efb74fbSJohn Dyson 	while (cnt > 0) {
28941efb74fbSJohn Dyson 		map = mapa;
28951efb74fbSJohn Dyson 
28961efb74fbSJohn Dyson 		if ((vm_map_lookup(&map, uaddr,
28972d8acc0fSJohn Dyson 			VM_PROT_READ, &entry, &first_object,
28982d8acc0fSJohn Dyson 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
28991efb74fbSJohn Dyson 			return EFAULT;
29001efb74fbSJohn Dyson 		}
29011efb74fbSJohn Dyson 
29022d8acc0fSJohn Dyson 		vm_map_clip_start(map, entry, uaddr);
29031efb74fbSJohn Dyson 
29041efb74fbSJohn Dyson 		tcnt = cnt;
29052d8acc0fSJohn Dyson 		tend = uaddr + tcnt;
29062d8acc0fSJohn Dyson 		if (tend > entry->end) {
29072d8acc0fSJohn Dyson 			tcnt = entry->end - uaddr;
29082d8acc0fSJohn Dyson 			tend = entry->end;
29092d8acc0fSJohn Dyson 		}
29101efb74fbSJohn Dyson 
29112d8acc0fSJohn Dyson 		vm_map_clip_end(map, entry, tend);
29121efb74fbSJohn Dyson 
29132d8acc0fSJohn Dyson 		start = entry->start;
29142d8acc0fSJohn Dyson 		end = entry->end;
29151efb74fbSJohn Dyson 
2916c2e11a03SJohn Dyson 		osize = atop(tcnt);
291795e5e988SJohn Dyson 
2918925a3a41SJohn Dyson 		oindex = OFF_TO_IDX(cp);
291995e5e988SJohn Dyson 		if (npages) {
2920925a3a41SJohn Dyson 			vm_pindex_t idx;
292195e5e988SJohn Dyson 			for (idx = 0; idx < osize; idx++) {
292295e5e988SJohn Dyson 				vm_page_t m;
2923925a3a41SJohn Dyson 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
29242d8acc0fSJohn Dyson 					vm_map_lookup_done(map, entry);
292595e5e988SJohn Dyson 					return 0;
292695e5e988SJohn Dyson 				}
29271c7c3c6aSMatthew Dillon 				/*
29281c7c3c6aSMatthew Dillon 				 * disallow busy or invalid pages, but allow
29291c7c3c6aSMatthew Dillon 				 * m->busy pages if they are entirely valid.
29301c7c3c6aSMatthew Dillon 				 */
2931925a3a41SJohn Dyson 				if ((m->flags & PG_BUSY) ||
293295e5e988SJohn Dyson 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
29332d8acc0fSJohn Dyson 					vm_map_lookup_done(map, entry);
293495e5e988SJohn Dyson 					return 0;
293595e5e988SJohn Dyson 				}
293695e5e988SJohn Dyson 			}
293795e5e988SJohn Dyson 		}
293895e5e988SJohn Dyson 
29391efb74fbSJohn Dyson /*
29401efb74fbSJohn Dyson  * If we are changing an existing map entry, just redirect
29411efb74fbSJohn Dyson  * the object, and change mappings.
29421efb74fbSJohn Dyson  */
29432d8acc0fSJohn Dyson 		if ((first_object->type == OBJT_VNODE) &&
29442d8acc0fSJohn Dyson 			((oldobject = entry->object.vm_object) == first_object)) {
29452d8acc0fSJohn Dyson 
29462d8acc0fSJohn Dyson 			if ((entry->offset != cp) || (oldobject != srcobject)) {
29472d8acc0fSJohn Dyson 				/*
29482d8acc0fSJohn Dyson    				* Remove old window into the file
29492d8acc0fSJohn Dyson    				*/
29502d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
29512d8acc0fSJohn Dyson 
29522d8acc0fSJohn Dyson 				/*
29532d8acc0fSJohn Dyson    				* Force copy on write for mmaped regions
29542d8acc0fSJohn Dyson    				*/
29552d8acc0fSJohn Dyson 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
29562d8acc0fSJohn Dyson 
29572d8acc0fSJohn Dyson 				/*
29582d8acc0fSJohn Dyson    				* Point the object appropriately
29592d8acc0fSJohn Dyson    				*/
29602d8acc0fSJohn Dyson 				if (oldobject != srcobject) {
29612d8acc0fSJohn Dyson 
29622d8acc0fSJohn Dyson 				/*
29632d8acc0fSJohn Dyson    				* Set the object optimization hint flag
29642d8acc0fSJohn Dyson    				*/
2965069e9bc1SDoug Rabson 					vm_object_set_flag(srcobject, OBJ_OPT);
29662d8acc0fSJohn Dyson 					vm_object_reference(srcobject);
29672d8acc0fSJohn Dyson 					entry->object.vm_object = srcobject;
29682d8acc0fSJohn Dyson 
29692d8acc0fSJohn Dyson 					if (oldobject) {
29702d8acc0fSJohn Dyson 						vm_object_deallocate(oldobject);
29712d8acc0fSJohn Dyson 					}
29722d8acc0fSJohn Dyson 				}
29732d8acc0fSJohn Dyson 
29742d8acc0fSJohn Dyson 				entry->offset = cp;
29752d8acc0fSJohn Dyson 				map->timestamp++;
29762d8acc0fSJohn Dyson 			} else {
29772d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
29782d8acc0fSJohn Dyson 			}
29792d8acc0fSJohn Dyson 
29802d8acc0fSJohn Dyson 		} else if ((first_object->ref_count == 1) &&
2981925a3a41SJohn Dyson 			(first_object->size == osize) &&
298247221757SJohn Dyson 			((first_object->type == OBJT_DEFAULT) ||
298347221757SJohn Dyson 				(first_object->type == OBJT_SWAP)) ) {
2984925a3a41SJohn Dyson 
2985925a3a41SJohn Dyson 			oldobject = first_object->backing_object;
2986925a3a41SJohn Dyson 
2987925a3a41SJohn Dyson 			if ((first_object->backing_object_offset != cp) ||
2988925a3a41SJohn Dyson 				(oldobject != srcobject)) {
2989925a3a41SJohn Dyson 				/*
2990925a3a41SJohn Dyson    				* Remove old window into the file
2991925a3a41SJohn Dyson    				*/
29922d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
2993925a3a41SJohn Dyson 
2994925a3a41SJohn Dyson 				/*
299547221757SJohn Dyson 				 * Remove unneeded old pages
299647221757SJohn Dyson 				 */
299747221757SJohn Dyson 				vm_object_page_remove(first_object, 0, 0, 0);
299847221757SJohn Dyson 
299947221757SJohn Dyson 				/*
300047221757SJohn Dyson 				 * Invalidate swap space
300147221757SJohn Dyson 				 */
300247221757SJohn Dyson 				if (first_object->type == OBJT_SWAP) {
300347221757SJohn Dyson 					swap_pager_freespace(first_object,
30041c7c3c6aSMatthew Dillon 						0,
300547221757SJohn Dyson 						first_object->size);
300647221757SJohn Dyson 				}
300747221757SJohn Dyson 
300847221757SJohn Dyson 				/*
3009925a3a41SJohn Dyson    				* Force copy on write for mmaped regions
3010925a3a41SJohn Dyson    				*/
301147221757SJohn Dyson 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
30121efb74fbSJohn Dyson 
30131efb74fbSJohn Dyson 				/*
30141efb74fbSJohn Dyson    				* Point the object appropriately
30151efb74fbSJohn Dyson    				*/
3016925a3a41SJohn Dyson 				if (oldobject != srcobject) {
301747221757SJohn Dyson 
3018925a3a41SJohn Dyson 				/*
3019925a3a41SJohn Dyson    				* Set the object optimization hint flag
3020925a3a41SJohn Dyson    				*/
3021069e9bc1SDoug Rabson 					vm_object_set_flag(srcobject, OBJ_OPT);
3022925a3a41SJohn Dyson 					vm_object_reference(srcobject);
3023925a3a41SJohn Dyson 
3024925a3a41SJohn Dyson 					if (oldobject) {
3025925a3a41SJohn Dyson 						TAILQ_REMOVE(&oldobject->shadow_head,
3026925a3a41SJohn Dyson 							first_object, shadow_list);
3027925a3a41SJohn Dyson 						oldobject->shadow_count--;
3028b4309055SMatthew Dillon 						/* XXX bump generation? */
3029925a3a41SJohn Dyson 						vm_object_deallocate(oldobject);
3030925a3a41SJohn Dyson 					}
3031925a3a41SJohn Dyson 
3032925a3a41SJohn Dyson 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
3033925a3a41SJohn Dyson 						first_object, shadow_list);
3034925a3a41SJohn Dyson 					srcobject->shadow_count++;
3035b4309055SMatthew Dillon 					/* XXX bump generation? */
3036925a3a41SJohn Dyson 
3037925a3a41SJohn Dyson 					first_object->backing_object = srcobject;
3038925a3a41SJohn Dyson 				}
30391efb74fbSJohn Dyson 				first_object->backing_object_offset = cp;
30402d8acc0fSJohn Dyson 				map->timestamp++;
3041925a3a41SJohn Dyson 			} else {
30422d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
3043925a3a41SJohn Dyson 			}
30441efb74fbSJohn Dyson /*
30451efb74fbSJohn Dyson  * Otherwise, we have to do a logical mmap.
30461efb74fbSJohn Dyson  */
30471efb74fbSJohn Dyson 		} else {
30481efb74fbSJohn Dyson 
3049069e9bc1SDoug Rabson 			vm_object_set_flag(srcobject, OBJ_OPT);
3050925a3a41SJohn Dyson 			vm_object_reference(srcobject);
30511efb74fbSJohn Dyson 
30522d8acc0fSJohn Dyson 			pmap_remove (map->pmap, uaddr, tend);
30531efb74fbSJohn Dyson 
305447221757SJohn Dyson 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
305547221757SJohn Dyson 			vm_map_lock_upgrade(map);
30561efb74fbSJohn Dyson 
30572d8acc0fSJohn Dyson 			if (entry == &map->header) {
30581efb74fbSJohn Dyson 				map->first_free = &map->header;
30591efb74fbSJohn Dyson 			} else if (map->first_free->start >= start) {
30602d8acc0fSJohn Dyson 				map->first_free = entry->prev;
30611efb74fbSJohn Dyson 			}
30621efb74fbSJohn Dyson 
30632d8acc0fSJohn Dyson 			SAVE_HINT(map, entry->prev);
30642d8acc0fSJohn Dyson 			vm_map_entry_delete(map, entry);
30651efb74fbSJohn Dyson 
30662d8acc0fSJohn Dyson 			object = srcobject;
30672d8acc0fSJohn Dyson 			ooffset = cp;
30682d8acc0fSJohn Dyson 
30692d8acc0fSJohn Dyson 			rv = vm_map_insert(map, object, ooffset, start, tend,
3070e5f13bddSAlan Cox 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
30711efb74fbSJohn Dyson 
30721efb74fbSJohn Dyson 			if (rv != KERN_SUCCESS)
30731efb74fbSJohn Dyson 				panic("vm_uiomove: could not insert new entry: %d", rv);
30741efb74fbSJohn Dyson 		}
30751efb74fbSJohn Dyson 
30761efb74fbSJohn Dyson /*
30771efb74fbSJohn Dyson  * Map the window directly, if it is already in memory
30781efb74fbSJohn Dyson  */
30792d8acc0fSJohn Dyson 		pmap_object_init_pt(map->pmap, uaddr,
30802d8acc0fSJohn Dyson 			srcobject, oindex, tcnt, 0);
30811efb74fbSJohn Dyson 
308247221757SJohn Dyson 		map->timestamp++;
30831efb74fbSJohn Dyson 		vm_map_unlock(map);
30841efb74fbSJohn Dyson 
30851efb74fbSJohn Dyson 		cnt -= tcnt;
30862d8acc0fSJohn Dyson 		uaddr += tcnt;
30871efb74fbSJohn Dyson 		cp += tcnt;
308895e5e988SJohn Dyson 		if (npages)
308995e5e988SJohn Dyson 			*npages += osize;
30901efb74fbSJohn Dyson 	}
30911efb74fbSJohn Dyson 	return 0;
30921efb74fbSJohn Dyson }
30931efb74fbSJohn Dyson 
/*
 * Performs the copy_on_write operations necessary to allow the virtual copies
 * into user space to work.  This has to be called for write(2) system calls
 * from other processes, file unlinking, and file size shrinkage.
 *
 * Every shadow object of `object' has its pages filled in (copying them from
 * the backing object, paging them in if necessary) and is then detached from
 * `object'; finally OBJ_OPT is cleared on `object'.
 *
 * NOTE(review): the `froma' and `toa' range arguments are accepted but never
 * referenced in the body -- every shadow object is processed over its entire
 * size regardless of the requested range.
 */
void
vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
{
	int rv;
	vm_object_t robject;
	vm_pindex_t idx;

	GIANT_REQUIRED;
	/* Nothing to do unless the object participates in the
	 * copy optimization (OBJ_OPT). */
	if ((object == NULL) ||
		((object->flags & OBJ_OPT) == 0))
		return;

	/* Sanity check: every shadow must hold a reference. */
	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	/* Detach shadow objects one at a time until the list is empty. */
	while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
		vm_pindex_t bo_pindex;
		vm_page_t m_in, m_out;

		/* Pindex within `object' that backs robject's page 0. */
		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

		/* Take a reference so robject cannot be destroyed while
		 * we sleep waiting for in-progress pager I/O to drain. */
		vm_object_reference(robject);

		vm_object_pip_wait(robject, "objfrz");

		/* If ours is now the only reference, the shadow is dead;
		 * drop our reference (freeing it) and move on. */
		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		vm_object_pip_add(robject, 1);

		/* Fill every page of the shadow object so it no longer
		 * depends on `object' for its contents. */
		for (idx = 0; idx < robject->size; idx++) {

			/* Returns the page busied (VM_ALLOC_RETRY). */
			m_out = vm_page_grab(robject, idx,
						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			/* Only pages not already valid in the shadow need
			 * to be copied from the backing object. */
			if (m_out->valid == 0) {
				m_in = vm_page_grab(object, bo_pindex + idx,
						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				if (m_in->valid == 0) {
					/* Source page not resident; read it in. */
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
						/* NOTE(review): this `continue' skips the
						 * vm_page_wakeup() calls below, apparently
						 * leaving m_in and m_out busy -- verify
						 * whether this error path leaks busy pages. */
						continue;
					}
					vm_page_deactivate(m_in);
				}

				/* Unmap the source page everywhere so it cannot
				 * be modified while we copy it. */
				vm_page_protect(m_in, VM_PROT_NONE);
				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
				m_out->valid = m_in->valid;
				/* The copy is not backed by the pager yet. */
				vm_page_dirty(m_out);
				vm_page_activate(m_out);
				vm_page_wakeup(m_in);
			}
			vm_page_wakeup(m_out);
		}

		/* Unlink robject from object's shadow list; it is now
		 * self-contained and keeps no backing reference. */
		object->shadow_count--;
		object->ref_count--;
		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
		robject->backing_object = NULL;
		robject->backing_object_offset = 0;

		vm_object_pip_wakeup(robject);
		vm_object_deallocate(robject);
	}

	vm_object_clear_flag(object, OBJ_OPT);
}
31701efb74fbSJohn Dyson 
3171c7c34a24SBruce Evans #include "opt_ddb.h"
3172c3cb3e12SDavid Greenman #ifdef DDB
3173c7c34a24SBruce Evans #include <sys/kernel.h>
3174c7c34a24SBruce Evans 
3175c7c34a24SBruce Evans #include <ddb/ddb.h>
3176c7c34a24SBruce Evans 
3177df8bae1dSRodney W. Grimes /*
3178df8bae1dSRodney W. Grimes  *	vm_map_print:	[ debug ]
3179df8bae1dSRodney W. Grimes  */
3180c7c34a24SBruce Evans DB_SHOW_COMMAND(map, vm_map_print)
3181df8bae1dSRodney W. Grimes {
318295e5e988SJohn Dyson 	static int nlines;
3183c7c34a24SBruce Evans 	/* XXX convert args. */
3184c0877f10SJohn Dyson 	vm_map_t map = (vm_map_t)addr;
3185c7c34a24SBruce Evans 	boolean_t full = have_addr;
3186df8bae1dSRodney W. Grimes 
3187c0877f10SJohn Dyson 	vm_map_entry_t entry;
3188c7c34a24SBruce Evans 
3189e5f251d2SAlan Cox 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3190e5f251d2SAlan Cox 	    (void *)map,
3191101eeb7fSBruce Evans 	    (void *)map->pmap, map->nentries, map->timestamp);
319295e5e988SJohn Dyson 	nlines++;
3193df8bae1dSRodney W. Grimes 
3194c7c34a24SBruce Evans 	if (!full && db_indent)
3195df8bae1dSRodney W. Grimes 		return;
3196df8bae1dSRodney W. Grimes 
3197c7c34a24SBruce Evans 	db_indent += 2;
3198df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
3199df8bae1dSRodney W. Grimes 	    entry = entry->next) {
3200fc62ef1fSBruce Evans 		db_iprintf("map entry %p: start=%p, end=%p\n",
3201fc62ef1fSBruce Evans 		    (void *)entry, (void *)entry->start, (void *)entry->end);
320295e5e988SJohn Dyson 		nlines++;
3203e5f251d2SAlan Cox 		{
3204df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
3205df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
32060d94caffSDavid Greenman 
320795e5e988SJohn Dyson 			db_iprintf(" prot=%x/%x/%s",
3208df8bae1dSRodney W. Grimes 			    entry->protection,
3209df8bae1dSRodney W. Grimes 			    entry->max_protection,
32108aef1712SMatthew Dillon 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3211df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
321295e5e988SJohn Dyson 				db_printf(", wired");
3213df8bae1dSRodney W. Grimes 		}
32149fdfe602SMatthew Dillon 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3215101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3216101eeb7fSBruce Evans 			db_printf(", share=%p, offset=0x%lx\n",
32179fdfe602SMatthew Dillon 			    (void *)entry->object.sub_map,
3218ecbb00a2SDoug Rabson 			    (long)entry->offset);
321995e5e988SJohn Dyson 			nlines++;
3220df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
32219fdfe602SMatthew Dillon 			    (entry->prev->object.sub_map !=
32229fdfe602SMatthew Dillon 				entry->object.sub_map)) {
3223c7c34a24SBruce Evans 				db_indent += 2;
3224101eeb7fSBruce Evans 				vm_map_print((db_expr_t)(intptr_t)
32259fdfe602SMatthew Dillon 					     entry->object.sub_map,
3226914181e7SBruce Evans 					     full, 0, (char *)0);
3227c7c34a24SBruce Evans 				db_indent -= 2;
3228df8bae1dSRodney W. Grimes 			}
32290d94caffSDavid Greenman 		} else {
3230101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3231101eeb7fSBruce Evans 			db_printf(", object=%p, offset=0x%lx",
3232101eeb7fSBruce Evans 			    (void *)entry->object.vm_object,
3233ecbb00a2SDoug Rabson 			    (long)entry->offset);
3234afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
3235c7c34a24SBruce Evans 				db_printf(", copy (%s)",
3236afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3237c7c34a24SBruce Evans 			db_printf("\n");
323895e5e988SJohn Dyson 			nlines++;
3239df8bae1dSRodney W. Grimes 
3240df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
3241df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
3242df8bae1dSRodney W. Grimes 				entry->object.vm_object)) {
3243c7c34a24SBruce Evans 				db_indent += 2;
3244101eeb7fSBruce Evans 				vm_object_print((db_expr_t)(intptr_t)
3245101eeb7fSBruce Evans 						entry->object.vm_object,
3246914181e7SBruce Evans 						full, 0, (char *)0);
324795e5e988SJohn Dyson 				nlines += 4;
3248c7c34a24SBruce Evans 				db_indent -= 2;
3249df8bae1dSRodney W. Grimes 			}
3250df8bae1dSRodney W. Grimes 		}
3251df8bae1dSRodney W. Grimes 	}
3252c7c34a24SBruce Evans 	db_indent -= 2;
325395e5e988SJohn Dyson 	if (db_indent == 0)
325495e5e988SJohn Dyson 		nlines = 0;
3255df8bae1dSRodney W. Grimes }
325695e5e988SJohn Dyson 
325795e5e988SJohn Dyson 
325895e5e988SJohn Dyson DB_SHOW_COMMAND(procvm, procvm)
325995e5e988SJohn Dyson {
326095e5e988SJohn Dyson 	struct proc *p;
326195e5e988SJohn Dyson 
326295e5e988SJohn Dyson 	if (have_addr) {
326395e5e988SJohn Dyson 		p = (struct proc *) addr;
326495e5e988SJohn Dyson 	} else {
326595e5e988SJohn Dyson 		p = curproc;
326695e5e988SJohn Dyson 	}
326795e5e988SJohn Dyson 
3268ac1e407bSBruce Evans 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3269ac1e407bSBruce Evans 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3270b1028ad1SLuoqi Chen 	    (void *)vmspace_pmap(p->p_vmspace));
327195e5e988SJohn Dyson 
3272101eeb7fSBruce Evans 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
327395e5e988SJohn Dyson }
327495e5e988SJohn Dyson 
3275c7c34a24SBruce Evans #endif /* DDB */
3276