xref: /freebsd/sys/vm/vm_map.c (revision 0cddd8f02397756cbeca5c5777d5895f3e419d88)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
175929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
18df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
19df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
20df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
21df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
22df8bae1dSRodney W. Grimes  *    without specific prior written permission.
23df8bae1dSRodney W. Grimes  *
24df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
35df8bae1dSRodney W. Grimes  *
363c4dd356SDavid Greenman  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  *
39df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40df8bae1dSRodney W. Grimes  * All rights reserved.
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43df8bae1dSRodney W. Grimes  *
44df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
45df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
46df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
47df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
48df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
55df8bae1dSRodney W. Grimes  *
56df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57df8bae1dSRodney W. Grimes  *  School of Computer Science
58df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
59df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
60df8bae1dSRodney W. Grimes  *
61df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
62df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
633c4dd356SDavid Greenman  *
64c3aac50fSPeter Wemm  * $FreeBSD$
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes /*
68df8bae1dSRodney W. Grimes  *	Virtual memory mapping module.
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes #include <sys/param.h>
72df8bae1dSRodney W. Grimes #include <sys/systm.h>
73fb919e4dSMark Murray #include <sys/lock.h>
74fb919e4dSMark Murray #include <sys/mutex.h>
75b5e8ce9fSBruce Evans #include <sys/proc.h>
76efeaf95aSDavid Greenman #include <sys/vmmeter.h>
77867a482dSJohn Dyson #include <sys/mman.h>
781efb74fbSJohn Dyson #include <sys/vnode.h>
792267af78SJulian Elischer #include <sys/resourcevar.h>
80df8bae1dSRodney W. Grimes 
81df8bae1dSRodney W. Grimes #include <vm/vm.h>
82efeaf95aSDavid Greenman #include <vm/vm_param.h>
83efeaf95aSDavid Greenman #include <vm/pmap.h>
84efeaf95aSDavid Greenman #include <vm/vm_map.h>
85df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
86df8bae1dSRodney W. Grimes #include <vm/vm_object.h>
8747221757SJohn Dyson #include <vm/vm_pager.h>
8826f9a767SRodney W. Grimes #include <vm/vm_kern.h>
89efeaf95aSDavid Greenman #include <vm/vm_extern.h>
903075778bSJohn Dyson #include <vm/vm_zone.h>
9121cd6e62SSeigo Tanimura #include <vm/swap_pager.h>
92df8bae1dSRodney W. Grimes 
93df8bae1dSRodney W. Grimes /*
94df8bae1dSRodney W. Grimes  *	Virtual memory maps provide for the mapping, protection,
95df8bae1dSRodney W. Grimes  *	and sharing of virtual memory objects.  In addition,
96df8bae1dSRodney W. Grimes  *	this module provides for an efficient virtual copy of
97df8bae1dSRodney W. Grimes  *	memory from one map to another.
98df8bae1dSRodney W. Grimes  *
99df8bae1dSRodney W. Grimes  *	Synchronization is required prior to most operations.
100df8bae1dSRodney W. Grimes  *
101df8bae1dSRodney W. Grimes  *	Maps consist of an ordered doubly-linked list of simple
102df8bae1dSRodney W. Grimes  *	entries; a single hint is used to speed up lookups.
103df8bae1dSRodney W. Grimes  *
104956f3135SPhilippe Charnier  *	Since portions of maps are specified by start/end addresses,
105df8bae1dSRodney W. Grimes  *	which may not align with existing map entries, all
106df8bae1dSRodney W. Grimes  *	routines merely "clip" entries to these start/end values.
107df8bae1dSRodney W. Grimes  *	[That is, an entry is split into two, bordering at a
108df8bae1dSRodney W. Grimes  *	start or end value.]  Note that these clippings may not
109df8bae1dSRodney W. Grimes  *	always be necessary (as the two resulting entries are then
110df8bae1dSRodney W. Grimes  *	not changed); however, the clipping is done for convenience.
111df8bae1dSRodney W. Grimes  *
112df8bae1dSRodney W. Grimes  *	As mentioned above, virtual copy operations are performed
113ad5fca3bSAlan Cox  *	by copying VM object references from one map to
114df8bae1dSRodney W. Grimes  *	another, and then marking both regions as copy-on-write.
115df8bae1dSRodney W. Grimes  */
116df8bae1dSRodney W. Grimes 
117df8bae1dSRodney W. Grimes /*
118df8bae1dSRodney W. Grimes  *	vm_map_startup:
119df8bae1dSRodney W. Grimes  *
120df8bae1dSRodney W. Grimes  *	Initialize the vm_map module.  Must be called before
121df8bae1dSRodney W. Grimes  *	any other vm_map routines.
122df8bae1dSRodney W. Grimes  *
123df8bae1dSRodney W. Grimes  *	Map and entry structures are allocated from the general
124df8bae1dSRodney W. Grimes  *	purpose memory pool with some exceptions:
125df8bae1dSRodney W. Grimes  *
126df8bae1dSRodney W. Grimes  *	- The kernel map and kmem submap are allocated statically.
127df8bae1dSRodney W. Grimes  *	- Kernel map entries are allocated out of a static pool.
128df8bae1dSRodney W. Grimes  *
129df8bae1dSRodney W. Grimes  *	These restrictions are necessary since malloc() uses the
130df8bae1dSRodney W. Grimes  *	maps and requires map entries.
131df8bae1dSRodney W. Grimes  */
132df8bae1dSRodney W. Grimes 
/* Zone backing stores: statically allocated so the allocator can hand out
 * maps/entries before the general-purpose allocator is usable. */
static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
/* The live zone handles; NULL until vm_map_startup() runs. */
static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
/* VM objects backing the zones once zinitna() upgrades them in vm_init2(). */
static struct vm_object kmapentobj, mapentobj, mapobj;

/* Static bootstrap pools handed to zbootinit() (see vm_map_startup). */
static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
static struct vm_map map_init[MAX_KMAP];

/* Forward declarations for internal helpers (K&R-safe __P prototypes). */
static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t));
static void vm_map_split __P((vm_map_entry_t));
150df8bae1dSRodney W. Grimes 
1510d94caffSDavid Greenman void
1520d94caffSDavid Greenman vm_map_startup()
153df8bae1dSRodney W. Grimes {
1543075778bSJohn Dyson 	mapzone = &mapzone_store;
1550d65e566SJohn Dyson 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
1563075778bSJohn Dyson 		map_init, MAX_KMAP);
1573075778bSJohn Dyson 	kmapentzone = &kmapentzone_store;
1580d65e566SJohn Dyson 	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
1593075778bSJohn Dyson 		kmap_entry_init, MAX_KMAPENT);
1603075778bSJohn Dyson 	mapentzone = &mapentzone_store;
1610d65e566SJohn Dyson 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
1623075778bSJohn Dyson 		map_entry_init, MAX_MAPENT);
163df8bae1dSRodney W. Grimes }
164df8bae1dSRodney W. Grimes 
165df8bae1dSRodney W. Grimes /*
166df8bae1dSRodney W. Grimes  * Allocate a vmspace structure, including a vm_map and pmap,
167df8bae1dSRodney W. Grimes  * and initialize those structures.  The refcnt is set to 1.
168df8bae1dSRodney W. Grimes  * The remaining fields must be initialized by the caller.
169df8bae1dSRodney W. Grimes  */
struct vmspace *
vmspace_alloc(min, max)
	vm_offset_t min, max;
{
	struct vmspace *vm;

	/* Giant must be held; this path touches the pmap and zone layers. */
	GIANT_REQUIRED;
	/* Carve a vmspace out of the dedicated zone (set up in vm_init2). */
	vm = zalloc(vmspace_zone);
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	/* Initialize the embedded map over [min, max), then give it a pmap. */
	vm_map_init(&vm->vm_map, min, max);
	pmap_pinit(vmspace_pmap(vm));
	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
	/* Caller owns the single initial reference. */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	return (vm);
}
186df8bae1dSRodney W. Grimes 
/*
 * Second-stage VM initialization: upgrade the boot-time zones to
 * fully dynamic zones backed by VM objects, create the vmspace zone,
 * and kick the pmap/object modules' own second-stage init.
 */
void
vm_init2(void) {
	/* Kernel map entries may be needed at interrupt time; reserve
	 * capacity proportional to physical memory (v_page_count / 4). */
	zinitna(kmapentzone, &kmapentobj,
		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
	zinitna(mapentzone, &mapentobj,
		NULL, 0, 0, 0, 1);
	zinitna(mapzone, &mapobj,
		NULL, 0, 0, 0, 1);
	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
	pmap_init2();
	vm_object_init2();
}
1993075778bSJohn Dyson 
/*
 * Drop a reference on a vmspace; on the last reference tear down the
 * map, release the pmap, and return the structure to its zone.
 */
void
vmspace_free(vm)
	struct vmspace *vm;
{
	GIANT_REQUIRED;

	/* Guard against a double free: refcnt must still be live. */
	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {

		CTR1(KTR_VM, "vmspace_free: %p", vm);
		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);

		/* Mappings are gone; now the pmap, the lock, the memory. */
		pmap_release(vmspace_pmap(vm));
		vm_map_destroy(&vm->vm_map);
		zfree(vmspace_zone, vm);
	}
}
227df8bae1dSRodney W. Grimes 
228df8bae1dSRodney W. Grimes /*
229ff2b5645SMatthew Dillon  * vmspace_swap_count() - count the approximate swap useage in pages for a
230ff2b5645SMatthew Dillon  *			  vmspace.
231ff2b5645SMatthew Dillon  *
232ff2b5645SMatthew Dillon  *	Swap useage is determined by taking the proportional swap used by
233ff2b5645SMatthew Dillon  *	VM objects backing the VM map.  To make up for fractional losses,
234ff2b5645SMatthew Dillon  *	if the VM object has any swap use at all the associated map entries
235ff2b5645SMatthew Dillon  *	count for at least 1 swap page.
236ff2b5645SMatthew Dillon  */
int
vmspace_swap_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	int count = 0;

	/* Walk every top-level entry in the map's ordered list. */
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		vm_object_t object;

		/* Only plain entries backed by a swap object contribute;
		 * submaps are skipped entirely. */
		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
		    (object = cur->object.vm_object) != NULL &&
		    object->type == OBJT_SWAP
		) {
			/* Pages spanned by this entry. */
			int n = (cur->end - cur->start) / PAGE_SIZE;

			if (object->un_pager.swp.swp_bcount) {
				/* Pro-rate the object's swap blocks over the
				 * fraction of the object this entry maps; the
				 * "+ 1" charges at least one page so small
				 * users are not rounded away (see header
				 * comment above). */
				count += object->un_pager.swp.swp_bcount *
				    SWAP_META_PAGES * n / object->size + 1;
			}
		}
	}
	return(count);
}
261ff2b5645SMatthew Dillon 
262ff2b5645SMatthew Dillon /*
263df8bae1dSRodney W. Grimes  *	vm_map_create:
264df8bae1dSRodney W. Grimes  *
265df8bae1dSRodney W. Grimes  *	Creates and returns a new empty VM map with
266df8bae1dSRodney W. Grimes  *	the given physical map structure, and having
267df8bae1dSRodney W. Grimes  *	the given lower and upper address bounds.
268df8bae1dSRodney W. Grimes  */
2690d94caffSDavid Greenman vm_map_t
2702d8acc0fSJohn Dyson vm_map_create(pmap, min, max)
271df8bae1dSRodney W. Grimes 	pmap_t pmap;
272df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
273df8bae1dSRodney W. Grimes {
274c0877f10SJohn Dyson 	vm_map_t result;
275df8bae1dSRodney W. Grimes 
2760cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
2770cddd8f0SMatthew Dillon 
2783075778bSJohn Dyson 	result = zalloc(mapzone);
27921c641b2SJohn Baldwin 	CTR1(KTR_VM, "vm_map_create: %p", result);
2802d8acc0fSJohn Dyson 	vm_map_init(result, min, max);
281df8bae1dSRodney W. Grimes 	result->pmap = pmap;
282df8bae1dSRodney W. Grimes 	return (result);
283df8bae1dSRodney W. Grimes }
284df8bae1dSRodney W. Grimes 
285df8bae1dSRodney W. Grimes /*
286df8bae1dSRodney W. Grimes  * Initialize an existing vm_map structure
287df8bae1dSRodney W. Grimes  * such as that in the vmspace structure.
288df8bae1dSRodney W. Grimes  * The pmap is set elsewhere.
289df8bae1dSRodney W. Grimes  */
void
vm_map_init(map, min, max)
	struct vm_map *map;
	vm_offset_t min, max;
{
	GIANT_REQUIRED;

	/* Empty circular list: the header links to itself. */
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	map->infork = 0;
	map->min_offset = min;
	map->max_offset = max;
	/* Free-space and lookup hints start at the header sentinel. */
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}
309df8bae1dSRodney W. Grimes 
/* Release the resources held by an initialized map (currently just
 * its lock); inverse of vm_map_init. */
void
vm_map_destroy(map)
	struct vm_map *map;
{
	GIANT_REQUIRED;
	lockdestroy(&map->lock);
}
317a18b1f1dSJason Evans 
318df8bae1dSRodney W. Grimes /*
319b18bfc3dSJohn Dyson  *	vm_map_entry_dispose:	[ internal use only ]
320b18bfc3dSJohn Dyson  *
321b18bfc3dSJohn Dyson  *	Inverse of vm_map_entry_create.
322b18bfc3dSJohn Dyson  */
32362487bb4SJohn Dyson static void
324b18bfc3dSJohn Dyson vm_map_entry_dispose(map, entry)
325b18bfc3dSJohn Dyson 	vm_map_t map;
326b18bfc3dSJohn Dyson 	vm_map_entry_t entry;
327b18bfc3dSJohn Dyson {
328b79933ebSJohn Dyson 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
329b18bfc3dSJohn Dyson }
330b18bfc3dSJohn Dyson 
331b18bfc3dSJohn Dyson /*
332df8bae1dSRodney W. Grimes  *	vm_map_entry_create:	[ internal use only ]
333df8bae1dSRodney W. Grimes  *
334df8bae1dSRodney W. Grimes  *	Allocates a VM map entry for insertion.
335b28cb1caSAlfred Perlstein  *	No entry fields are filled in.
336df8bae1dSRodney W. Grimes  */
337f708ef1bSPoul-Henning Kamp static vm_map_entry_t
33826f9a767SRodney W. Grimes vm_map_entry_create(map)
339df8bae1dSRodney W. Grimes 	vm_map_t map;
340df8bae1dSRodney W. Grimes {
3411f6889a1SMatthew Dillon 	vm_map_entry_t new_entry;
3421f6889a1SMatthew Dillon 
3431f6889a1SMatthew Dillon 	new_entry = zalloc((map->system_map || !mapentzone) ?
3441f6889a1SMatthew Dillon 		kmapentzone : mapentzone);
3451f6889a1SMatthew Dillon 	if (new_entry == NULL)
3461f6889a1SMatthew Dillon 	    panic("vm_map_entry_create: kernel resources exhausted");
3471f6889a1SMatthew Dillon 	return(new_entry);
348df8bae1dSRodney W. Grimes }
349df8bae1dSRodney W. Grimes 
350df8bae1dSRodney W. Grimes /*
351df8bae1dSRodney W. Grimes  *	vm_map_entry_{un,}link:
352df8bae1dSRodney W. Grimes  *
353df8bae1dSRodney W. Grimes  *	Insert/remove entries from maps.
354df8bae1dSRodney W. Grimes  */
35599c81ca9SAlan Cox static __inline void
35699c81ca9SAlan Cox vm_map_entry_link(vm_map_t map,
35799c81ca9SAlan Cox 		  vm_map_entry_t after_where,
35899c81ca9SAlan Cox 		  vm_map_entry_t entry)
35999c81ca9SAlan Cox {
36021c641b2SJohn Baldwin 
36121c641b2SJohn Baldwin 	CTR4(KTR_VM,
36221c641b2SJohn Baldwin 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
36321c641b2SJohn Baldwin 	    map->nentries, entry, after_where);
36499c81ca9SAlan Cox 	map->nentries++;
36599c81ca9SAlan Cox 	entry->prev = after_where;
36699c81ca9SAlan Cox 	entry->next = after_where->next;
36799c81ca9SAlan Cox 	entry->next->prev = entry;
36899c81ca9SAlan Cox 	after_where->next = entry;
369df8bae1dSRodney W. Grimes }
37099c81ca9SAlan Cox 
37199c81ca9SAlan Cox static __inline void
37299c81ca9SAlan Cox vm_map_entry_unlink(vm_map_t map,
37399c81ca9SAlan Cox 		    vm_map_entry_t entry)
37499c81ca9SAlan Cox {
37599c81ca9SAlan Cox 	vm_map_entry_t prev = entry->prev;
37699c81ca9SAlan Cox 	vm_map_entry_t next = entry->next;
37799c81ca9SAlan Cox 
37899c81ca9SAlan Cox 	next->prev = prev;
37999c81ca9SAlan Cox 	prev->next = next;
38099c81ca9SAlan Cox 	map->nentries--;
38121c641b2SJohn Baldwin 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
38221c641b2SJohn Baldwin 	    map->nentries, entry);
383df8bae1dSRodney W. Grimes }
384df8bae1dSRodney W. Grimes 
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.
 *
 *	Wrapped in do { } while (0) so the expansion is a single
 *	statement: the previous definition carried a trailing
 *	semicolon, which would miscompile an unbraced if/else around
 *	an invocation (dangling second semicolon before "else").
 */
#define	SAVE_HINT(map,value) \
		do { (map)->hint = (value); } while (0)
393df8bae1dSRodney W. Grimes 
394df8bae1dSRodney W. Grimes /*
395df8bae1dSRodney W. Grimes  *	vm_map_lookup_entry:	[ internal use only ]
396df8bae1dSRodney W. Grimes  *
397df8bae1dSRodney W. Grimes  *	Finds the map entry containing (or
398df8bae1dSRodney W. Grimes  *	immediately preceding) the specified address
399df8bae1dSRodney W. Grimes  *	in the given map; the entry is returned
400df8bae1dSRodney W. Grimes  *	in the "entry" parameter.  The boolean
401df8bae1dSRodney W. Grimes  *	result indicates whether the address is
402df8bae1dSRodney W. Grimes  *	actually contained in the map.
403df8bae1dSRodney W. Grimes  */
boolean_t
vm_map_lookup_entry(map, address, entry)
	vm_map_t map;
	vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	vm_map_entry_t cur;
	vm_map_entry_t last;

	GIANT_REQUIRED;
	/*
	 * Start looking either from the head of the list, or from the hint.
	 */

	cur = map->hint;

	/* The header sentinel holds no range; step to the first real entry. */
	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already looking
		 * at the entry we want (which is usually the case). Note also
		 * that we don't need to save the hint here... it is the same
		 * hint (unless we are at the header, in which case the hint
		 * didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			/* First entry ending past the address starts beyond
			 * it: the address is unmapped; fall out below. */
			break;
		}
		cur = cur->next;
	}
	/* Not found: report the entry immediately preceding the address
	 * (possibly the header) and remember it as the new hint. */
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}
470df8bae1dSRodney W. Grimes 
471df8bae1dSRodney W. Grimes /*
47230dcfc09SJohn Dyson  *	vm_map_insert:
47330dcfc09SJohn Dyson  *
47430dcfc09SJohn Dyson  *	Inserts the given whole VM object into the target
47530dcfc09SJohn Dyson  *	map at the specified address range.  The object's
47630dcfc09SJohn Dyson  *	size should match that of the address range.
47730dcfc09SJohn Dyson  *
47830dcfc09SJohn Dyson  *	Requires that the map be locked, and leaves it so.
4792aaeadf8SMatthew Dillon  *
4802aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
4812aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
48230dcfc09SJohn Dyson  */
48330dcfc09SJohn Dyson int
484b9dcd593SBruce Evans vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
485b9dcd593SBruce Evans 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
486b9dcd593SBruce Evans 	      int cow)
48730dcfc09SJohn Dyson {
488c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
489c0877f10SJohn Dyson 	vm_map_entry_t prev_entry;
49030dcfc09SJohn Dyson 	vm_map_entry_t temp_entry;
4919730a5daSPaul Saab 	vm_eflags_t protoeflags;
49230dcfc09SJohn Dyson 
4930cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
4940cddd8f0SMatthew Dillon 
49530dcfc09SJohn Dyson 	/*
49630dcfc09SJohn Dyson 	 * Check that the start and end points are not bogus.
49730dcfc09SJohn Dyson 	 */
49830dcfc09SJohn Dyson 
49930dcfc09SJohn Dyson 	if ((start < map->min_offset) || (end > map->max_offset) ||
50030dcfc09SJohn Dyson 	    (start >= end))
50130dcfc09SJohn Dyson 		return (KERN_INVALID_ADDRESS);
50230dcfc09SJohn Dyson 
50330dcfc09SJohn Dyson 	/*
50430dcfc09SJohn Dyson 	 * Find the entry prior to the proposed starting address; if it's part
50530dcfc09SJohn Dyson 	 * of an existing entry, this range is bogus.
50630dcfc09SJohn Dyson 	 */
50730dcfc09SJohn Dyson 
50830dcfc09SJohn Dyson 	if (vm_map_lookup_entry(map, start, &temp_entry))
50930dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
51030dcfc09SJohn Dyson 
51130dcfc09SJohn Dyson 	prev_entry = temp_entry;
51230dcfc09SJohn Dyson 
51330dcfc09SJohn Dyson 	/*
51430dcfc09SJohn Dyson 	 * Assert that the next entry doesn't overlap the end point.
51530dcfc09SJohn Dyson 	 */
51630dcfc09SJohn Dyson 
51730dcfc09SJohn Dyson 	if ((prev_entry->next != &map->header) &&
51830dcfc09SJohn Dyson 	    (prev_entry->next->start < end))
51930dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
52030dcfc09SJohn Dyson 
521afa07f7eSJohn Dyson 	protoeflags = 0;
522afa07f7eSJohn Dyson 
523afa07f7eSJohn Dyson 	if (cow & MAP_COPY_ON_WRITE)
524e5f13bddSAlan Cox 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
525afa07f7eSJohn Dyson 
5264e045f93SAlan Cox 	if (cow & MAP_NOFAULT) {
527afa07f7eSJohn Dyson 		protoeflags |= MAP_ENTRY_NOFAULT;
528afa07f7eSJohn Dyson 
5294e045f93SAlan Cox 		KASSERT(object == NULL,
5304e045f93SAlan Cox 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
5314e045f93SAlan Cox 	}
5324f79d873SMatthew Dillon 	if (cow & MAP_DISABLE_SYNCER)
5334f79d873SMatthew Dillon 		protoeflags |= MAP_ENTRY_NOSYNC;
5349730a5daSPaul Saab 	if (cow & MAP_DISABLE_COREDUMP)
5359730a5daSPaul Saab 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
5364f79d873SMatthew Dillon 
5372aaeadf8SMatthew Dillon 	if (object) {
53830dcfc09SJohn Dyson 		/*
5392aaeadf8SMatthew Dillon 		 * When object is non-NULL, it could be shared with another
5402aaeadf8SMatthew Dillon 		 * process.  We have to set or clear OBJ_ONEMAPPING
5412aaeadf8SMatthew Dillon 		 * appropriately.
54230dcfc09SJohn Dyson 		 */
5432aaeadf8SMatthew Dillon 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
5442aaeadf8SMatthew Dillon 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
5452aaeadf8SMatthew Dillon 		}
5464e045f93SAlan Cox 	}
5474e045f93SAlan Cox 	else if ((prev_entry != &map->header) &&
5484e045f93SAlan Cox 		 (prev_entry->eflags == protoeflags) &&
5498cc7e047SJohn Dyson 		 (prev_entry->end == start) &&
5504e045f93SAlan Cox 		 (prev_entry->wired_count == 0) &&
5514e045f93SAlan Cox 		 ((prev_entry->object.vm_object == NULL) ||
5528cc7e047SJohn Dyson 		  vm_object_coalesce(prev_entry->object.vm_object,
55330dcfc09SJohn Dyson 				     OFF_TO_IDX(prev_entry->offset),
5548cc7e047SJohn Dyson 				     (vm_size_t)(prev_entry->end - prev_entry->start),
555cdc2c291SJohn Dyson 				     (vm_size_t)(end - prev_entry->end)))) {
55630dcfc09SJohn Dyson 		/*
5572aaeadf8SMatthew Dillon 		 * We were able to extend the object.  Determine if we
5582aaeadf8SMatthew Dillon 		 * can extend the previous map entry to include the
5592aaeadf8SMatthew Dillon 		 * new range as well.
56030dcfc09SJohn Dyson 		 */
5618cc7e047SJohn Dyson 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
5628cc7e047SJohn Dyson 		    (prev_entry->protection == prot) &&
5638cc7e047SJohn Dyson 		    (prev_entry->max_protection == max)) {
56430dcfc09SJohn Dyson 			map->size += (end - prev_entry->end);
56530dcfc09SJohn Dyson 			prev_entry->end = end;
5664e71e795SMatthew Dillon 			vm_map_simplify_entry(map, prev_entry);
56730dcfc09SJohn Dyson 			return (KERN_SUCCESS);
56830dcfc09SJohn Dyson 		}
5698cc7e047SJohn Dyson 
5702aaeadf8SMatthew Dillon 		/*
5712aaeadf8SMatthew Dillon 		 * If we can extend the object but cannot extend the
5722aaeadf8SMatthew Dillon 		 * map entry, we have to create a new map entry.  We
5732aaeadf8SMatthew Dillon 		 * must bump the ref count on the extended object to
5744e71e795SMatthew Dillon 		 * account for it.  object may be NULL.
5752aaeadf8SMatthew Dillon 		 */
5762aaeadf8SMatthew Dillon 		object = prev_entry->object.vm_object;
5772aaeadf8SMatthew Dillon 		offset = prev_entry->offset +
5782aaeadf8SMatthew Dillon 			(prev_entry->end - prev_entry->start);
5798cc7e047SJohn Dyson 		vm_object_reference(object);
580b18bfc3dSJohn Dyson 	}
5812aaeadf8SMatthew Dillon 
5822aaeadf8SMatthew Dillon 	/*
5832aaeadf8SMatthew Dillon 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
5842aaeadf8SMatthew Dillon 	 * in things like the buffer map where we manage kva but do not manage
5852aaeadf8SMatthew Dillon 	 * backing objects.
5862aaeadf8SMatthew Dillon 	 */
5878cc7e047SJohn Dyson 
58830dcfc09SJohn Dyson 	/*
58930dcfc09SJohn Dyson 	 * Create a new entry
59030dcfc09SJohn Dyson 	 */
59130dcfc09SJohn Dyson 
59230dcfc09SJohn Dyson 	new_entry = vm_map_entry_create(map);
59330dcfc09SJohn Dyson 	new_entry->start = start;
59430dcfc09SJohn Dyson 	new_entry->end = end;
59530dcfc09SJohn Dyson 
596afa07f7eSJohn Dyson 	new_entry->eflags = protoeflags;
59730dcfc09SJohn Dyson 	new_entry->object.vm_object = object;
59830dcfc09SJohn Dyson 	new_entry->offset = offset;
5992267af78SJulian Elischer 	new_entry->avail_ssize = 0;
6002267af78SJulian Elischer 
60130dcfc09SJohn Dyson 	new_entry->inheritance = VM_INHERIT_DEFAULT;
60230dcfc09SJohn Dyson 	new_entry->protection = prot;
60330dcfc09SJohn Dyson 	new_entry->max_protection = max;
60430dcfc09SJohn Dyson 	new_entry->wired_count = 0;
605e5f251d2SAlan Cox 
60630dcfc09SJohn Dyson 	/*
60730dcfc09SJohn Dyson 	 * Insert the new entry into the list
60830dcfc09SJohn Dyson 	 */
60930dcfc09SJohn Dyson 
61030dcfc09SJohn Dyson 	vm_map_entry_link(map, prev_entry, new_entry);
61130dcfc09SJohn Dyson 	map->size += new_entry->end - new_entry->start;
61230dcfc09SJohn Dyson 
61330dcfc09SJohn Dyson 	/*
61430dcfc09SJohn Dyson 	 * Update the free space hint
61530dcfc09SJohn Dyson 	 */
61667bf6868SJohn Dyson 	if ((map->first_free == prev_entry) &&
6174f79d873SMatthew Dillon 	    (prev_entry->end >= new_entry->start)) {
61830dcfc09SJohn Dyson 		map->first_free = new_entry;
6194f79d873SMatthew Dillon 	}
62030dcfc09SJohn Dyson 
6211a484d28SMatthew Dillon #if 0
6221a484d28SMatthew Dillon 	/*
6231a484d28SMatthew Dillon 	 * Temporarily removed to avoid MAP_STACK panic, due to
6241a484d28SMatthew Dillon 	 * MAP_STACK being a huge hack.  Will be added back in
6251a484d28SMatthew Dillon 	 * when MAP_STACK (and the user stack mapping) is fixed.
6261a484d28SMatthew Dillon 	 */
6274e71e795SMatthew Dillon 	/*
6284e71e795SMatthew Dillon 	 * It may be possible to simplify the entry
6294e71e795SMatthew Dillon 	 */
6304e71e795SMatthew Dillon 	vm_map_simplify_entry(map, new_entry);
6311a484d28SMatthew Dillon #endif
6324e71e795SMatthew Dillon 
6334f79d873SMatthew Dillon 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
634e972780aSAlan Cox 		pmap_object_init_pt(map->pmap, start,
635e972780aSAlan Cox 				    object, OFF_TO_IDX(offset), end - start,
636e972780aSAlan Cox 				    cow & MAP_PREFAULT_PARTIAL);
6374f79d873SMatthew Dillon 	}
638e972780aSAlan Cox 
63930dcfc09SJohn Dyson 	return (KERN_SUCCESS);
64030dcfc09SJohn Dyson }
64130dcfc09SJohn Dyson 
64230dcfc09SJohn Dyson /*
643df8bae1dSRodney W. Grimes  * Find sufficient space for `length' bytes in the given map, starting at
644df8bae1dSRodney W. Grimes  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
645df8bae1dSRodney W. Grimes  */
646df8bae1dSRodney W. Grimes int
647df8bae1dSRodney W. Grimes vm_map_findspace(map, start, length, addr)
648c0877f10SJohn Dyson 	vm_map_t map;
649c0877f10SJohn Dyson 	vm_offset_t start;
650df8bae1dSRodney W. Grimes 	vm_size_t length;
651df8bae1dSRodney W. Grimes 	vm_offset_t *addr;
652df8bae1dSRodney W. Grimes {
653c0877f10SJohn Dyson 	vm_map_entry_t entry, next;
654c0877f10SJohn Dyson 	vm_offset_t end;
655df8bae1dSRodney W. Grimes 
6560cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
	/* Clamp the requested start into the map's valid range. */
657df8bae1dSRodney W. Grimes 	if (start < map->min_offset)
658df8bae1dSRodney W. Grimes 		start = map->min_offset;
659df8bae1dSRodney W. Grimes 	if (start > map->max_offset)
660df8bae1dSRodney W. Grimes 		return (1);
661df8bae1dSRodney W. Grimes 
662df8bae1dSRodney W. Grimes 	/*
6630d94caffSDavid Greenman 	 * Look for the first possible address; if there's already something
6640d94caffSDavid Greenman 	 * at this address, we have to start after it.
665df8bae1dSRodney W. Grimes 	 */
666df8bae1dSRodney W. Grimes 	if (start == map->min_offset) {
		/* Searching from the bottom: the first_free hint short-cuts us. */
66767bf6868SJohn Dyson 		if ((entry = map->first_free) != &map->header)
668df8bae1dSRodney W. Grimes 			start = entry->end;
669df8bae1dSRodney W. Grimes 	} else {
670df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp;
6710d94caffSDavid Greenman 
672df8bae1dSRodney W. Grimes 		if (vm_map_lookup_entry(map, start, &tmp))
673df8bae1dSRodney W. Grimes 			start = tmp->end;
674df8bae1dSRodney W. Grimes 		entry = tmp;
675df8bae1dSRodney W. Grimes 	}
676df8bae1dSRodney W. Grimes 
677df8bae1dSRodney W. Grimes 	/*
6780d94caffSDavid Greenman 	 * Look through the rest of the map, trying to fit a new region in the
6790d94caffSDavid Greenman 	 * gap between existing regions, or after the very last region.
680df8bae1dSRodney W. Grimes 	 */
681df8bae1dSRodney W. Grimes 	for (;; start = (entry = next)->end) {
682df8bae1dSRodney W. Grimes 		/*
683df8bae1dSRodney W. Grimes 		 * Find the end of the proposed new region.  Be sure we didn't
684df8bae1dSRodney W. Grimes 		 * go beyond the end of the map, or wrap around the address;
685df8bae1dSRodney W. Grimes 		 * if so, we lose.  Otherwise, if this is the last entry, or
686df8bae1dSRodney W. Grimes 		 * if the proposed new region fits before the next entry, we
687df8bae1dSRodney W. Grimes 		 * win.
688df8bae1dSRodney W. Grimes 		 */
689df8bae1dSRodney W. Grimes 		end = start + length;
		/* "end < start" catches address-space wraparound. */
690df8bae1dSRodney W. Grimes 		if (end > map->max_offset || end < start)
691df8bae1dSRodney W. Grimes 			return (1);
692df8bae1dSRodney W. Grimes 		next = entry->next;
693df8bae1dSRodney W. Grimes 		if (next == &map->header || next->start >= end)
694df8bae1dSRodney W. Grimes 			break;
695df8bae1dSRodney W. Grimes 	}
	/* Cache the predecessor entry to speed up a subsequent lookup. */
696df8bae1dSRodney W. Grimes 	SAVE_HINT(map, entry);
697df8bae1dSRodney W. Grimes 	*addr = start;
	/*
	 * For the kernel map, grow the kernel page tables if the chosen
	 * region extends beyond the currently mapped kernel VA.
	 */
69899448ed1SJohn Dyson 	if (map == kernel_map) {
69999448ed1SJohn Dyson 		vm_offset_t ksize;
70099448ed1SJohn Dyson 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
70199448ed1SJohn Dyson 			pmap_growkernel(ksize);
70299448ed1SJohn Dyson 		}
70399448ed1SJohn Dyson 	}
704df8bae1dSRodney W. Grimes 	return (0);
705df8bae1dSRodney W. Grimes }
706df8bae1dSRodney W. Grimes 
707df8bae1dSRodney W. Grimes /*
708df8bae1dSRodney W. Grimes  *	vm_map_find finds an unallocated region in the target address
709df8bae1dSRodney W. Grimes  *	map with the given length.  The search is defined to be
710df8bae1dSRodney W. Grimes  *	first-fit from the specified address; the region found is
711df8bae1dSRodney W. Grimes  *	returned in the same parameter.
712df8bae1dSRodney W. Grimes  *
7132aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
7142aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
715df8bae1dSRodney W. Grimes  */
716df8bae1dSRodney W. Grimes int
717b9dcd593SBruce Evans vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
718b9dcd593SBruce Evans 	    vm_offset_t *addr,	/* IN/OUT */
719b9dcd593SBruce Evans 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
720b9dcd593SBruce Evans 	    vm_prot_t max, int cow)
721df8bae1dSRodney W. Grimes {
722c0877f10SJohn Dyson 	vm_offset_t start;
7238d6e8edeSDavid Greenman 	int result, s = 0;
724df8bae1dSRodney W. Grimes 
7250cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
7260cddd8f0SMatthew Dillon 
727df8bae1dSRodney W. Grimes 	start = *addr;
7288d6e8edeSDavid Greenman 
	/* kmem_map operations are protected by splvm for the duration. */
72908442f8aSBosko Milekic 	if (map == kmem_map)
730b18bfc3dSJohn Dyson 		s = splvm();
7318d6e8edeSDavid Greenman 
732bea41bcfSDavid Greenman 	vm_map_lock(map);
733df8bae1dSRodney W. Grimes 	if (find_space) {
		/* First-fit search from *addr; on success *addr is updated. */
734df8bae1dSRodney W. Grimes 		if (vm_map_findspace(map, start, length, addr)) {
735df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
73608442f8aSBosko Milekic 			if (map == kmem_map)
7378d6e8edeSDavid Greenman 				splx(s);
738df8bae1dSRodney W. Grimes 			return (KERN_NO_SPACE);
739df8bae1dSRodney W. Grimes 		}
740df8bae1dSRodney W. Grimes 		start = *addr;
741df8bae1dSRodney W. Grimes 	}
742bd7e5f99SJohn Dyson 	result = vm_map_insert(map, object, offset,
743bd7e5f99SJohn Dyson 		start, start + length, prot, max, cow);
744df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
7458d6e8edeSDavid Greenman 
74608442f8aSBosko Milekic 	if (map == kmem_map)
7478d6e8edeSDavid Greenman 		splx(s);
7488d6e8edeSDavid Greenman 
749df8bae1dSRodney W. Grimes 	return (result);
750df8bae1dSRodney W. Grimes }
751df8bae1dSRodney W. Grimes 
752df8bae1dSRodney W. Grimes /*
753b7b2aac2SJohn Dyson  *	vm_map_simplify_entry:
75467bf6868SJohn Dyson  *
7554e71e795SMatthew Dillon  *	Simplify the given map entry by merging with either neighbor.  This
7564e71e795SMatthew Dillon  *	routine also has the ability to merge with both neighbors.
7574e71e795SMatthew Dillon  *
7584e71e795SMatthew Dillon  *	The map must be locked.
7594e71e795SMatthew Dillon  *
7604e71e795SMatthew Dillon  *	This routine guarantees that the passed entry remains valid (though
7614e71e795SMatthew Dillon  *	possibly extended).  When merging, this routine may delete one or
7624e71e795SMatthew Dillon  *	both neighbors.
763df8bae1dSRodney W. Grimes  */
764b7b2aac2SJohn Dyson void
7650d94caffSDavid Greenman vm_map_simplify_entry(map, entry)
766df8bae1dSRodney W. Grimes 	vm_map_t map;
767df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
768df8bae1dSRodney W. Grimes {
769308c24baSJohn Dyson 	vm_map_entry_t next, prev;
770b7b2aac2SJohn Dyson 	vm_size_t prevsize, esize;
771df8bae1dSRodney W. Grimes 
7720cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
7730cddd8f0SMatthew Dillon 
	/* Submap entries are never coalesced. */
7749fdfe602SMatthew Dillon 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
775df8bae1dSRodney W. Grimes 		return;
776308c24baSJohn Dyson 
	/*
	 * Try to merge the previous entry into this one.  The ranges must
	 * abut, all attributes must match, and (when a backing object
	 * exists) the object offsets must be contiguous.
	 */
777308c24baSJohn Dyson 	prev = entry->prev;
778308c24baSJohn Dyson 	if (prev != &map->header) {
77967bf6868SJohn Dyson 		prevsize = prev->end - prev->start;
78067bf6868SJohn Dyson 		if ( (prev->end == entry->start) &&
78167bf6868SJohn Dyson 		     (prev->object.vm_object == entry->object.vm_object) &&
78295e5e988SJohn Dyson 		     (!prev->object.vm_object ||
78367bf6868SJohn Dyson 			(prev->offset + prevsize == entry->offset)) &&
784afa07f7eSJohn Dyson 		     (prev->eflags == entry->eflags) &&
78567bf6868SJohn Dyson 		     (prev->protection == entry->protection) &&
78667bf6868SJohn Dyson 		     (prev->max_protection == entry->max_protection) &&
78767bf6868SJohn Dyson 		     (prev->inheritance == entry->inheritance) &&
788b7b2aac2SJohn Dyson 		     (prev->wired_count == entry->wired_count)) {
			/* Repoint cached hints before prev is unlinked. */
789308c24baSJohn Dyson 			if (map->first_free == prev)
790308c24baSJohn Dyson 				map->first_free = entry;
791b18bfc3dSJohn Dyson 			if (map->hint == prev)
792b18bfc3dSJohn Dyson 				map->hint = entry;
793308c24baSJohn Dyson 			vm_map_entry_unlink(map, prev);
794308c24baSJohn Dyson 			entry->start = prev->start;
795308c24baSJohn Dyson 			entry->offset = prev->offset;
			/* Drop prev's reference; entry keeps its own. */
796b18bfc3dSJohn Dyson 			if (prev->object.vm_object)
797308c24baSJohn Dyson 				vm_object_deallocate(prev->object.vm_object);
798308c24baSJohn Dyson 			vm_map_entry_dispose(map, prev);
799308c24baSJohn Dyson 		}
800308c24baSJohn Dyson 	}
801de5f6a77SJohn Dyson 
	/* Now try to absorb the following entry, under the same conditions. */
802de5f6a77SJohn Dyson 	next = entry->next;
803308c24baSJohn Dyson 	if (next != &map->header) {
80467bf6868SJohn Dyson 		esize = entry->end - entry->start;
80567bf6868SJohn Dyson 		if ((entry->end == next->start) &&
80667bf6868SJohn Dyson 		    (next->object.vm_object == entry->object.vm_object) &&
80767bf6868SJohn Dyson 		     (!entry->object.vm_object ||
80867bf6868SJohn Dyson 			(entry->offset + esize == next->offset)) &&
809afa07f7eSJohn Dyson 		    (next->eflags == entry->eflags) &&
81067bf6868SJohn Dyson 		    (next->protection == entry->protection) &&
81167bf6868SJohn Dyson 		    (next->max_protection == entry->max_protection) &&
81267bf6868SJohn Dyson 		    (next->inheritance == entry->inheritance) &&
813b7b2aac2SJohn Dyson 		    (next->wired_count == entry->wired_count)) {
814308c24baSJohn Dyson 			if (map->first_free == next)
815308c24baSJohn Dyson 				map->first_free = entry;
816b18bfc3dSJohn Dyson 			if (map->hint == next)
817b18bfc3dSJohn Dyson 				map->hint = entry;
818de5f6a77SJohn Dyson 			vm_map_entry_unlink(map, next);
819de5f6a77SJohn Dyson 			entry->end = next->end;
820b18bfc3dSJohn Dyson 			if (next->object.vm_object)
821de5f6a77SJohn Dyson 				vm_object_deallocate(next->object.vm_object);
822de5f6a77SJohn Dyson 			vm_map_entry_dispose(map, next);
823df8bae1dSRodney W. Grimes 	        }
824df8bae1dSRodney W. Grimes 	}
825de5f6a77SJohn Dyson }
826df8bae1dSRodney W. Grimes /*
827df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
828df8bae1dSRodney W. Grimes  *
829df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
830df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
831df8bae1dSRodney W. Grimes  *	it splits the entry into two.
832df8bae1dSRodney W. Grimes  */
/*
 * NB: macro arguments are evaluated more than once; callers must pass
 * side-effect-free expressions.
 */
833df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \
834df8bae1dSRodney W. Grimes { \
835df8bae1dSRodney W. Grimes 	if (startaddr > entry->start) \
836df8bae1dSRodney W. Grimes 		_vm_map_clip_start(map, entry, startaddr); \
837df8bae1dSRodney W. Grimes }
838df8bae1dSRodney W. Grimes 
839df8bae1dSRodney W. Grimes /*
840df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
841df8bae1dSRodney W. Grimes  *	the entry must be split.
842df8bae1dSRodney W. Grimes  */
8430d94caffSDavid Greenman static void
8440d94caffSDavid Greenman _vm_map_clip_start(map, entry, start)
845c0877f10SJohn Dyson 	vm_map_t map;
846c0877f10SJohn Dyson 	vm_map_entry_t entry;
847c0877f10SJohn Dyson 	vm_offset_t start;
848df8bae1dSRodney W. Grimes {
849c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
850df8bae1dSRodney W. Grimes 
851df8bae1dSRodney W. Grimes 	/*
8520d94caffSDavid Greenman 	 * Split off the front portion -- note that we must insert the new
8530d94caffSDavid Greenman 	 * entry BEFORE this one, so that this entry has the specified
8540d94caffSDavid Greenman 	 * starting address.
855df8bae1dSRodney W. Grimes 	 */
856df8bae1dSRodney W. Grimes 
	/* Coalescing first may make the split unnecessary or cheaper. */
857f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, entry);
858f32dbbeeSJohn Dyson 
85911cccda1SJohn Dyson 	/*
86011cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
86111cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
86211cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
86311cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
86411cccda1SJohn Dyson 	 * put this improvement.
86511cccda1SJohn Dyson 	 */
86611cccda1SJohn Dyson 
8674e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL && !map->system_map) {
86811cccda1SJohn Dyson 		vm_object_t object;
86911cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
870c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
87111cccda1SJohn Dyson 		entry->object.vm_object = object;
87211cccda1SJohn Dyson 		entry->offset = 0;
87311cccda1SJohn Dyson 	}
87411cccda1SJohn Dyson 
	/*
	 * new_entry takes [old start, start); entry is trimmed to
	 * [start, old end) and its object offset advanced to match.
	 */
875df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
876df8bae1dSRodney W. Grimes 	*new_entry = *entry;
877df8bae1dSRodney W. Grimes 
878df8bae1dSRodney W. Grimes 	new_entry->end = start;
879df8bae1dSRodney W. Grimes 	entry->offset += (start - entry->start);
880df8bae1dSRodney W. Grimes 	entry->start = start;
881df8bae1dSRodney W. Grimes 
882df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry->prev, new_entry);
883df8bae1dSRodney W. Grimes 
	/* Both halves now reference the same object; account for the copy. */
8849fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
885df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
886df8bae1dSRodney W. Grimes 	}
887c0877f10SJohn Dyson }
888df8bae1dSRodney W. Grimes 
889df8bae1dSRodney W. Grimes /*
890df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
891df8bae1dSRodney W. Grimes  *
892df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
893df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
894df8bae1dSRodney W. Grimes  *	it splits the entry into two.
895df8bae1dSRodney W. Grimes  */
896df8bae1dSRodney W. Grimes 
/*
 * NB: macro arguments are evaluated more than once; callers must pass
 * side-effect-free expressions.
 */
897df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \
898df8bae1dSRodney W. Grimes { \
899df8bae1dSRodney W. Grimes 	if (endaddr < entry->end) \
900df8bae1dSRodney W. Grimes 		_vm_map_clip_end(map, entry, endaddr); \
901df8bae1dSRodney W. Grimes }
902df8bae1dSRodney W. Grimes 
903df8bae1dSRodney W. Grimes /*
904df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
905df8bae1dSRodney W. Grimes  *	the entry must be split.
906df8bae1dSRodney W. Grimes  */
9070d94caffSDavid Greenman static void
9080d94caffSDavid Greenman _vm_map_clip_end(map, entry, end)
909c0877f10SJohn Dyson 	vm_map_t map;
910c0877f10SJohn Dyson 	vm_map_entry_t entry;
911c0877f10SJohn Dyson 	vm_offset_t end;
912df8bae1dSRodney W. Grimes {
913c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
914df8bae1dSRodney W. Grimes 
915df8bae1dSRodney W. Grimes 	/*
91611cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
91711cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
91811cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
91911cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
92011cccda1SJohn Dyson 	 * put this improvement.
92111cccda1SJohn Dyson 	 */
92211cccda1SJohn Dyson 
9234e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL && !map->system_map) {
92411cccda1SJohn Dyson 		vm_object_t object;
92511cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
926c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
92711cccda1SJohn Dyson 		entry->object.vm_object = object;
92811cccda1SJohn Dyson 		entry->offset = 0;
92911cccda1SJohn Dyson 	}
93011cccda1SJohn Dyson 
93111cccda1SJohn Dyson 	/*
9320d94caffSDavid Greenman 	 * Create a new entry and insert it AFTER the specified entry
933df8bae1dSRodney W. Grimes 	 */
934df8bae1dSRodney W. Grimes 
	/*
	 * entry is trimmed to [start, end); new_entry takes [end, old end)
	 * with its object offset advanced past entry's portion.
	 */
935df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
936df8bae1dSRodney W. Grimes 	*new_entry = *entry;
937df8bae1dSRodney W. Grimes 
938df8bae1dSRodney W. Grimes 	new_entry->start = entry->end = end;
939df8bae1dSRodney W. Grimes 	new_entry->offset += (end - entry->start);
940df8bae1dSRodney W. Grimes 
941df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry, new_entry);
942df8bae1dSRodney W. Grimes 
	/* Both halves now reference the same object; account for the copy. */
9439fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
944df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
945df8bae1dSRodney W. Grimes 	}
946c0877f10SJohn Dyson }
947df8bae1dSRodney W. Grimes 
948df8bae1dSRodney W. Grimes /*
949df8bae1dSRodney W. Grimes  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
950df8bae1dSRodney W. Grimes  *
951df8bae1dSRodney W. Grimes  *	Asserts that the starting and ending region
952df8bae1dSRodney W. Grimes  *	addresses fall within the valid range of the map.
953df8bae1dSRodney W. Grimes  */
/*
 * NB: silently clamps start/end (no error is reported) and evaluates its
 * arguments more than once; start and end must be plain lvalues.
 */
954df8bae1dSRodney W. Grimes #define	VM_MAP_RANGE_CHECK(map, start, end)		\
955df8bae1dSRodney W. Grimes 		{					\
956df8bae1dSRodney W. Grimes 		if (start < vm_map_min(map))		\
957df8bae1dSRodney W. Grimes 			start = vm_map_min(map);	\
958df8bae1dSRodney W. Grimes 		if (end > vm_map_max(map))		\
959df8bae1dSRodney W. Grimes 			end = vm_map_max(map);		\
960df8bae1dSRodney W. Grimes 		if (start > end)			\
961df8bae1dSRodney W. Grimes 			start = end;			\
962df8bae1dSRodney W. Grimes 		}
963df8bae1dSRodney W. Grimes 
964df8bae1dSRodney W. Grimes /*
965df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
966df8bae1dSRodney W. Grimes  *
967df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
968df8bae1dSRodney W. Grimes  *
969df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
970df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
971df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
972df8bae1dSRodney W. Grimes  *
973df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
974df8bae1dSRodney W. Grimes  *	within this range after calling vm_map_submap:
975df8bae1dSRodney W. Grimes  *		vm_fault
976df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
977df8bae1dSRodney W. Grimes  *
978df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
979df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
980df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
981df8bae1dSRodney W. Grimes  */
982df8bae1dSRodney W. Grimes int
983df8bae1dSRodney W. Grimes vm_map_submap(map, start, end, submap)
984c0877f10SJohn Dyson 	vm_map_t map;
985c0877f10SJohn Dyson 	vm_offset_t start;
986c0877f10SJohn Dyson 	vm_offset_t end;
987df8bae1dSRodney W. Grimes 	vm_map_t submap;
988df8bae1dSRodney W. Grimes {
989df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
990c0877f10SJohn Dyson 	int result = KERN_INVALID_ARGUMENT;
991df8bae1dSRodney W. Grimes 
9920cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
9930cddd8f0SMatthew Dillon 
994df8bae1dSRodney W. Grimes 	vm_map_lock(map);
995df8bae1dSRodney W. Grimes 
996df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
997df8bae1dSRodney W. Grimes 
998df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
999df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
10000d94caffSDavid Greenman 	} else
1001df8bae1dSRodney W. Grimes 		entry = entry->next;
1002df8bae1dSRodney W. Grimes 
1003df8bae1dSRodney W. Grimes 	vm_map_clip_end(map, entry, end);
1004df8bae1dSRodney W. Grimes 
	/*
	 * Succeed only if the (clipped) range is covered by exactly one
	 * entry that has no COW semantics and no backing object; otherwise
	 * KERN_INVALID_ARGUMENT is returned.
	 */
1005df8bae1dSRodney W. Grimes 	if ((entry->start == start) && (entry->end == end) &&
10069fdfe602SMatthew Dillon 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1007afa07f7eSJohn Dyson 	    (entry->object.vm_object == NULL)) {
10082d8acc0fSJohn Dyson 		entry->object.sub_map = submap;
1009afa07f7eSJohn Dyson 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1010df8bae1dSRodney W. Grimes 		result = KERN_SUCCESS;
1011df8bae1dSRodney W. Grimes 	}
1012df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1013df8bae1dSRodney W. Grimes 
1014df8bae1dSRodney W. Grimes 	return (result);
1015df8bae1dSRodney W. Grimes }
1016df8bae1dSRodney W. Grimes 
1017df8bae1dSRodney W. Grimes /*
1018df8bae1dSRodney W. Grimes  *	vm_map_protect:
1019df8bae1dSRodney W. Grimes  *
1020df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
1021df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
1022df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
1023df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
1024df8bae1dSRodney W. Grimes  */
1025df8bae1dSRodney W. Grimes int
1026b9dcd593SBruce Evans vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1027b9dcd593SBruce Evans 	       vm_prot_t new_prot, boolean_t set_max)
1028df8bae1dSRodney W. Grimes {
1029c0877f10SJohn Dyson 	vm_map_entry_t current;
1030df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1031df8bae1dSRodney W. Grimes 
10320cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
1033df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1034df8bae1dSRodney W. Grimes 
1035df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1036df8bae1dSRodney W. Grimes 
1037df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1038df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1039b7b2aac2SJohn Dyson 	} else {
1040df8bae1dSRodney W. Grimes 		entry = entry->next;
1041b7b2aac2SJohn Dyson 	}
1042df8bae1dSRodney W. Grimes 
1043df8bae1dSRodney W. Grimes 	/*
10440d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
1045df8bae1dSRodney W. Grimes 	 */
1046df8bae1dSRodney W. Grimes 
	/*
	 * Pass 1: verify the whole range is eligible (no submaps, and
	 * new_prot within each entry's max_protection) before modifying
	 * anything, so the operation is all-or-nothing.
	 */
1047df8bae1dSRodney W. Grimes 	current = entry;
1048df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1049afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1050a1f6d91cSDavid Greenman 			vm_map_unlock(map);
1051df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1052a1f6d91cSDavid Greenman 		}
1053df8bae1dSRodney W. Grimes 		if ((new_prot & current->max_protection) != new_prot) {
1054df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1055df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
1056df8bae1dSRodney W. Grimes 		}
1057df8bae1dSRodney W. Grimes 		current = current->next;
1058df8bae1dSRodney W. Grimes 	}
1059df8bae1dSRodney W. Grimes 
1060df8bae1dSRodney W. Grimes 	/*
10610d94caffSDavid Greenman 	 * Go back and fix up protections. [Note that clipping is not
10620d94caffSDavid Greenman 	 * necessary the second time.]
1063df8bae1dSRodney W. Grimes 	 */
1064df8bae1dSRodney W. Grimes 
1065df8bae1dSRodney W. Grimes 	current = entry;
1066df8bae1dSRodney W. Grimes 
1067df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1068df8bae1dSRodney W. Grimes 		vm_prot_t old_prot;
1069df8bae1dSRodney W. Grimes 
1070df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, current, end);
1071df8bae1dSRodney W. Grimes 
1072df8bae1dSRodney W. Grimes 		old_prot = current->protection;
		/* set_max: lower max_protection and derive protection from it. */
1073df8bae1dSRodney W. Grimes 		if (set_max)
1074df8bae1dSRodney W. Grimes 			current->protection =
1075df8bae1dSRodney W. Grimes 			    (current->max_protection = new_prot) &
1076df8bae1dSRodney W. Grimes 			    old_prot;
1077df8bae1dSRodney W. Grimes 		else
1078df8bae1dSRodney W. Grimes 			current->protection = new_prot;
1079df8bae1dSRodney W. Grimes 
1080df8bae1dSRodney W. Grimes 		/*
10810d94caffSDavid Greenman 		 * Update physical map if necessary. Worry about copy-on-write
10820d94caffSDavid Greenman 		 * here -- CHECK THIS XXX
1083df8bae1dSRodney W. Grimes 		 */
1084df8bae1dSRodney W. Grimes 
1085df8bae1dSRodney W. Grimes 		if (current->protection != old_prot) {
			/* COW entries must never get hardware write permission. */
1086afa07f7eSJohn Dyson #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1087df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
1088df8bae1dSRodney W. Grimes 
1089df8bae1dSRodney W. Grimes 			pmap_protect(map->pmap, current->start,
1090df8bae1dSRodney W. Grimes 			    current->end,
10911c85e3dfSAlan Cox 			    current->protection & MASK(current));
1092df8bae1dSRodney W. Grimes #undef	MASK
1093df8bae1dSRodney W. Grimes 		}
10947d78abc9SJohn Dyson 
10957d78abc9SJohn Dyson 		vm_map_simplify_entry(map, current);
10967d78abc9SJohn Dyson 
1097df8bae1dSRodney W. Grimes 		current = current->next;
1098df8bae1dSRodney W. Grimes 	}
1099df8bae1dSRodney W. Grimes 
1100df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1101df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1102df8bae1dSRodney W. Grimes }
1103df8bae1dSRodney W. Grimes 
1104df8bae1dSRodney W. Grimes /*
1105867a482dSJohn Dyson  *	vm_map_madvise:
1106867a482dSJohn Dyson  *
1107867a482dSJohn Dyson  * 	This routine traverses a process's map handling the madvise
1108f7fc307aSAlan Cox  *	system call.  Advisories are classified as either those affecting
1109f7fc307aSAlan Cox  *	the vm_map_entry structure, or those affecting the underlying
1110f7fc307aSAlan Cox  *	objects.
1111867a482dSJohn Dyson  */
1112b4309055SMatthew Dillon 
1113b4309055SMatthew Dillon int
1114f7fc307aSAlan Cox vm_map_madvise(map, start, end, behav)
1115867a482dSJohn Dyson 	vm_map_t map;
1116867a482dSJohn Dyson 	vm_offset_t start, end;
1117f7fc307aSAlan Cox 	int behav;
1118867a482dSJohn Dyson {
1119f7fc307aSAlan Cox 	vm_map_entry_t current, entry;
1120b4309055SMatthew Dillon 	int modify_map = 0;
1121867a482dSJohn Dyson 
11220cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
11230cddd8f0SMatthew Dillon 
1124b4309055SMatthew Dillon 	/*
1125b4309055SMatthew Dillon 	 * Some madvise calls directly modify the vm_map_entry, in which case
1126b4309055SMatthew Dillon 	 * we need to use an exclusive lock on the map and we need to perform
1127b4309055SMatthew Dillon 	 * various clipping operations.  Otherwise we only need a read-lock
1128b4309055SMatthew Dillon 	 * on the map.
1129b4309055SMatthew Dillon 	 */
1130f7fc307aSAlan Cox 
	/* Unknown advice is rejected here, before any lock is taken. */
1131b4309055SMatthew Dillon 	switch(behav) {
1132b4309055SMatthew Dillon 	case MADV_NORMAL:
1133b4309055SMatthew Dillon 	case MADV_SEQUENTIAL:
1134b4309055SMatthew Dillon 	case MADV_RANDOM:
11354f79d873SMatthew Dillon 	case MADV_NOSYNC:
11364f79d873SMatthew Dillon 	case MADV_AUTOSYNC:
11379730a5daSPaul Saab 	case MADV_NOCORE:
11389730a5daSPaul Saab 	case MADV_CORE:
1139b4309055SMatthew Dillon 		modify_map = 1;
1140867a482dSJohn Dyson 		vm_map_lock(map);
1141b4309055SMatthew Dillon 		break;
1142b4309055SMatthew Dillon 	case MADV_WILLNEED:
1143b4309055SMatthew Dillon 	case MADV_DONTNEED:
1144b4309055SMatthew Dillon 	case MADV_FREE:
1145f7fc307aSAlan Cox 		vm_map_lock_read(map);
1146b4309055SMatthew Dillon 		break;
1147b4309055SMatthew Dillon 	default:
1148b4309055SMatthew Dillon 		return (KERN_INVALID_ARGUMENT);
1149b4309055SMatthew Dillon 	}
1150b4309055SMatthew Dillon 
1151b4309055SMatthew Dillon 	/*
1152b4309055SMatthew Dillon 	 * Locate starting entry and clip if necessary.
1153b4309055SMatthew Dillon 	 */
1154867a482dSJohn Dyson 
1155867a482dSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
1156867a482dSJohn Dyson 
1157867a482dSJohn Dyson 	if (vm_map_lookup_entry(map, start, &entry)) {
		/* Clipping is only legal while holding the exclusive lock. */
1158f7fc307aSAlan Cox 		if (modify_map)
1159867a482dSJohn Dyson 			vm_map_clip_start(map, entry, start);
1160b4309055SMatthew Dillon 	} else {
1161867a482dSJohn Dyson 		entry = entry->next;
1162b4309055SMatthew Dillon 	}
1163867a482dSJohn Dyson 
1164f7fc307aSAlan Cox 	if (modify_map) {
1165f7fc307aSAlan Cox 		/*
1166f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the vm_map_entry.
1167f7fc307aSAlan Cox 		 *
1168f7fc307aSAlan Cox 		 * We clip the vm_map_entry so that behavioral changes are
1169f7fc307aSAlan Cox 		 * limited to the specified address range.
1170f7fc307aSAlan Cox 		 */
1171867a482dSJohn Dyson 		for (current = entry;
1172867a482dSJohn Dyson 		     (current != &map->header) && (current->start < end);
1173b4309055SMatthew Dillon 		     current = current->next
1174b4309055SMatthew Dillon 		) {
			/* Submap entries are silently skipped. */
1175f7fc307aSAlan Cox 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1176867a482dSJohn Dyson 				continue;
1177fed9a903SJohn Dyson 
117847221757SJohn Dyson 			vm_map_clip_end(map, current, end);
1178fed9a903SJohn Dyson 
1180f7fc307aSAlan Cox 			switch (behav) {
1181867a482dSJohn Dyson 			case MADV_NORMAL:
11827f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1183867a482dSJohn Dyson 				break;
1184867a482dSJohn Dyson 			case MADV_SEQUENTIAL:
11857f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1186867a482dSJohn Dyson 				break;
1187867a482dSJohn Dyson 			case MADV_RANDOM:
11887f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1189867a482dSJohn Dyson 				break;
11904f79d873SMatthew Dillon 			case MADV_NOSYNC:
11914f79d873SMatthew Dillon 				current->eflags |= MAP_ENTRY_NOSYNC;
11924f79d873SMatthew Dillon 				break;
11934f79d873SMatthew Dillon 			case MADV_AUTOSYNC:
11944f79d873SMatthew Dillon 				current->eflags &= ~MAP_ENTRY_NOSYNC;
11954f79d873SMatthew Dillon 				break;
11969730a5daSPaul Saab 			case MADV_NOCORE:
11979730a5daSPaul Saab 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
11989730a5daSPaul Saab 				break;
11999730a5daSPaul Saab 			case MADV_CORE:
12009730a5daSPaul Saab 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
12019730a5daSPaul Saab 				break;
			/* Not reachable: behav was validated above. */
1202867a482dSJohn Dyson 			default:
1203867a482dSJohn Dyson 				break;
1204867a482dSJohn Dyson 			}
			/* Re-coalesce entries that now match their neighbors. */
1205f7fc307aSAlan Cox 			vm_map_simplify_entry(map, current);
1206867a482dSJohn Dyson 		}
1207867a482dSJohn Dyson 		vm_map_unlock(map);
1208b4309055SMatthew Dillon 	} else {
1209f7fc307aSAlan Cox 		vm_pindex_t pindex;
1210f7fc307aSAlan Cox 		int count;
1211f7fc307aSAlan Cox 
1212f7fc307aSAlan Cox 		/*
1213f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the underlying
1214f7fc307aSAlan Cox 		 * vm_object.
1215f7fc307aSAlan Cox 		 *
1216f7fc307aSAlan Cox 		 * Since we don't clip the vm_map_entry, we have to clip
1217f7fc307aSAlan Cox 		 * the vm_object pindex and count.
1218f7fc307aSAlan Cox 		 */
1219f7fc307aSAlan Cox 		for (current = entry;
1220f7fc307aSAlan Cox 		     (current != &map->header) && (current->start < end);
1221b4309055SMatthew Dillon 		     current = current->next
1222b4309055SMatthew Dillon 		) {
12235f99b57cSMatthew Dillon 			vm_offset_t useStart;
12245f99b57cSMatthew Dillon 
1225f7fc307aSAlan Cox 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1226f7fc307aSAlan Cox 				continue;
1227f7fc307aSAlan Cox 
			/*
			 * Trim the page range to the overlap of this entry
			 * with [start, end); useStart tracks the clipped
			 * virtual address for the WILLNEED prefault below.
			 */
1228f7fc307aSAlan Cox 			pindex = OFF_TO_IDX(current->offset);
1229f7fc307aSAlan Cox 			count = atop(current->end - current->start);
12305f99b57cSMatthew Dillon 			useStart = current->start;
1231f7fc307aSAlan Cox 
1232f7fc307aSAlan Cox 			if (current->start < start) {
1233f7fc307aSAlan Cox 				pindex += atop(start - current->start);
1234f7fc307aSAlan Cox 				count -= atop(start - current->start);
12355f99b57cSMatthew Dillon 				useStart = start;
1236f7fc307aSAlan Cox 			}
1237f7fc307aSAlan Cox 			if (current->end > end)
1238f7fc307aSAlan Cox 				count -= atop(current->end - end);
1239f7fc307aSAlan Cox 
1240f7fc307aSAlan Cox 			if (count <= 0)
1241f7fc307aSAlan Cox 				continue;
1242f7fc307aSAlan Cox 
1243f7fc307aSAlan Cox 			vm_object_madvise(current->object.vm_object,
1244f7fc307aSAlan Cox 					  pindex, count, behav);
			/* WILLNEED additionally pre-populates the pmap. */
1245b4309055SMatthew Dillon 			if (behav == MADV_WILLNEED) {
1246b4309055SMatthew Dillon 				pmap_object_init_pt(
1247b4309055SMatthew Dillon 				    map->pmap,
12485f99b57cSMatthew Dillon 				    useStart,
1249f7fc307aSAlan Cox 				    current->object.vm_object,
1250b4309055SMatthew Dillon 				    pindex,
1251b4309055SMatthew Dillon 				    (count << PAGE_SHIFT),
1252b4309055SMatthew Dillon 				    0
1253b4309055SMatthew Dillon 				);
1254f7fc307aSAlan Cox 			}
1255f7fc307aSAlan Cox 		}
1256f7fc307aSAlan Cox 		vm_map_unlock_read(map);
1257f7fc307aSAlan Cox 	}
1258b4309055SMatthew Dillon 	return(0);
1259867a482dSJohn Dyson }
1260867a482dSJohn Dyson 
1261867a482dSJohn Dyson 
1262867a482dSJohn Dyson /*
1263df8bae1dSRodney W. Grimes  *	vm_map_inherit:
1264df8bae1dSRodney W. Grimes  *
1265df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
1266df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
1267df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
1268df8bae1dSRodney W. Grimes  *	child maps at the time of vm_map_fork.
1269df8bae1dSRodney W. Grimes  */
1270df8bae1dSRodney W. Grimes int
1271b9dcd593SBruce Evans vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1272b9dcd593SBruce Evans 	       vm_inherit_t new_inheritance)
1273df8bae1dSRodney W. Grimes {
1274c0877f10SJohn Dyson 	vm_map_entry_t entry;
1275df8bae1dSRodney W. Grimes 	vm_map_entry_t temp_entry;
1276df8bae1dSRodney W. Grimes 
12770cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
12780cddd8f0SMatthew Dillon 
1279df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
1280df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
1281df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
1282df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
1283df8bae1dSRodney W. Grimes 		break;
1284df8bae1dSRodney W. Grimes 	default:
1285df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
1286df8bae1dSRodney W. Grimes 	}
1287df8bae1dSRodney W. Grimes 
1288df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1289df8bae1dSRodney W. Grimes 
1290df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1291df8bae1dSRodney W. Grimes 
1292df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1293df8bae1dSRodney W. Grimes 		entry = temp_entry;
1294df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
12950d94caffSDavid Greenman 	} else
1296df8bae1dSRodney W. Grimes 		entry = temp_entry->next;
1297df8bae1dSRodney W. Grimes 
1298df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1299df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1300df8bae1dSRodney W. Grimes 
1301df8bae1dSRodney W. Grimes 		entry->inheritance = new_inheritance;
1302df8bae1dSRodney W. Grimes 
130344428f62SAlan Cox 		vm_map_simplify_entry(map, entry);
130444428f62SAlan Cox 
1305df8bae1dSRodney W. Grimes 		entry = entry->next;
1306df8bae1dSRodney W. Grimes 	}
1307df8bae1dSRodney W. Grimes 
1308df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1309df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1310df8bae1dSRodney W. Grimes }
1311df8bae1dSRodney W. Grimes 
1312df8bae1dSRodney W. Grimes /*
13137aaaa4fdSJohn Dyson  * Implement the semantics of mlock
13147aaaa4fdSJohn Dyson  */
13157aaaa4fdSJohn Dyson int
13167aaaa4fdSJohn Dyson vm_map_user_pageable(map, start, end, new_pageable)
1317c0877f10SJohn Dyson 	vm_map_t map;
1318c0877f10SJohn Dyson 	vm_offset_t start;
1319c0877f10SJohn Dyson 	vm_offset_t end;
1320c0877f10SJohn Dyson 	boolean_t new_pageable;
13217aaaa4fdSJohn Dyson {
1322b44959ceSTor Egge 	vm_map_entry_t entry;
13237aaaa4fdSJohn Dyson 	vm_map_entry_t start_entry;
1324b44959ceSTor Egge 	vm_offset_t estart;
13257aaaa4fdSJohn Dyson 	int rv;
13267aaaa4fdSJohn Dyson 
13277aaaa4fdSJohn Dyson 	vm_map_lock(map);
13287aaaa4fdSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
13297aaaa4fdSJohn Dyson 
13307aaaa4fdSJohn Dyson 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
13317aaaa4fdSJohn Dyson 		vm_map_unlock(map);
13327aaaa4fdSJohn Dyson 		return (KERN_INVALID_ADDRESS);
13337aaaa4fdSJohn Dyson 	}
13347aaaa4fdSJohn Dyson 
13357aaaa4fdSJohn Dyson 	if (new_pageable) {
13367aaaa4fdSJohn Dyson 
13377aaaa4fdSJohn Dyson 		entry = start_entry;
13387aaaa4fdSJohn Dyson 		vm_map_clip_start(map, entry, start);
13397aaaa4fdSJohn Dyson 
13407aaaa4fdSJohn Dyson 		/*
13417aaaa4fdSJohn Dyson 		 * Now decrement the wiring count for each region. If a region
13427aaaa4fdSJohn Dyson 		 * becomes completely unwired, unwire its physical pages and
13437aaaa4fdSJohn Dyson 		 * mappings.
13447aaaa4fdSJohn Dyson 		 */
13457aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
1346afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
13477aaaa4fdSJohn Dyson 				vm_map_clip_end(map, entry, end);
1348afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
13497aaaa4fdSJohn Dyson 				entry->wired_count--;
13507aaaa4fdSJohn Dyson 				if (entry->wired_count == 0)
13517aaaa4fdSJohn Dyson 					vm_fault_unwire(map, entry->start, entry->end);
13527aaaa4fdSJohn Dyson 			}
1353b44959ceSTor Egge 			vm_map_simplify_entry(map,entry);
13547aaaa4fdSJohn Dyson 			entry = entry->next;
13557aaaa4fdSJohn Dyson 		}
13567aaaa4fdSJohn Dyson 	} else {
13577aaaa4fdSJohn Dyson 
13587aaaa4fdSJohn Dyson 		entry = start_entry;
13597aaaa4fdSJohn Dyson 
13607aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
13617aaaa4fdSJohn Dyson 
1362afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
13637aaaa4fdSJohn Dyson 				entry = entry->next;
13647aaaa4fdSJohn Dyson 				continue;
13657aaaa4fdSJohn Dyson 			}
13667aaaa4fdSJohn Dyson 
13677aaaa4fdSJohn Dyson 			if (entry->wired_count != 0) {
13687aaaa4fdSJohn Dyson 				entry->wired_count++;
1369afa07f7eSJohn Dyson 				entry->eflags |= MAP_ENTRY_USER_WIRED;
13707aaaa4fdSJohn Dyson 				entry = entry->next;
13717aaaa4fdSJohn Dyson 				continue;
13727aaaa4fdSJohn Dyson 			}
13737aaaa4fdSJohn Dyson 
13747aaaa4fdSJohn Dyson 			/* Here on entry being newly wired */
13757aaaa4fdSJohn Dyson 
13769fdfe602SMatthew Dillon 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1377afa07f7eSJohn Dyson 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
13787aaaa4fdSJohn Dyson 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
13797aaaa4fdSJohn Dyson 
13807aaaa4fdSJohn Dyson 					vm_object_shadow(&entry->object.vm_object,
13817aaaa4fdSJohn Dyson 					    &entry->offset,
1382c2e11a03SJohn Dyson 					    atop(entry->end - entry->start));
1383afa07f7eSJohn Dyson 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
13847aaaa4fdSJohn Dyson 
13854e71e795SMatthew Dillon 				} else if (entry->object.vm_object == NULL &&
13864e71e795SMatthew Dillon 					   !map->system_map) {
13877aaaa4fdSJohn Dyson 
13887aaaa4fdSJohn Dyson 					entry->object.vm_object =
13897aaaa4fdSJohn Dyson 					    vm_object_allocate(OBJT_DEFAULT,
1390c2e11a03SJohn Dyson 						atop(entry->end - entry->start));
13917aaaa4fdSJohn Dyson 					entry->offset = (vm_offset_t) 0;
13927aaaa4fdSJohn Dyson 
13937aaaa4fdSJohn Dyson 				}
13947aaaa4fdSJohn Dyson 			}
13957aaaa4fdSJohn Dyson 
13967aaaa4fdSJohn Dyson 			vm_map_clip_start(map, entry, start);
13977aaaa4fdSJohn Dyson 			vm_map_clip_end(map, entry, end);
13987aaaa4fdSJohn Dyson 
13997aaaa4fdSJohn Dyson 			entry->wired_count++;
1400afa07f7eSJohn Dyson 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1401b44959ceSTor Egge 			estart = entry->start;
14027aaaa4fdSJohn Dyson 
14037aaaa4fdSJohn Dyson 			/* First we need to allow map modifications */
1404996c772fSJohn Dyson 			vm_map_set_recursive(map);
140503e9c6c1SJohn Dyson 			vm_map_lock_downgrade(map);
140647221757SJohn Dyson 			map->timestamp++;
14077aaaa4fdSJohn Dyson 
14087aaaa4fdSJohn Dyson 			rv = vm_fault_user_wire(map, entry->start, entry->end);
14097aaaa4fdSJohn Dyson 			if (rv) {
14107aaaa4fdSJohn Dyson 
14117aaaa4fdSJohn Dyson 				entry->wired_count--;
1412afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
14137aaaa4fdSJohn Dyson 
1414996c772fSJohn Dyson 				vm_map_clear_recursive(map);
14157aaaa4fdSJohn Dyson 				vm_map_unlock(map);
14167aaaa4fdSJohn Dyson 
14177aaaa4fdSJohn Dyson 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
14187aaaa4fdSJohn Dyson 				return rv;
14197aaaa4fdSJohn Dyson 			}
14207aaaa4fdSJohn Dyson 
1421996c772fSJohn Dyson 			vm_map_clear_recursive(map);
1422b44959ceSTor Egge 			if (vm_map_lock_upgrade(map)) {
1423b44959ceSTor Egge 				vm_map_lock(map);
1424b44959ceSTor Egge 				if (vm_map_lookup_entry(map, estart, &entry)
1425b44959ceSTor Egge 				    == FALSE) {
1426b44959ceSTor Egge 					vm_map_unlock(map);
1427b44959ceSTor Egge 					(void) vm_map_user_pageable(map,
1428b44959ceSTor Egge 								    start,
1429b44959ceSTor Egge 								    estart,
1430b44959ceSTor Egge 								    TRUE);
1431b44959ceSTor Egge 					return (KERN_INVALID_ADDRESS);
1432b44959ceSTor Egge 				}
1433b44959ceSTor Egge 			}
1434b44959ceSTor Egge 			vm_map_simplify_entry(map,entry);
14357aaaa4fdSJohn Dyson 		}
14367aaaa4fdSJohn Dyson 	}
143747221757SJohn Dyson 	map->timestamp++;
14387aaaa4fdSJohn Dyson 	vm_map_unlock(map);
14397aaaa4fdSJohn Dyson 	return KERN_SUCCESS;
14407aaaa4fdSJohn Dyson }
14417aaaa4fdSJohn Dyson 
/*
 *	vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 *
 *	map:		map whose pageability is to be changed
 *	start, end:	address range (range-checked against map bounds)
 *	new_pageable:	TRUE to unwire the range, FALSE to wire it
 *
 *	Returns KERN_SUCCESS; KERN_INVALID_ADDRESS if start is not
 *	mapped; KERN_INVALID_ARGUMENT if the range contains a hole or
 *	(when unwiring) an entry that is not wired; or the error from
 *	vm_fault_wire if faulting the pages in fails.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t new_pageable;
{
	vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	vm_offset_t failed = 0;	/* first address whose wiring fault failed */
	int rv;

	GIANT_REQUIRED;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * Only one pageability change may take place at one time, since
	 * vm_fault assumes it will be called only once for each
	 * wiring/unwiring.  Therefore, we have to make sure we're actually
	 * changing the pageability for the entire region.  We do so before
	 * making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * Actions are rather different for wiring and unwiring, so we have
	 * two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 * Unwiring.  First ensure that the range to be unwired is
		 * really wired down and that there are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
				(entry->next == &map->header ||
				    entry->next->start > entry->end))) {
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			/* Coalesce with neighbors that now match again. */
			vm_map_simplify_entry(map, entry);

			entry = entry->next;
		}
	} else {
		/*
		 * Wiring.  We must do this in two passes:
		 *
		 * 1.  Holding the write lock, we create any shadow or zero-fill
		 * objects that need to be created. Then we clip each map
		 * entry to the region to be wired and increment its wiring
		 * count.  We create objects before clipping the map entries
		 * to avoid object proliferation.
		 *
		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
		 * fault in the pages for any newly wired area (wired_count is
		 * 1).
		 *
		 * Downgrading to a read lock for vm_fault_wire avoids a possible
		 * deadlock with another process that may have faulted on one
		 * of the pages to be wired (it would mark the page busy,
		 * blocking us, then in turn block on the map lock that we
		 * hold).  Because of problems in the recursive lock package,
		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
		 * any actions that require the write lock must be done
		 * beforehand.  Because we keep the read lock on the map, the
		 * copy-on-write status of the entries we modify here cannot
		 * change.
		 */

		/*
		 * Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {

				/*
				 * Perform actions of vm_map_lookup that need
				 * the write lock on the map: create a shadow
				 * object for a copy-on-write region, or an
				 * object for a zero-fill region.
				 *
				 * We don't have to do this for entries that
				 * point to sub maps, because we won't
				 * hold the lock on the sub map.
				 */
				if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
					if (copyflag &&
					    ((entry->protection & VM_PROT_WRITE) != 0)) {

						vm_object_shadow(&entry->object.vm_object,
						    &entry->offset,
						    atop(entry->end - entry->start));
						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
					} else if (entry->object.vm_object == NULL &&
						   !map->system_map) {
						entry->object.vm_object =
						    vm_object_allocate(OBJT_DEFAULT,
							atop(entry->end - entry->start));
						entry->offset = (vm_offset_t) 0;
					}
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 * Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
				entry->next->start > entry->end)) {
				/*
				 * Found one.  Object creation actions do not
				 * need to be undone, but the wired counts
				 * need to be restored.  Walk backwards over
				 * everything we incremented in this pass.
				 */
				while (entry != &map->header && entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel is well-behaved, and therefore will not do
		 * anything destructive to this region of the map while
		 * we have it unlocked.  We cannot trust user processes
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			vm_map_lock_downgrade(map);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				rv = vm_fault_wire(map, entry->start, entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		/* Re-take the write lock dropped by the kernel-map hack. */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		}
		if (rv) {
			/*
			 * Unwire the prefix [start, failed) that was wired
			 * before the failure, then report the fault error.
			 */
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
		vm_map_simplify_entry(map, start_entry);
	}

	vm_map_unlock(map);

	return (KERN_SUCCESS);
}
1673df8bae1dSRodney W. Grimes 
1674df8bae1dSRodney W. Grimes /*
1675df8bae1dSRodney W. Grimes  * vm_map_clean
1676df8bae1dSRodney W. Grimes  *
1677df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
1678df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
1679df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
1680df8bae1dSRodney W. Grimes  *
1681df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
1682df8bae1dSRodney W. Grimes  */
1683df8bae1dSRodney W. Grimes int
1684df8bae1dSRodney W. Grimes vm_map_clean(map, start, end, syncio, invalidate)
1685df8bae1dSRodney W. Grimes 	vm_map_t map;
1686df8bae1dSRodney W. Grimes 	vm_offset_t start;
1687df8bae1dSRodney W. Grimes 	vm_offset_t end;
1688df8bae1dSRodney W. Grimes 	boolean_t syncio;
1689df8bae1dSRodney W. Grimes 	boolean_t invalidate;
1690df8bae1dSRodney W. Grimes {
1691c0877f10SJohn Dyson 	vm_map_entry_t current;
1692df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1693df8bae1dSRodney W. Grimes 	vm_size_t size;
1694df8bae1dSRodney W. Grimes 	vm_object_t object;
1695a316d390SJohn Dyson 	vm_ooffset_t offset;
1696df8bae1dSRodney W. Grimes 
16970cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
16980cddd8f0SMatthew Dillon 
1699df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
1700df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1701df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &entry)) {
1702df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
1703df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1704df8bae1dSRodney W. Grimes 	}
1705df8bae1dSRodney W. Grimes 	/*
1706df8bae1dSRodney W. Grimes 	 * Make a first pass to check for holes.
1707df8bae1dSRodney W. Grimes 	 */
1708df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1709afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1710df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1711df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1712df8bae1dSRodney W. Grimes 		}
1713df8bae1dSRodney W. Grimes 		if (end > current->end &&
1714df8bae1dSRodney W. Grimes 		    (current->next == &map->header ||
1715df8bae1dSRodney W. Grimes 			current->end != current->next->start)) {
1716df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1717df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ADDRESS);
1718df8bae1dSRodney W. Grimes 		}
1719df8bae1dSRodney W. Grimes 	}
1720df8bae1dSRodney W. Grimes 
1721cf2819ccSJohn Dyson 	if (invalidate)
1722cf2819ccSJohn Dyson 		pmap_remove(vm_map_pmap(map), start, end);
1723df8bae1dSRodney W. Grimes 	/*
1724df8bae1dSRodney W. Grimes 	 * Make a second pass, cleaning/uncaching pages from the indicated
1725df8bae1dSRodney W. Grimes 	 * objects as we go.
1726df8bae1dSRodney W. Grimes 	 */
1727df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1728df8bae1dSRodney W. Grimes 		offset = current->offset + (start - current->start);
1729df8bae1dSRodney W. Grimes 		size = (end <= current->end ? end : current->end) - start;
17309fdfe602SMatthew Dillon 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1731c0877f10SJohn Dyson 			vm_map_t smap;
1732df8bae1dSRodney W. Grimes 			vm_map_entry_t tentry;
1733df8bae1dSRodney W. Grimes 			vm_size_t tsize;
1734df8bae1dSRodney W. Grimes 
17359fdfe602SMatthew Dillon 			smap = current->object.sub_map;
1736df8bae1dSRodney W. Grimes 			vm_map_lock_read(smap);
1737df8bae1dSRodney W. Grimes 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1738df8bae1dSRodney W. Grimes 			tsize = tentry->end - offset;
1739df8bae1dSRodney W. Grimes 			if (tsize < size)
1740df8bae1dSRodney W. Grimes 				size = tsize;
1741df8bae1dSRodney W. Grimes 			object = tentry->object.vm_object;
1742df8bae1dSRodney W. Grimes 			offset = tentry->offset + (offset - tentry->start);
1743df8bae1dSRodney W. Grimes 			vm_map_unlock_read(smap);
1744df8bae1dSRodney W. Grimes 		} else {
1745df8bae1dSRodney W. Grimes 			object = current->object.vm_object;
1746df8bae1dSRodney W. Grimes 		}
17478a02c104SJohn Dyson 		/*
17488a02c104SJohn Dyson 		 * Note that there is absolutely no sense in writing out
17498a02c104SJohn Dyson 		 * anonymous objects, so we track down the vnode object
17508a02c104SJohn Dyson 		 * to write out.
17518a02c104SJohn Dyson 		 * We invalidate (remove) all pages from the address space
17528a02c104SJohn Dyson 		 * anyway, for semantic correctness.
17538a02c104SJohn Dyson 		 */
17548a02c104SJohn Dyson 		while (object->backing_object) {
17558a02c104SJohn Dyson 			object = object->backing_object;
17568a02c104SJohn Dyson 			offset += object->backing_object_offset;
17578a02c104SJohn Dyson 			if (object->size < OFF_TO_IDX( offset + size))
17588a02c104SJohn Dyson 				size = IDX_TO_OFF(object->size) - offset;
17598a02c104SJohn Dyson 		}
1760ff359f84SMatthew Dillon 		if (object && (object->type == OBJT_VNODE) &&
1761ff359f84SMatthew Dillon 		    (current->protection & VM_PROT_WRITE)) {
1762df8bae1dSRodney W. Grimes 			/*
1763ff359f84SMatthew Dillon 			 * Flush pages if writing is allowed, invalidate them
1764ff359f84SMatthew Dillon 			 * if invalidation requested.  Pages undergoing I/O
1765ff359f84SMatthew Dillon 			 * will be ignored by vm_object_page_remove().
1766f5cf85d4SDavid Greenman 			 *
1767ff359f84SMatthew Dillon 			 * We cannot lock the vnode and then wait for paging
1768ff359f84SMatthew Dillon 			 * to complete without deadlocking against vm_fault.
1769ff359f84SMatthew Dillon 			 * Instead we simply call vm_object_page_remove() and
1770ff359f84SMatthew Dillon 			 * allow it to block internally on a page-by-page
1771ff359f84SMatthew Dillon 			 * basis when it encounters pages undergoing async
1772ff359f84SMatthew Dillon 			 * I/O.
1773df8bae1dSRodney W. Grimes 			 */
17748f9110f6SJohn Dyson 			int flags;
1775ff359f84SMatthew Dillon 
1776ff359f84SMatthew Dillon 			vm_object_reference(object);
1777157ac55fSJohn Dyson 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
17788f9110f6SJohn Dyson 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
17798f9110f6SJohn Dyson 			flags |= invalidate ? OBJPC_INVAL : 0;
1780a316d390SJohn Dyson 			vm_object_page_clean(object,
1781a316d390SJohn Dyson 			    OFF_TO_IDX(offset),
17822be70f79SJohn Dyson 			    OFF_TO_IDX(offset + size + PAGE_MASK),
17838f9110f6SJohn Dyson 			    flags);
1784cf2819ccSJohn Dyson 			if (invalidate) {
1785ff359f84SMatthew Dillon 				/*vm_object_pip_wait(object, "objmcl");*/
1786a316d390SJohn Dyson 				vm_object_page_remove(object,
1787a316d390SJohn Dyson 				    OFF_TO_IDX(offset),
17882be70f79SJohn Dyson 				    OFF_TO_IDX(offset + size + PAGE_MASK),
1789a316d390SJohn Dyson 				    FALSE);
1790cf2819ccSJohn Dyson 			}
17912be70f79SJohn Dyson 			VOP_UNLOCK(object->handle, 0, curproc);
1792ff359f84SMatthew Dillon 			vm_object_deallocate(object);
1793a02051c3SJohn Dyson 		}
1794df8bae1dSRodney W. Grimes 		start += size;
1795df8bae1dSRodney W. Grimes 	}
1796df8bae1dSRodney W. Grimes 
1797df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
1798df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1799df8bae1dSRodney W. Grimes }
1800df8bae1dSRodney W. Grimes 
1801df8bae1dSRodney W. Grimes /*
1802df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
1803df8bae1dSRodney W. Grimes  *
1804df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
1805df8bae1dSRodney W. Grimes  *
1806df8bae1dSRodney W. Grimes  *	The map in question should be locked.
1807df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
1808df8bae1dSRodney W. Grimes  */
18090362d7d7SJohn Dyson static void
18100d94caffSDavid Greenman vm_map_entry_unwire(map, entry)
1811df8bae1dSRodney W. Grimes 	vm_map_t map;
1812c0877f10SJohn Dyson 	vm_map_entry_t entry;
1813df8bae1dSRodney W. Grimes {
1814df8bae1dSRodney W. Grimes 	vm_fault_unwire(map, entry->start, entry->end);
1815df8bae1dSRodney W. Grimes 	entry->wired_count = 0;
1816df8bae1dSRodney W. Grimes }
1817df8bae1dSRodney W. Grimes 
1818df8bae1dSRodney W. Grimes /*
1819df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
1820df8bae1dSRodney W. Grimes  *
1821df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
1822df8bae1dSRodney W. Grimes  */
18230362d7d7SJohn Dyson static void
18240d94caffSDavid Greenman vm_map_entry_delete(map, entry)
1825c0877f10SJohn Dyson 	vm_map_t map;
1826c0877f10SJohn Dyson 	vm_map_entry_t entry;
1827df8bae1dSRodney W. Grimes {
1828df8bae1dSRodney W. Grimes 	vm_map_entry_unlink(map, entry);
1829df8bae1dSRodney W. Grimes 	map->size -= entry->end - entry->start;
1830df8bae1dSRodney W. Grimes 
18319fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1832df8bae1dSRodney W. Grimes 		vm_object_deallocate(entry->object.vm_object);
1833b5b40fa6SJohn Dyson 	}
1834df8bae1dSRodney W. Grimes 
1835df8bae1dSRodney W. Grimes 	vm_map_entry_dispose(map, entry);
1836df8bae1dSRodney W. Grimes }
1837df8bae1dSRodney W. Grimes 
1838df8bae1dSRodney W. Grimes /*
1839df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
1840df8bae1dSRodney W. Grimes  *
1841df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
1842df8bae1dSRodney W. Grimes  *	map.
1843df8bae1dSRodney W. Grimes  */
int
vm_map_delete(map, start, end)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
{
	vm_object_t object;
	vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/* Giant must be held; the caller must also hold the map lock. */
	GIANT_REQUIRED;

	/*
	 * Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);
		/*
		 * Fix the lookup hint now, rather than each time though the
		 * loop.
		 */
		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (entry == &map->header) {
		map->first_free = &map->header;
	} else if (map->first_free->start >= start) {
		map->first_free = entry->prev;
	}

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		vm_offset_t s, e;
		vm_pindex_t offidxstart, offidxend, count;

		/* Clip so the entry ends at or before the deletion range. */
		vm_map_clip_end(map, entry, end);

		s = entry->start;
		e = entry->end;
		next = entry->next;

		/* Page-index window this entry occupies in its object. */
		offidxstart = OFF_TO_IDX(entry->offset);
		count = OFF_TO_IDX(e - s);
		object = entry->object.vm_object;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}

		offidxend = offidxstart + count;

		if ((object == kernel_object) || (object == kmem_object)) {
			/*
			 * Kernel-internal objects: free the backing pages
			 * directly; no pmap_remove pass is done here.
			 */
			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
		} else {
			/* Remove all translations for the range first. */
			pmap_remove(map->pmap, s, e);
			/*
			 * If this map was the object's sole mapping (but the
			 * object is still multiply referenced), we can
			 * eagerly reclaim its pages and swap space for this
			 * window, and trim the object's reported size.
			 */
			if (object != NULL &&
			    object->ref_count != 1 &&
			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
				vm_object_collapse(object);
				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
				if (object->type == OBJT_SWAP) {
					swap_pager_freespace(object, offidxstart, count);
				}
				if (offidxend >= object->size &&
				    offidxstart < object->size) {
					object->size = offidxstart;
				}
			}
		}

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}
1942df8bae1dSRodney W. Grimes 
1943df8bae1dSRodney W. Grimes /*
1944df8bae1dSRodney W. Grimes  *	vm_map_remove:
1945df8bae1dSRodney W. Grimes  *
1946df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
1947df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
1948df8bae1dSRodney W. Grimes  */
1949df8bae1dSRodney W. Grimes int
1950df8bae1dSRodney W. Grimes vm_map_remove(map, start, end)
1951c0877f10SJohn Dyson 	vm_map_t map;
1952c0877f10SJohn Dyson 	vm_offset_t start;
1953c0877f10SJohn Dyson 	vm_offset_t end;
1954df8bae1dSRodney W. Grimes {
1955c0877f10SJohn Dyson 	int result, s = 0;
19568d6e8edeSDavid Greenman 
19570cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
19580cddd8f0SMatthew Dillon 
195908442f8aSBosko Milekic 	if (map == kmem_map)
1960b18bfc3dSJohn Dyson 		s = splvm();
1961df8bae1dSRodney W. Grimes 
1962df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1963df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1964df8bae1dSRodney W. Grimes 	result = vm_map_delete(map, start, end);
1965df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1966df8bae1dSRodney W. Grimes 
196708442f8aSBosko Milekic 	if (map == kmem_map)
19688d6e8edeSDavid Greenman 		splx(s);
19698d6e8edeSDavid Greenman 
1970df8bae1dSRodney W. Grimes 	return (result);
1971df8bae1dSRodney W. Grimes }
1972df8bae1dSRodney W. Grimes 
1973df8bae1dSRodney W. Grimes /*
1974df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
1975df8bae1dSRodney W. Grimes  *
1976df8bae1dSRodney W. Grimes  *	Assert that the target map allows the specified
1977df8bae1dSRodney W. Grimes  *	privilege on the entire address region given.
1978df8bae1dSRodney W. Grimes  *	The entire region must be allocated.
1979df8bae1dSRodney W. Grimes  */
19800d94caffSDavid Greenman boolean_t
1981b9dcd593SBruce Evans vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
1982b9dcd593SBruce Evans 			vm_prot_t protection)
1983df8bae1dSRodney W. Grimes {
1984c0877f10SJohn Dyson 	vm_map_entry_t entry;
1985df8bae1dSRodney W. Grimes 	vm_map_entry_t tmp_entry;
1986df8bae1dSRodney W. Grimes 
19870cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
19880cddd8f0SMatthew Dillon 
1989df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1990df8bae1dSRodney W. Grimes 		return (FALSE);
1991df8bae1dSRodney W. Grimes 	}
1992df8bae1dSRodney W. Grimes 	entry = tmp_entry;
1993df8bae1dSRodney W. Grimes 
1994df8bae1dSRodney W. Grimes 	while (start < end) {
1995df8bae1dSRodney W. Grimes 		if (entry == &map->header) {
1996df8bae1dSRodney W. Grimes 			return (FALSE);
1997df8bae1dSRodney W. Grimes 		}
1998df8bae1dSRodney W. Grimes 		/*
1999df8bae1dSRodney W. Grimes 		 * No holes allowed!
2000df8bae1dSRodney W. Grimes 		 */
2001df8bae1dSRodney W. Grimes 
2002df8bae1dSRodney W. Grimes 		if (start < entry->start) {
2003df8bae1dSRodney W. Grimes 			return (FALSE);
2004df8bae1dSRodney W. Grimes 		}
2005df8bae1dSRodney W. Grimes 		/*
2006df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
2007df8bae1dSRodney W. Grimes 		 */
2008df8bae1dSRodney W. Grimes 
2009df8bae1dSRodney W. Grimes 		if ((entry->protection & protection) != protection) {
2010df8bae1dSRodney W. Grimes 			return (FALSE);
2011df8bae1dSRodney W. Grimes 		}
2012df8bae1dSRodney W. Grimes 		/* go to next entry */
2013df8bae1dSRodney W. Grimes 
2014df8bae1dSRodney W. Grimes 		start = entry->end;
2015df8bae1dSRodney W. Grimes 		entry = entry->next;
2016df8bae1dSRodney W. Grimes 	}
2017df8bae1dSRodney W. Grimes 	return (TRUE);
2018df8bae1dSRodney W. Grimes }
2019df8bae1dSRodney W. Grimes 
202086524867SJohn Dyson /*
202186524867SJohn Dyson  * Split the pages in a map entry into a new object.  This affords
202286524867SJohn Dyson  * easier removal of unused pages, and keeps object inheritance from
202386524867SJohn Dyson  * being a negative impact on memory usage.
202486524867SJohn Dyson  */
static void
vm_map_split(entry)
	vm_map_entry_t entry;
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_offset_t s, e;
	vm_pindex_t offidxstart, offidxend, idx;
	vm_size_t size;
	vm_ooffset_t offset;

	GIANT_REQUIRED;

	/* Only anonymous (default) or swap-backed objects may be split. */
	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	/* A single-reference object has no other sharers; nothing to gain. */
	if (orig_object->ref_count <= 1)
		return;

	offset = entry->offset;
	s = entry->start;
	e = entry->end;

	/* Page-index window this entry covers within orig_object. */
	offidxstart = OFF_TO_IDX(offset);
	offidxend = offidxstart + OFF_TO_IDX(e - s);
	size = offidxend - offidxstart;

	/* Allocate a fresh object of the same type to receive the pages. */
	new_object = vm_pager_allocate(orig_object->type,
		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
	if (new_object == NULL)
		return;

	/*
	 * If orig_object shadows a backing object, make new_object shadow
	 * the same source, offset by this entry's window.
	 */
	source = orig_object->backing_object;
	if (source != NULL) {
		vm_object_reference(source);	/* Referenced by new_object */
		TAILQ_INSERT_TAIL(&source->shadow_head,
				  new_object, shadow_list);
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		new_object->backing_object_offset =
			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
		new_object->backing_object = source;
		source->shadow_count++;
		source->generation++;
	}

	/* Move every resident page in the window over to new_object. */
	for (idx = 0; idx < size; idx++) {
		/* NOTE(review): this declaration shadows the outer `m'. */
		vm_page_t m;

	retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
			goto retry;

		vm_page_busy(m);
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		/*
		 * NOTE(review): the page is busied again here and only woken
		 * in the loop below, after the swap metadata copy.  Keeping
		 * the page busy across swap_pager_copy() appears deliberate,
		 * but confirm against vm_page_rename()'s busy handling.
		 */
		vm_page_busy(m);
	}

	if (orig_object->type == OBJT_SWAP) {
		/* Hold paging-in-progress while swap metadata is moved. */
		vm_object_pip_add(orig_object, 1);
		/*
		 * copy orig_object pages into new_object
		 * and destroy unneeded pages in
		 * shadow object.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		vm_object_pip_wakeup(orig_object);
	}

	/* Wake the pages that were moved (and left busy) above. */
	for (idx = 0; idx < size; idx++) {
		m = vm_page_lookup(new_object, idx);
		if (m) {
			vm_page_wakeup(m);
		}
	}

	/* Retarget the entry at the new object; drop our old reference. */
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
}
2116c0877f10SJohn Dyson 
2117df8bae1dSRodney W. Grimes /*
2118df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
2119df8bae1dSRodney W. Grimes  *
2120df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
2121df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
2122df8bae1dSRodney W. Grimes  */
static void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t src_map, dst_map;
	vm_map_entry_t src_entry, dst_entry;
{
	vm_object_t src_object;

	/* Submap entries are never copied here. */
	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
			/*
			 * Write-protect the source range so the next write
			 * faults and performs the actual copy (COW).
			 */
			pmap_protect(src_map->pmap,
			    src_entry->start,
			    src_entry->end,
			    src_entry->protection & ~VM_PROT_WRITE);
		}

		/*
		 * Make a copy of the object.
		 */
		if ((src_object = src_entry->object.vm_object) != NULL) {

			/*
			 * For unnamed anonymous/swap objects, try to
			 * collapse first and, if this entry was the sole
			 * mapping, split its pages into a private object.
			 */
			if ((src_object->handle == NULL) &&
				(src_object->type == OBJT_DEFAULT ||
				 src_object->type == OBJT_SWAP)) {
				vm_object_collapse(src_object);
				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
					vm_map_split(src_entry);
					src_object = src_entry->object.vm_object;
				}
			}

			/* Share the object; both sides become COW. */
			vm_object_reference(src_object);
			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
			dst_entry->object.vm_object = src_object;
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->offset = src_entry->offset;
		} else {
			/* No backing object yet; the destination gets none. */
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
		}

		/* Give the destination pmap the (read-only) translations. */
		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}
2183df8bae1dSRodney W. Grimes 
2184df8bae1dSRodney W. Grimes /*
2185df8bae1dSRodney W. Grimes  * vmspace_fork:
2186df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
2187df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
2188df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
2189df8bae1dSRodney W. Grimes  * values on the regions in that map.
2190df8bae1dSRodney W. Grimes  *
2191df8bae1dSRodney W. Grimes  * The source map must not be locked.
2192df8bae1dSRodney W. Grimes  */
struct vmspace *
vmspace_fork(vm1)
	struct vmspace *vm1;
{
	struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	vm_object_t object;

	GIANT_REQUIRED;

	vm_map_lock(old_map);
	/* Flag that a fork is in progress on this map. */
	old_map->infork = 1;

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
	/* Copy the per-vmspace statistics area (vm_startcopy onward). */
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_map = &vm2->vm_map;	/* XXX */
	new_map->timestamp = 1;

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			/* Not inherited: the child simply lacks this range. */
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
					atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = (vm_offset_t) 0;
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to insure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				/*
				 * Resolve the pending COW now so parent and
				 * child genuinely share the same object.
				 */
				vm_object_shadow(&old_entry->object.vm_object,
					&old_entry->offset,
					atop(old_entry->end - old_entry->start));
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			vm_object_clear_flag(object, OBJ_ONEMAPPING);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			/* User wiring is not inherited by the child. */
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */

			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);

			/*
			 * Update the physical map
			 */

			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;
			/* The object link is set up by vm_map_copy_entry. */
			new_entry->object.vm_object = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	old_map->infork = 0;
	vm_map_unlock(old_map);

	return (vm2);
}
2305df8bae1dSRodney W. Grimes 
/*
 * Reserve max_ssize bytes at addrbos for a grow-down stack, but map only
 * the initial init_ssize bytes at the top of the range; the remainder is
 * recorded in avail_ssize and mapped on demand by vm_map_growstack().
 * Returns a KERN_* status.
 */
int
vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
	      vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t new_stack_entry;
	vm_size_t      init_ssize;
	int            rv;

	GIANT_REQUIRED;

	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
		return (KERN_NO_SPACE);

	/* Initially map at most SGROWSIZ; grow the rest on demand. */
	if (max_ssize < SGROWSIZ)
		init_ssize = max_ssize;
	else
		init_ssize = SGROWSIZ;

	vm_map_lock(map);

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* If we can't accomodate max_ssize in the current mapping,
	 * no go.  However, we need to be aware that subsequent user
	 * mappings might map into the space we have reserved for
	 * stack, and currently this space is not protected.
	 *
	 * Hopefully we will at least detect this condition
	 * when we try to grow the stack.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < addrbos + max_ssize)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* We initially map a stack of only init_ssize.  We will
	 * grow as needed later.  Since this is to be a grow
	 * down stack, we map at the top of the range.
	 *
	 * Note: we would normally expect prot and max to be
	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
	 * eliminate these as input parameters, and just
	 * pass these values here in the insert call.
	 */
	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
	                   addrbos + max_ssize, prot, max, cow);

	/* Now set the avail_ssize amount */
	if (rv == KERN_SUCCESS){
		/*
		 * Clip so that prev_entry->next is exactly the entry just
		 * inserted; the insert may have coalesced with prev_entry.
		 */
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end   != addrbos + max_ssize ||
		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
			panic ("Bad entry start/end for new stack entry");
		else
			new_stack_entry->avail_ssize = max_ssize - init_ssize;
	}

	vm_map_unlock(map);
	return (rv);
}
237494f7e29aSAlan Cox 
237594f7e29aSAlan Cox /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
237694f7e29aSAlan Cox  * desired address is already mapped, or if we successfully grow
237794f7e29aSAlan Cox  * the stack.  Also returns KERN_SUCCESS if addr is outside the
237894f7e29aSAlan Cox  * stack range (this is strange, but preserves compatibility with
237994f7e29aSAlan Cox  * the grow function in vm_machdep.c).
238094f7e29aSAlan Cox  */
238194f7e29aSAlan Cox int
238294f7e29aSAlan Cox vm_map_growstack (struct proc *p, vm_offset_t addr)
238394f7e29aSAlan Cox {
238494f7e29aSAlan Cox 	vm_map_entry_t prev_entry;
238594f7e29aSAlan Cox 	vm_map_entry_t stack_entry;
238694f7e29aSAlan Cox 	vm_map_entry_t new_stack_entry;
238794f7e29aSAlan Cox 	struct vmspace *vm = p->p_vmspace;
238894f7e29aSAlan Cox 	vm_map_t map = &vm->vm_map;
238994f7e29aSAlan Cox 	vm_offset_t    end;
239094f7e29aSAlan Cox 	int      grow_amount;
239194f7e29aSAlan Cox 	int      rv;
239294f7e29aSAlan Cox 	int      is_procstack;
239323955314SAlfred Perlstein 
23940cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
239523955314SAlfred Perlstein 
239694f7e29aSAlan Cox Retry:
239794f7e29aSAlan Cox 	vm_map_lock_read(map);
239894f7e29aSAlan Cox 
239994f7e29aSAlan Cox 	/* If addr is already in the entry range, no need to grow.*/
240094f7e29aSAlan Cox 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
240194f7e29aSAlan Cox 		vm_map_unlock_read(map);
24020cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
240394f7e29aSAlan Cox 	}
240494f7e29aSAlan Cox 
240594f7e29aSAlan Cox 	if ((stack_entry = prev_entry->next) == &map->header) {
240694f7e29aSAlan Cox 		vm_map_unlock_read(map);
24070cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
240894f7e29aSAlan Cox 	}
240994f7e29aSAlan Cox 	if (prev_entry == &map->header)
241094f7e29aSAlan Cox 		end = stack_entry->start - stack_entry->avail_ssize;
241194f7e29aSAlan Cox 	else
241294f7e29aSAlan Cox 		end = prev_entry->end;
241394f7e29aSAlan Cox 
241494f7e29aSAlan Cox 	/* This next test mimics the old grow function in vm_machdep.c.
241594f7e29aSAlan Cox 	 * It really doesn't quite make sense, but we do it anyway
241694f7e29aSAlan Cox 	 * for compatibility.
241794f7e29aSAlan Cox 	 *
241894f7e29aSAlan Cox 	 * If not growable stack, return success.  This signals the
241994f7e29aSAlan Cox 	 * caller to proceed as he would normally with normal vm.
242094f7e29aSAlan Cox 	 */
242194f7e29aSAlan Cox 	if (stack_entry->avail_ssize < 1 ||
242294f7e29aSAlan Cox 	    addr >= stack_entry->start ||
242394f7e29aSAlan Cox 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
242494f7e29aSAlan Cox 		vm_map_unlock_read(map);
24250cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
242694f7e29aSAlan Cox 	}
242794f7e29aSAlan Cox 
242894f7e29aSAlan Cox 	/* Find the minimum grow amount */
242994f7e29aSAlan Cox 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
243094f7e29aSAlan Cox 	if (grow_amount > stack_entry->avail_ssize) {
243194f7e29aSAlan Cox 		vm_map_unlock_read(map);
24320cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
243394f7e29aSAlan Cox 	}
243494f7e29aSAlan Cox 
243594f7e29aSAlan Cox 	/* If there is no longer enough space between the entries
243694f7e29aSAlan Cox 	 * nogo, and adjust the available space.  Note: this
243794f7e29aSAlan Cox 	 * should only happen if the user has mapped into the
243894f7e29aSAlan Cox 	 * stack area after the stack was created, and is
243994f7e29aSAlan Cox 	 * probably an error.
244094f7e29aSAlan Cox 	 *
244194f7e29aSAlan Cox 	 * This also effectively destroys any guard page the user
244294f7e29aSAlan Cox 	 * might have intended by limiting the stack size.
244394f7e29aSAlan Cox 	 */
244494f7e29aSAlan Cox 	if (grow_amount > stack_entry->start - end) {
244594f7e29aSAlan Cox 		if (vm_map_lock_upgrade(map))
244694f7e29aSAlan Cox 			goto Retry;
244794f7e29aSAlan Cox 
244894f7e29aSAlan Cox 		stack_entry->avail_ssize = stack_entry->start - end;
244994f7e29aSAlan Cox 
245094f7e29aSAlan Cox 		vm_map_unlock(map);
24510cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
245294f7e29aSAlan Cox 	}
245394f7e29aSAlan Cox 
245494f7e29aSAlan Cox 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
245594f7e29aSAlan Cox 
245694f7e29aSAlan Cox 	/* If this is the main process stack, see if we're over the
245794f7e29aSAlan Cox 	 * stack limit.
245894f7e29aSAlan Cox 	 */
24596389da78SAlan Cox 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
246094f7e29aSAlan Cox 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
246194f7e29aSAlan Cox 		vm_map_unlock_read(map);
24620cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
246394f7e29aSAlan Cox 	}
246494f7e29aSAlan Cox 
246594f7e29aSAlan Cox 	/* Round up the grow amount modulo SGROWSIZ */
246694f7e29aSAlan Cox 	grow_amount = roundup (grow_amount, SGROWSIZ);
246794f7e29aSAlan Cox 	if (grow_amount > stack_entry->avail_ssize) {
246894f7e29aSAlan Cox 		grow_amount = stack_entry->avail_ssize;
246994f7e29aSAlan Cox 	}
24706389da78SAlan Cox 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
247194f7e29aSAlan Cox 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
247294f7e29aSAlan Cox 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
24736389da78SAlan Cox 		              ctob(vm->vm_ssize);
247494f7e29aSAlan Cox 	}
247594f7e29aSAlan Cox 
247694f7e29aSAlan Cox 	if (vm_map_lock_upgrade(map))
247794f7e29aSAlan Cox 		goto Retry;
247894f7e29aSAlan Cox 
247994f7e29aSAlan Cox 	/* Get the preliminary new entry start value */
248094f7e29aSAlan Cox 	addr = stack_entry->start - grow_amount;
248194f7e29aSAlan Cox 
248294f7e29aSAlan Cox 	/* If this puts us into the previous entry, cut back our growth
248394f7e29aSAlan Cox 	 * to the available space.  Also, see the note above.
248494f7e29aSAlan Cox 	 */
248594f7e29aSAlan Cox 	if (addr < end) {
248694f7e29aSAlan Cox 		stack_entry->avail_ssize = stack_entry->start - end;
248794f7e29aSAlan Cox 		addr = end;
248894f7e29aSAlan Cox 	}
248994f7e29aSAlan Cox 
249094f7e29aSAlan Cox 	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
249129b45e9eSAlan Cox 			   VM_PROT_ALL,
249229b45e9eSAlan Cox 			   VM_PROT_ALL,
249394f7e29aSAlan Cox 			   0);
249494f7e29aSAlan Cox 
249594f7e29aSAlan Cox 	/* Adjust the available stack space by the amount we grew. */
249694f7e29aSAlan Cox 	if (rv == KERN_SUCCESS) {
249729b45e9eSAlan Cox 		if (prev_entry != &map->header)
249829b45e9eSAlan Cox 			vm_map_clip_end(map, prev_entry, addr);
249994f7e29aSAlan Cox 		new_stack_entry = prev_entry->next;
250094f7e29aSAlan Cox 		if (new_stack_entry->end   != stack_entry->start  ||
250194f7e29aSAlan Cox 		    new_stack_entry->start != addr)
250294f7e29aSAlan Cox 			panic ("Bad stack grow start/end in new stack entry");
250394f7e29aSAlan Cox 		else {
250494f7e29aSAlan Cox 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
250594f7e29aSAlan Cox 							(new_stack_entry->end -
250694f7e29aSAlan Cox 							 new_stack_entry->start);
250794f7e29aSAlan Cox 			if (is_procstack)
25086389da78SAlan Cox 				vm->vm_ssize += btoc(new_stack_entry->end -
25096389da78SAlan Cox 						     new_stack_entry->start);
251094f7e29aSAlan Cox 		}
251194f7e29aSAlan Cox 	}
251294f7e29aSAlan Cox 
251394f7e29aSAlan Cox 	vm_map_unlock(map);
25140cddd8f0SMatthew Dillon 	return (rv);
251594f7e29aSAlan Cox }
251694f7e29aSAlan Cox 
2517df8bae1dSRodney W. Grimes /*
25185856e12eSJohn Dyson  * Unshare the specified VM space for exec.  If other processes are
25195856e12eSJohn Dyson  * mapped to it, then create a new one.  The new vmspace is null.
25205856e12eSJohn Dyson  */
25215856e12eSJohn Dyson 
25225856e12eSJohn Dyson void
25235856e12eSJohn Dyson vmspace_exec(struct proc *p) {
25245856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
25255856e12eSJohn Dyson 	struct vmspace *newvmspace;
25265856e12eSJohn Dyson 	vm_map_t map = &p->p_vmspace->vm_map;
25275856e12eSJohn Dyson 
25280cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
25292d8acc0fSJohn Dyson 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
25305856e12eSJohn Dyson 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
25315856e12eSJohn Dyson 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
25325856e12eSJohn Dyson 	/*
25335856e12eSJohn Dyson 	 * This code is written like this for prototype purposes.  The
25345856e12eSJohn Dyson 	 * goal is to avoid running down the vmspace here, but let the
25355856e12eSJohn Dyson 	 * other process's that are still using the vmspace to finally
25365856e12eSJohn Dyson 	 * run it down.  Even though there is little or no chance of blocking
25375856e12eSJohn Dyson 	 * here, it is a good idea to keep this form for future mods.
25385856e12eSJohn Dyson 	 */
25395856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
2540d4da2dbaSAlan Cox 	pmap_pinit2(vmspace_pmap(newvmspace));
254121c641b2SJohn Baldwin 	vmspace_free(oldvmspace);
25425856e12eSJohn Dyson 	if (p == curproc)
25435856e12eSJohn Dyson 		pmap_activate(p);
25445856e12eSJohn Dyson }
25455856e12eSJohn Dyson 
25465856e12eSJohn Dyson /*
25475856e12eSJohn Dyson  * Unshare the specified VM space for forcing COW.  This
25485856e12eSJohn Dyson  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
25495856e12eSJohn Dyson  */
25505856e12eSJohn Dyson 
25515856e12eSJohn Dyson void
25525856e12eSJohn Dyson vmspace_unshare(struct proc *p) {
25535856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
25545856e12eSJohn Dyson 	struct vmspace *newvmspace;
25555856e12eSJohn Dyson 
25560cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
25575856e12eSJohn Dyson 	if (oldvmspace->vm_refcnt == 1)
25585856e12eSJohn Dyson 		return;
25595856e12eSJohn Dyson 	newvmspace = vmspace_fork(oldvmspace);
25605856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
2561d4da2dbaSAlan Cox 	pmap_pinit2(vmspace_pmap(newvmspace));
256221c641b2SJohn Baldwin 	vmspace_free(oldvmspace);
25635856e12eSJohn Dyson 	if (p == curproc)
25645856e12eSJohn Dyson 		pmap_activate(p);
25655856e12eSJohn Dyson }
25665856e12eSJohn Dyson 
25675856e12eSJohn Dyson 
25685856e12eSJohn Dyson /*
2569df8bae1dSRodney W. Grimes  *	vm_map_lookup:
2570df8bae1dSRodney W. Grimes  *
2571df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
2572df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
2573df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
2574df8bae1dSRodney W. Grimes  *	type specified.
2575df8bae1dSRodney W. Grimes  *
2576df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
2577df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
2578df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
2579df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
2580df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
2581df8bae1dSRodney W. Grimes  *
2582df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
2583df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
2584df8bae1dSRodney W. Grimes  *
2585df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
2586df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
2587df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
2588df8bae1dSRodney W. Grimes  *	remain the same.
2589df8bae1dSRodney W. Grimes  */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	/*
	 * fault_type is stripped of pseudo-protection bits below;
	 * fault_typea keeps the caller's original request (including
	 * VM_PROT_OVERRIDE_WRITE) for the forced-COW checks.
	 */
	vm_prot_t fault_type = fault_typea;

	GIANT_REQUIRED;
RetryLookup:;

	/*
	 * Lookup the faulting address.
	 */

	vm_map_lock_read(map);

	/* Drop the read lock and return an error to the caller. */
#define	RETURN(why) \
		{ \
		vm_map_unlock_read(map); \
		return(why); \
		}

	/*
	 * If the map has an interesting hint, try it before calling full
	 * blown lookup routine.
	 */

	entry = map->hint;

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 * Handle submaps: descend into the submap and redo the lookup
	 * there.  *var_map is updated so the caller unlocks the right
	 * map in vm_map_lookup_done().
	 */

	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 * Note the special case for MAP_ENTRY_COW
	 * pages with an override.  This is to implement a forced
	 * COW for debuggers.
	 */

	if (fault_type & VM_PROT_OVERRIDE_WRITE)
		prot = entry->max_protection;
	else
		prot = entry->protection;

	/* Strip pseudo-protection bits before the access check. */
	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type) {
			RETURN(KERN_PROTECTION_FAILURE);
	}

	/*
	 * Refuse plain writes to user-wired copy-on-write entries;
	 * only the VM_PROT_OVERRIDE_WRITE (forced COW) path may
	 * write to them.
	 */
	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE) &&
	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
		RETURN(KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */

	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If the entry was copy-on-write, we either ...
	 */

	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */

			/*
			 * A failed upgrade means the lock was dropped, so
			 * the entry may have changed: start over.
			 */
			if (vm_map_lock_upgrade(map))
				goto RetryLookup;

			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    atop(entry->end - entry->start));

			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
			vm_map_lock_downgrade(map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */

			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (vm_map_lock_upgrade(map))
			goto RetryLookup;

		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->offset = 0;
		vm_map_lock_downgrade(map);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */

	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */

	*out_prot = prot;
	/* Success: the map in *var_map is left read-locked. */
	return (KERN_SUCCESS);

#undef	RETURN
}
2761df8bae1dSRodney W. Grimes 
2762df8bae1dSRodney W. Grimes /*
2763df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
2764df8bae1dSRodney W. Grimes  *
2765df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
2766df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
2767df8bae1dSRodney W. Grimes  */
2768df8bae1dSRodney W. Grimes 
27690d94caffSDavid Greenman void
27700d94caffSDavid Greenman vm_map_lookup_done(map, entry)
2771c0877f10SJohn Dyson 	vm_map_t map;
2772df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
2773df8bae1dSRodney W. Grimes {
2774df8bae1dSRodney W. Grimes 	/*
2775df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
2776df8bae1dSRodney W. Grimes 	 */
27770cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
2778df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
2779df8bae1dSRodney W. Grimes }
2780df8bae1dSRodney W. Grimes 
27811efb74fbSJohn Dyson /*
27821efb74fbSJohn Dyson  * Implement uiomove with VM operations.  This handles (and collateral changes)
27831efb74fbSJohn Dyson  * support every combination of source object modification, and COW type
27841efb74fbSJohn Dyson  * operations.
27851efb74fbSJohn Dyson  */
int
vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
	vm_map_t mapa;
	vm_object_t srcobject;
	off_t cp;
	int cnta;
	vm_offset_t uaddra;
	int *npages;
{
	vm_map_t map;
	vm_object_t first_object, oldobject, object;
	vm_map_entry_t entry;
	vm_prot_t prot;
	boolean_t wired;
	int tcnt, rv;
	vm_offset_t uaddr, start, end, tend;
	vm_pindex_t first_pindex, osize, oindex;
	off_t ooffset;
	int cnt;

	/*
	 * Parameters:
	 *   mapa      - map containing the user window to redirect
	 *   srcobject - object that will supply the backing pages
	 *   cp        - byte offset into srcobject of the source data
	 *   cnta      - number of bytes to move
	 *   uaddra    - user virtual address of the destination window
	 *   npages    - if non-NULL, bail out (returning 0) unless every
	 *               source page is already resident, unbusied, and
	 *               fully valid; on success accumulates the number of
	 *               pages processed
	 *
	 * Returns 0 on success, EFAULT if the user address does not
	 * resolve in the map.
	 */
	GIANT_REQUIRED;

	if (npages)
		*npages = 0;

	cnt = cnta;
	uaddr = uaddra;

	/* Process the request one map entry's worth at a time. */
	while (cnt > 0) {
		map = mapa;

		/* Leaves the map read-locked on success. */
		if ((vm_map_lookup(&map, uaddr,
			VM_PROT_READ, &entry, &first_object,
			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
			return EFAULT;
		}

		vm_map_clip_start(map, entry, uaddr);

		/* Trim this chunk to the end of the current entry. */
		tcnt = cnt;
		tend = uaddr + tcnt;
		if (tend > entry->end) {
			tcnt = entry->end - uaddr;
			tend = entry->end;
		}

		vm_map_clip_end(map, entry, tend);

		start = entry->start;
		end = entry->end;

		osize = atop(tcnt);

		oindex = OFF_TO_IDX(cp);
		if (npages) {
			vm_pindex_t idx;
			for (idx = 0; idx < osize; idx++) {
				vm_page_t m;
				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
				/*
				 * disallow busy or invalid pages, but allow
				 * m->busy pages if they are entirely valid.
				 */
				if ((m->flags & PG_BUSY) ||
					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
			}
		}

/*
 * If we are changing an existing map entry, just redirect
 * the object, and change mappings.
 */
		if ((first_object->type == OBJT_VNODE) &&
			((oldobject = entry->object.vm_object) == first_object)) {

			if ((entry->offset != cp) || (oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				pmap_remove (map->pmap, uaddr, tend);

				/*
				 * Force copy on write for mmaped regions
				 */
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {

				/*
				 * Set the object optimization hint flag
				 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);
					entry->object.vm_object = srcobject;

					if (oldobject) {
						vm_object_deallocate(oldobject);
					}
				}

				entry->offset = cp;
				map->timestamp++;
			} else {
				/* Same object/offset: just flush mappings. */
				pmap_remove (map->pmap, uaddr, tend);
			}

		} else if ((first_object->ref_count == 1) &&
			(first_object->size == osize) &&
			((first_object->type == OBJT_DEFAULT) ||
				(first_object->type == OBJT_SWAP)) ) {

			/*
			 * Sole-reference anonymous object of exactly the
			 * right size: repoint its backing object at
			 * srcobject instead of replacing the map entry.
			 */
			oldobject = first_object->backing_object;

			if ((first_object->backing_object_offset != cp) ||
				(oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				pmap_remove (map->pmap, uaddr, tend);

				/*
				 * Remove unneeded old pages
				 */
				vm_object_page_remove(first_object, 0, 0, 0);

				/*
				 * Invalidate swap space
				 */
				if (first_object->type == OBJT_SWAP) {
					swap_pager_freespace(first_object,
						0,
						first_object->size);
				}

				/*
				 * Force copy on write for mmaped regions
				 */
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {

				/*
				 * Set the object optimization hint flag
				 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);

					/*
					 * Move first_object from the old
					 * backing object's shadow list to
					 * srcobject's.
					 */
					if (oldobject) {
						TAILQ_REMOVE(&oldobject->shadow_head,
							first_object, shadow_list);
						oldobject->shadow_count--;
						/* XXX bump generation? */
						vm_object_deallocate(oldobject);
					}

					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
						first_object, shadow_list);
					srcobject->shadow_count++;
					/* XXX bump generation? */

					first_object->backing_object = srcobject;
				}
				first_object->backing_object_offset = cp;
				map->timestamp++;
			} else {
				/* Already pointing at the source: flush. */
				pmap_remove (map->pmap, uaddr, tend);
			}
/*
 * Otherwise, we have to do a logical mmap.
 */
		} else {

			vm_object_set_flag(srcobject, OBJ_OPT);
			vm_object_reference(srcobject);

			pmap_remove (map->pmap, uaddr, tend);

			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
			/*
			 * NOTE(review): the upgrade return value is ignored
			 * here, unlike in vm_map_lookup(); presumably a lost
			 * upgrade is tolerable on this path -- confirm.
			 */
			vm_map_lock_upgrade(map);

			/* Keep the free-space and lookup hints valid. */
			if (entry == &map->header) {
				map->first_free = &map->header;
			} else if (map->first_free->start >= start) {
				map->first_free = entry->prev;
			}

			SAVE_HINT(map, entry->prev);
			vm_map_entry_delete(map, entry);

			object = srcobject;
			ooffset = cp;

			/* Replace the deleted entry with a COW window. */
			rv = vm_map_insert(map, object, ooffset, start, tend,
				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);

			if (rv != KERN_SUCCESS)
				panic("vm_uiomove: could not insert new entry: %d", rv);
		}

/*
 * Map the window directly, if it is already in memory
 */
		pmap_object_init_pt(map->pmap, uaddr,
			srcobject, oindex, tcnt, 0);

		map->timestamp++;
		vm_map_unlock(map);

		/* Advance to the next chunk. */
		cnt -= tcnt;
		uaddr += tcnt;
		cp += tcnt;
		if (npages)
			*npages += osize;
	}
	return 0;
}
30141efb74fbSJohn Dyson 
30151efb74fbSJohn Dyson /*
30161efb74fbSJohn Dyson  * Performs the copy_on_write operations necessary to allow the virtual copies
30171efb74fbSJohn Dyson  * into user space to work.  This has to be called for write(2) system calls
30181efb74fbSJohn Dyson  * from other processes, file unlinking, and file size shrinkage.
30191efb74fbSJohn Dyson  */
void
vm_freeze_copyopts(object, froma, toa)
	vm_object_t object;
	vm_pindex_t froma, toa;
{
	int rv;
	vm_object_t robject;
	vm_pindex_t idx;

	/*
	 * NOTE(review): the froma/toa range arguments are never used in
	 * this body; every shadow object of 'object' is materialized in
	 * full regardless of the requested range -- confirm intent.
	 */
	GIANT_REQUIRED;
	/* Nothing to do unless the object carries the OBJ_OPT hint. */
	if ((object == NULL) ||
		((object->flags & OBJ_OPT) == 0))
		return;

	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	/* Detach each shadow, copying its pages up front. */
	while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
		vm_pindex_t bo_pindex;
		vm_page_t m_in, m_out;

		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

		/* Hold the shadow so it cannot go away under us. */
		vm_object_reference(robject);

		vm_object_pip_wait(robject, "objfrz");

		/*
		 * Only our temporary reference remains: the shadow is
		 * already dead, drop it and move on.
		 */
		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		vm_object_pip_add(robject, 1);

		for (idx = 0; idx < robject->size; idx++) {

			m_out = vm_page_grab(robject, idx,
						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			/*
			 * Only pages the shadow does not already have a
			 * valid copy of need to be copied down.
			 */
			if (m_out->valid == 0) {
				m_in = vm_page_grab(object, bo_pindex + idx,
						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				if (m_in->valid == 0) {
					/* Page it in from the backing store. */
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
						continue;
					}
					vm_page_deactivate(m_in);
				}

				vm_page_protect(m_in, VM_PROT_NONE);
				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
				m_out->valid = m_in->valid;
				vm_page_dirty(m_out);
				vm_page_activate(m_out);
				vm_page_wakeup(m_in);
			}
			vm_page_wakeup(m_out);
		}

		/* Unlink the shadow from its backing object. */
		object->shadow_count--;
		object->ref_count--;
		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
		robject->backing_object = NULL;
		robject->backing_object_offset = 0;

		vm_object_pip_wakeup(robject);
		vm_object_deallocate(robject);
	}

	vm_object_clear_flag(object, OBJ_OPT);
}
30931efb74fbSJohn Dyson 
3094c7c34a24SBruce Evans #include "opt_ddb.h"
3095c3cb3e12SDavid Greenman #ifdef DDB
3096c7c34a24SBruce Evans #include <sys/kernel.h>
3097c7c34a24SBruce Evans 
3098c7c34a24SBruce Evans #include <ddb/ddb.h>
3099c7c34a24SBruce Evans 
3100df8bae1dSRodney W. Grimes /*
3101df8bae1dSRodney W. Grimes  *	vm_map_print:	[ debug ]
3102df8bae1dSRodney W. Grimes  */
3103c7c34a24SBruce Evans DB_SHOW_COMMAND(map, vm_map_print)
3104df8bae1dSRodney W. Grimes {
310595e5e988SJohn Dyson 	static int nlines;
3106c7c34a24SBruce Evans 	/* XXX convert args. */
3107c0877f10SJohn Dyson 	vm_map_t map = (vm_map_t)addr;
3108c7c34a24SBruce Evans 	boolean_t full = have_addr;
3109df8bae1dSRodney W. Grimes 
3110c0877f10SJohn Dyson 	vm_map_entry_t entry;
3111c7c34a24SBruce Evans 
3112e5f251d2SAlan Cox 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3113e5f251d2SAlan Cox 	    (void *)map,
3114101eeb7fSBruce Evans 	    (void *)map->pmap, map->nentries, map->timestamp);
311595e5e988SJohn Dyson 	nlines++;
3116df8bae1dSRodney W. Grimes 
3117c7c34a24SBruce Evans 	if (!full && db_indent)
3118df8bae1dSRodney W. Grimes 		return;
3119df8bae1dSRodney W. Grimes 
3120c7c34a24SBruce Evans 	db_indent += 2;
3121df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
3122df8bae1dSRodney W. Grimes 	    entry = entry->next) {
3123fc62ef1fSBruce Evans 		db_iprintf("map entry %p: start=%p, end=%p\n",
3124fc62ef1fSBruce Evans 		    (void *)entry, (void *)entry->start, (void *)entry->end);
312595e5e988SJohn Dyson 		nlines++;
3126e5f251d2SAlan Cox 		{
3127df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
3128df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
31290d94caffSDavid Greenman 
313095e5e988SJohn Dyson 			db_iprintf(" prot=%x/%x/%s",
3131df8bae1dSRodney W. Grimes 			    entry->protection,
3132df8bae1dSRodney W. Grimes 			    entry->max_protection,
31338aef1712SMatthew Dillon 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3134df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
313595e5e988SJohn Dyson 				db_printf(", wired");
3136df8bae1dSRodney W. Grimes 		}
31379fdfe602SMatthew Dillon 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3138101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3139101eeb7fSBruce Evans 			db_printf(", share=%p, offset=0x%lx\n",
31409fdfe602SMatthew Dillon 			    (void *)entry->object.sub_map,
3141ecbb00a2SDoug Rabson 			    (long)entry->offset);
314295e5e988SJohn Dyson 			nlines++;
3143df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
31449fdfe602SMatthew Dillon 			    (entry->prev->object.sub_map !=
31459fdfe602SMatthew Dillon 				entry->object.sub_map)) {
3146c7c34a24SBruce Evans 				db_indent += 2;
3147101eeb7fSBruce Evans 				vm_map_print((db_expr_t)(intptr_t)
31489fdfe602SMatthew Dillon 					     entry->object.sub_map,
3149914181e7SBruce Evans 					     full, 0, (char *)0);
3150c7c34a24SBruce Evans 				db_indent -= 2;
3151df8bae1dSRodney W. Grimes 			}
31520d94caffSDavid Greenman 		} else {
3153101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3154101eeb7fSBruce Evans 			db_printf(", object=%p, offset=0x%lx",
3155101eeb7fSBruce Evans 			    (void *)entry->object.vm_object,
3156ecbb00a2SDoug Rabson 			    (long)entry->offset);
3157afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
3158c7c34a24SBruce Evans 				db_printf(", copy (%s)",
3159afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3160c7c34a24SBruce Evans 			db_printf("\n");
316195e5e988SJohn Dyson 			nlines++;
3162df8bae1dSRodney W. Grimes 
3163df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
3164df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
3165df8bae1dSRodney W. Grimes 				entry->object.vm_object)) {
3166c7c34a24SBruce Evans 				db_indent += 2;
3167101eeb7fSBruce Evans 				vm_object_print((db_expr_t)(intptr_t)
3168101eeb7fSBruce Evans 						entry->object.vm_object,
3169914181e7SBruce Evans 						full, 0, (char *)0);
317095e5e988SJohn Dyson 				nlines += 4;
3171c7c34a24SBruce Evans 				db_indent -= 2;
3172df8bae1dSRodney W. Grimes 			}
3173df8bae1dSRodney W. Grimes 		}
3174df8bae1dSRodney W. Grimes 	}
3175c7c34a24SBruce Evans 	db_indent -= 2;
317695e5e988SJohn Dyson 	if (db_indent == 0)
317795e5e988SJohn Dyson 		nlines = 0;
3178df8bae1dSRodney W. Grimes }
317995e5e988SJohn Dyson 
318095e5e988SJohn Dyson 
318195e5e988SJohn Dyson DB_SHOW_COMMAND(procvm, procvm)
318295e5e988SJohn Dyson {
318395e5e988SJohn Dyson 	struct proc *p;
318495e5e988SJohn Dyson 
318595e5e988SJohn Dyson 	if (have_addr) {
318695e5e988SJohn Dyson 		p = (struct proc *) addr;
318795e5e988SJohn Dyson 	} else {
318895e5e988SJohn Dyson 		p = curproc;
318995e5e988SJohn Dyson 	}
319095e5e988SJohn Dyson 
3191ac1e407bSBruce Evans 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3192ac1e407bSBruce Evans 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3193b1028ad1SLuoqi Chen 	    (void *)vmspace_pmap(p->p_vmspace));
319495e5e988SJohn Dyson 
3195101eeb7fSBruce Evans 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
319695e5e988SJohn Dyson }
319795e5e988SJohn Dyson 
3198c7c34a24SBruce Evans #endif /* DDB */
3199