/* xref: /freebsd/sys/vm/vm_map.c (revision 8aef171243894d9b06e5ac740bfa5e8686fc4c1a) */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_map.c,v 1.143 1999/01/26 02:49:52 julian Exp $
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_inherit.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/default_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	In order to properly represent the sharing of virtual
 *	memory regions among maps, the map structure is bi-level.
 *	Top-level ("address") maps refer to regions of sharable
 *	virtual memory.  These regions are implemented as
 *	("sharing") maps, which then refer to the actual virtual
 *	memory objects.  When two address maps "share" memory,
 *	their top-level maps both have references to the same
 *	sharing map.  When memory is virtual-copied from one
 *	address map to another, the references in the sharing
 *	maps are actually copied -- no copying occurs at the
 *	virtual memory object level.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	Compatible abutting entries may later be merged again by
 *	vm_map_simplify_entry().
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one sharing map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map -- this means that
 *	shadow object creation can be delayed until a write operation
 *	occurs.
 */
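
/*
 *	Illustrative sketch (editorial addition, not original source): the
 *	clipping described above is how range operations isolate the span
 *	they act on.  Assuming an existing entry covering [0x1000, 0x4000)
 *	and an operation directed at [0x2000, 0x3000), a caller holding the
 *	map lock would typically do:
 *
 *		vm_map_entry_t entry;
 *
 *		if (vm_map_lookup_entry(map, 0x2000, &entry)) {
 *			vm_map_clip_start(map, entry, 0x2000);
 *			vm_map_clip_end(map, entry, 0x3000);
 *			... modify only this entry ...
 *		}
 *
 *	leaving three entries, [0x1000, 0x2000), [0x2000, 0x3000) and
 *	[0x3000, 0x4000), with only the middle one changed.  The clip
 *	macros and vm_map_lookup_entry() are defined later in this file.
 */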

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

extern char kstack[];
extern int inmprotect;

static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
static struct vm_object kmapentobj, mapentobj, mapobj;
#define MAP_ENTRY_INIT	128
static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
static struct vm_map map_init[MAX_KMAP];

static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t));
static void vm_map_split __P((vm_map_entry_t));

void
vm_map_startup()
{
	mapzone = &mapzone_store;
	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
		map_init, MAX_KMAP);
	kmapentzone = &kmapentzone_store;
	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
		kmap_entry_init, MAX_KMAPENT);
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
		map_entry_init, MAX_MAPENT);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max)
	vm_offset_t min, max;
{
	struct vmspace *vm;

	vm = zalloc(vmspace_zone);
	bzero(&vm->vm_map, sizeof vm->vm_map);
	vm_map_init(&vm->vm_map, min, max);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	return (vm);
}
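
/*
 *	Usage sketch (editorial addition): a caller building a fresh user
 *	address space and later dropping its reference does, roughly:
 *
 *		struct vmspace *vm;
 *
 *		vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *		...				populate vm->vm_map ...
 *		vmspace_free(vm);
 *
 *	The bounds shown are the conventional user-space limits and are an
 *	assumption here; any valid [min, max) range works.  The final
 *	vmspace_free() (below) tears down the map and releases the pmap.
 */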

void
vm_init2(void)
{
	zinitna(kmapentzone, &kmapentobj,
		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
	zinitna(mapentzone, &mapentobj,
		NULL, 0, 0, 0, 1);
	zinitna(mapzone, &mapobj,
		NULL, 0, 0, 0, 1);
	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
	pmap_init2();
	vm_object_init2();
}
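
/*
 *	Note on the zone bring-up above (editorial addition): zone setup is
 *	two-phase.  vm_map_startup() runs before the VM system is usable,
 *	so it carves the map and map-entry zones out of the static arrays
 *	with zbootinit().  vm_init2() runs once paging is up and re-backs
 *	the same zones with kernel VM via zinitna(), passing ZONE_INTERRUPT
 *	and a limit of cnt.v_page_count / 4 so kernel map entries can be
 *	allocated at interrupt time.  vmspace_zone needs no bootstrap phase,
 *	since no vmspace exists before this point, so it is created directly
 *	with zinit().
 */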

void
vmspace_free(vm)
	struct vmspace *vm;
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {

		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);

		pmap_release(&vm->vm_pmap);
		zfree(vmspace_zone, vm);
	}
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap, min, max)
	pmap_t pmap;
	vm_offset_t min, max;
{
	vm_map_t result;

	result = zalloc(mapzone);
	vm_map_init(result, min, max);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max)
	struct vm_map *map;
	vm_offset_t min, max;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->is_main_map = TRUE;
	map->system_map = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(map)
	vm_map_t map;
{
	return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
#define	vm_map_entry_link(map, after_where, entry) \
		{ \
		(map)->nentries++; \
		(map)->timestamp++; \
		(entry)->prev = (after_where); \
		(entry)->next = (after_where)->next; \
		(entry)->prev->next = (entry); \
		(entry)->next->prev = (entry); \
		}
#define	vm_map_entry_unlink(map, entry) \
		{ \
		(map)->nentries--; \
		(map)->timestamp++; \
		(entry)->next->prev = (entry)->prev; \
		(entry)->prev->next = (entry)->next; \
		}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.
 */
#define	SAVE_HINT(map,value) \
		(map)->hint = (value);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(map, address, entry)
	vm_map_t map;
	vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	vm_map_entry_t cur;
	vm_map_entry_t last;

	/*
	 * Start looking either from the head of the list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already looking
		 * at the entry we want (which is usually the case). Note also
		 * that we don't need to save the hint here... it is the same
		 * hint (unless we are at the header, in which case the hint
		 * didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}
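
/*
 *	Typical caller pattern (editorial addition): decide whether
 *	[start, end) is free and remember the predecessor entry for a
 *	subsequent insert:
 *
 *		vm_map_entry_t prev;
 *
 *		if (vm_map_lookup_entry(map, start, &prev))
 *			return (KERN_NO_SPACE);	  start is already mapped
 *		if ((prev->next != &map->header) &&
 *		    (prev->next->start < end))
 *			return (KERN_NO_SPACE);	  overlaps a later entry
 *
 *	This is exactly the check performed by vm_map_insert() and
 *	vm_map_stack() below.
 */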

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
#if 0
	vm_object_t prev_object;
#endif
	u_char protoeflags;

	if ((object != NULL) && (cow & MAP_NOFAULT)) {
		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
	}

	/*
	 * Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	if (cow & MAP_COPY_NEEDED)
		protoeflags |= MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW;

	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;

	/*
	 * See if we can avoid creating a new entry by extending one of our
	 * neighbors.  Or at least extend the object.
	 */

	if ((object == NULL) &&
	    (prev_entry != &map->header) &&
	    ((prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
	    ((prev_entry->object.vm_object == NULL) ||
		(prev_entry->object.vm_object->type == OBJT_DEFAULT)) &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0)) {

		if ((protoeflags == prev_entry->eflags) &&
		    ((cow & MAP_NOFAULT) ||
		     vm_object_coalesce(prev_entry->object.vm_object,
					OFF_TO_IDX(prev_entry->offset),
					(vm_size_t) (prev_entry->end - prev_entry->start),
					(vm_size_t) (end - prev_entry->end)))) {

			/*
			 * Coalesced the two objects.  Can we extend the
			 * previous map entry to include the new range?
			 */
			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
			    (prev_entry->protection == prot) &&
			    (prev_entry->max_protection == max)) {

				map->size += (end - prev_entry->end);
				prev_entry->end = end;
#if 0
				/*
				 * (no longer applies)
				 */
				if ((cow & MAP_NOFAULT) == 0) {
					prev_object = prev_entry->object.vm_object;
					default_pager_convert_to_swapq(prev_object);
				}
#endif
				return (KERN_SUCCESS);
			}
			else {
				object = prev_entry->object.vm_object;
				offset = prev_entry->offset + (prev_entry->end -
							       prev_entry->start);

				vm_object_reference(object);
			}
		}
	}

	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	if (object) {
		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		} else {
			vm_object_set_flag(object, OBJ_ONEMAPPING);
		}
	}

	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = prot;
		new_entry->max_protection = max;
		new_entry->wired_count = 0;
	}
	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
		(prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

#if 0
	/*
	 * (no longer applies)
	 */
	default_pager_convert_to_swapq(object);
#endif
	return (KERN_SUCCESS);
}

int
vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
	      vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t new_stack_entry;
	vm_size_t      init_ssize;
	int            rv;

	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
		return (KERN_NO_SPACE);

	if (max_ssize < SGROWSIZ)
		init_ssize = max_ssize;
	else
		init_ssize = SGROWSIZ;

	vm_map_lock(map);

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* If we can't accommodate max_ssize in the current mapping,
	 * no go.  However, we need to be aware that subsequent user
	 * mappings might map into the space we have reserved for
	 * stack, and currently this space is not protected.
	 *
	 * Hopefully we will at least detect this condition
	 * when we try to grow the stack.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < addrbos + max_ssize)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* We initially map a stack of only init_ssize.  We will
	 * grow as needed later.  Since this is to be a grow
	 * down stack, we map at the top of the range.
	 *
	 * Note: we would normally expect prot and max to be
	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
	 * eliminate these as input parameters, and just
	 * pass these values here in the insert call.
	 */
	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
	                   addrbos + max_ssize, prot, max, cow);

	/* Now set the avail_ssize amount */
	if (rv == KERN_SUCCESS){
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end   != addrbos + max_ssize ||
		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
			panic ("Bad entry start/end for new stack entry");
		else
			new_stack_entry->avail_ssize = max_ssize - init_ssize;
	}

	vm_map_unlock(map);
	return (rv);
}
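
/*
 *	Worked example (editorial addition, illustrative values): with
 *	addrbos = A, max_ssize = M and init_ssize = I (I = min(M, SGROWSIZ)),
 *	vm_map_stack() reserves only the top of the range:
 *
 *		entry:        [A + M - I, A + M)
 *		avail_ssize:  M - I
 *
 *	so the stack occupies the top I bytes and may later grow downward
 *	toward A by at most avail_ssize bytes via vm_map_growstack().
 */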

/* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.  Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
 */
int
vm_map_growstack (struct proc *p, vm_offset_t addr)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t stack_entry;
	vm_map_entry_t new_stack_entry;
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_offset_t    end;
	int      grow_amount;
	int      rv;
	int      is_procstack = 0;

	vm_map_lock(map);

	/* If addr is already in the entry range, no need to grow.*/
	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
		vm_map_unlock(map);
		return (KERN_SUCCESS);
	}

	if ((stack_entry = prev_entry->next) == &map->header) {
		vm_map_unlock(map);
		return (KERN_SUCCESS);
	}
	if (prev_entry == &map->header)
		end = stack_entry->start - stack_entry->avail_ssize;
	else
		end = prev_entry->end;

	/* This next test mimics the old grow function in vm_machdep.c.
	 * It really doesn't quite make sense, but we do it anyway
	 * for compatibility.
	 *
	 * If the stack is not growable, return success.  This signals the
	 * caller to proceed as it normally would with the regular VM path.
	 */
	if (stack_entry->avail_ssize < 1 ||
	    addr >= stack_entry->start ||
	    addr <  stack_entry->start - stack_entry->avail_ssize) {
		vm_map_unlock(map);
		return (KERN_SUCCESS);
	}

	/* Find the minimum grow amount */
	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
	if (grow_amount > stack_entry->avail_ssize) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* If there is no longer enough space between the entries,
	 * fail and adjust the available space.  Note: this
	 * should only happen if the user has mapped into the
	 * stack area after the stack was created, and is
	 * probably an error.
	 *
	 * This also effectively destroys any guard page the user
	 * might have intended by limiting the stack size.
	 */
	if (grow_amount > stack_entry->start - end) {
		stack_entry->avail_ssize = stack_entry->start - end;
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	if (addr >= (vm_offset_t)vm->vm_maxsaddr)
		is_procstack = 1;

	/* If this is the main process stack, see if we're over the
	 * stack limit.
	 */
	if (is_procstack && (vm->vm_ssize + grow_amount >
			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* Round up the grow amount modulo SGROWSIZ */
	grow_amount = roundup (grow_amount, SGROWSIZ);
	if (grow_amount > stack_entry->avail_ssize) {
		grow_amount = stack_entry->avail_ssize;
	}
	if (is_procstack && (vm->vm_ssize + grow_amount >
	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
		              vm->vm_ssize;
	}

	/* Get the preliminary new entry start value */
	addr = stack_entry->start - grow_amount;

	/* If this puts us into the previous entry, cut back our growth
	 * to the available space.  Also, see the note above.
	 */
	if (addr < end) {
		stack_entry->avail_ssize = stack_entry->start - end;
		addr = end;
	}

	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
			   stack_entry->protection,
			   stack_entry->max_protection,
			   0);

	/* Adjust the available stack space by the amount we grew. */
	if (rv == KERN_SUCCESS) {
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end   != stack_entry->start  ||
		    new_stack_entry->start != addr)
			panic ("Bad stack grow start/end in new stack entry");
		else {
			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
							(new_stack_entry->end -
							 new_stack_entry->start);
			vm->vm_ssize += new_stack_entry->end -
					new_stack_entry->start;
		}
	}

	vm_map_unlock(map);
	return (rv);
}
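
/*
 *	Worked example (editorial addition, illustrative numbers): with a
 *	4K PAGE_SIZE and a 128K SGROWSIZ, a fault at
 *	stack_entry->start - 5000 yields
 *
 *		grow_amount = roundup(5000, 4096) = 8192,
 *
 *	which is then rounded up to SGROWSIZ and finally clamped to the
 *	smaller of stack_entry->avail_ssize and the remaining RLIMIT_STACK
 *	headroom before the new entry [addr, stack_entry->start) is
 *	inserted above.
 */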

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
	vm_map_t map;
	vm_offset_t start;
	vm_size_t length;
	vm_offset_t *addr;
{
	vm_map_entry_t entry, next;
	vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	*addr = start;
	if (map == kernel_map) {
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			pmap_growkernel(ksize);
		}
	}
	return (0);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result, s = 0;

	start = *addr;

	if (map == kmem_map || map == mb_map)
		s = splvm();

	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map || map == mb_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
		start, start + length, prot, max, cow);
	vm_map_unlock(map);

	if (map == kmem_map || map == mb_map)
		splx(s);

	return (result);
}
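
/*
 *	Usage sketch (editorial addition): a kmem_alloc()-style caller that
 *	wants "size" bytes of kernel VA backed by a fresh anonymous object
 *	might use vm_map_find() like this (names shown for illustration
 *	only):
 *
 *		vm_offset_t addr = vm_map_min(kernel_map);
 *		int rv;
 *
 *		rv = vm_map_find(kernel_map, NULL, 0, &addr,
 *		    round_page(size), TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
 *		if (rv != KERN_SUCCESS)
 *			return (0);
 *
 *	With find_space == TRUE the map is searched first-fit starting at
 *	*addr via vm_map_findspace(); on success *addr is updated with the
 *	chosen address.
 */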

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.
 */
void
vm_map_simplify_entry(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
				(prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			if (map->hint == prev)
				map->hint = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!next->object.vm_object ||
				(next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
		    (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			if (map->hint == next)
				map->hint = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next);
		}
	}
}
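
/*
 *	Note (editorial addition): "simplify" here means merging the entry
 *	with its previous and/or next neighbor when they abut and agree in
 *	object, offset continuity, eflags, protection, max_protection,
 *	inheritance and wired count.  For example, two back-to-back
 *	anonymous mappings created with identical protections collapse
 *	into a single entry, keeping the per-map entry list short.
 */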
958df8bae1dSRodney W. Grimes /*
959df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
960df8bae1dSRodney W. Grimes  *
961df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
962df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
963df8bae1dSRodney W. Grimes  *	it splits the entry into two.
964df8bae1dSRodney W. Grimes  */
965df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \
966df8bae1dSRodney W. Grimes { \
967df8bae1dSRodney W. Grimes 	if (startaddr > entry->start) \
968df8bae1dSRodney W. Grimes 		_vm_map_clip_start(map, entry, startaddr); \
969c0877f10SJohn Dyson 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
970069e9bc1SDoug Rabson 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
971df8bae1dSRodney W. Grimes }
972df8bae1dSRodney W. Grimes 
973df8bae1dSRodney W. Grimes /*
974df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
975df8bae1dSRodney W. Grimes  *	the entry must be split.
976df8bae1dSRodney W. Grimes  */
9770d94caffSDavid Greenman static void
9780d94caffSDavid Greenman _vm_map_clip_start(map, entry, start)
979c0877f10SJohn Dyson 	vm_map_t map;
980c0877f10SJohn Dyson 	vm_map_entry_t entry;
981c0877f10SJohn Dyson 	vm_offset_t start;
982df8bae1dSRodney W. Grimes {
983c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
984df8bae1dSRodney W. Grimes 
985df8bae1dSRodney W. Grimes 	/*
9860d94caffSDavid Greenman 	 * Split off the front portion -- note that we must insert the new
9870d94caffSDavid Greenman 	 * entry BEFORE this one, so that this entry has the specified
9880d94caffSDavid Greenman 	 * starting address.
989df8bae1dSRodney W. Grimes 	 */
990df8bae1dSRodney W. Grimes 
991f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, entry);
992f32dbbeeSJohn Dyson 
99311cccda1SJohn Dyson 	/*
99411cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
99511cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
99611cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
99711cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
99811cccda1SJohn Dyson 	 * put this improvement.
99911cccda1SJohn Dyson 	 */
100011cccda1SJohn Dyson 
100111cccda1SJohn Dyson 	if (entry->object.vm_object == NULL) {
100211cccda1SJohn Dyson 		vm_object_t object;
100311cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
1004c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
100511cccda1SJohn Dyson 		entry->object.vm_object = object;
100611cccda1SJohn Dyson 		entry->offset = 0;
100711cccda1SJohn Dyson 	}
100811cccda1SJohn Dyson 
1009df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
1010df8bae1dSRodney W. Grimes 	*new_entry = *entry;
1011df8bae1dSRodney W. Grimes 
1012df8bae1dSRodney W. Grimes 	new_entry->end = start;
1013df8bae1dSRodney W. Grimes 	entry->offset += (start - entry->start);
1014df8bae1dSRodney W. Grimes 	entry->start = start;
1015df8bae1dSRodney W. Grimes 
1016df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry->prev, new_entry);
1017df8bae1dSRodney W. Grimes 
1018c0877f10SJohn Dyson 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1019c0877f10SJohn Dyson 		if (new_entry->object.vm_object->ref_count == 1)
1020069e9bc1SDoug Rabson 			vm_object_set_flag(new_entry->object.vm_object,
1021069e9bc1SDoug Rabson 					   OBJ_ONEMAPPING);
1022df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
1023df8bae1dSRodney W. Grimes 	}
1024c0877f10SJohn Dyson }
1025df8bae1dSRodney W. Grimes 
1026df8bae1dSRodney W. Grimes /*
1027df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
1028df8bae1dSRodney W. Grimes  *
1029df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
1030df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
1031df8bae1dSRodney W. Grimes  *	it splits the entry into two.
1032df8bae1dSRodney W. Grimes  */
1033df8bae1dSRodney W. Grimes 
1034df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \
1035df8bae1dSRodney W. Grimes { \
1036df8bae1dSRodney W. Grimes 	if (endaddr < entry->end) \
1037df8bae1dSRodney W. Grimes 		_vm_map_clip_end(map, entry, endaddr); \
1038c0877f10SJohn Dyson 	else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
1039069e9bc1SDoug Rabson 		vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
1040df8bae1dSRodney W. Grimes }
1041df8bae1dSRodney W. Grimes 
1042df8bae1dSRodney W. Grimes /*
1043df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
1044df8bae1dSRodney W. Grimes  *	the entry must be split.
1045df8bae1dSRodney W. Grimes  */
10460d94caffSDavid Greenman static void
10470d94caffSDavid Greenman _vm_map_clip_end(map, entry, end)
1048c0877f10SJohn Dyson 	vm_map_t map;
1049c0877f10SJohn Dyson 	vm_map_entry_t entry;
1050c0877f10SJohn Dyson 	vm_offset_t end;
1051df8bae1dSRodney W. Grimes {
1052c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
1053df8bae1dSRodney W. Grimes 
1054df8bae1dSRodney W. Grimes 	/*
105511cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
105611cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
105711cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
105811cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
105911cccda1SJohn Dyson 	 * put this improvement.
106011cccda1SJohn Dyson 	 */
106111cccda1SJohn Dyson 
106211cccda1SJohn Dyson 	if (entry->object.vm_object == NULL) {
106311cccda1SJohn Dyson 		vm_object_t object;
106411cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
1065c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
106611cccda1SJohn Dyson 		entry->object.vm_object = object;
106711cccda1SJohn Dyson 		entry->offset = 0;
106811cccda1SJohn Dyson 	}
106911cccda1SJohn Dyson 
107011cccda1SJohn Dyson 	/*
10710d94caffSDavid Greenman 	 * Create a new entry and insert it AFTER the specified entry
1072df8bae1dSRodney W. Grimes 	 */
1073df8bae1dSRodney W. Grimes 
1074df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
1075df8bae1dSRodney W. Grimes 	*new_entry = *entry;
1076df8bae1dSRodney W. Grimes 
1077df8bae1dSRodney W. Grimes 	new_entry->start = entry->end = end;
1078df8bae1dSRodney W. Grimes 	new_entry->offset += (end - entry->start);
1079df8bae1dSRodney W. Grimes 
1080df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry, new_entry);
1081df8bae1dSRodney W. Grimes 
1082c0877f10SJohn Dyson 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1083c0877f10SJohn Dyson 		if (new_entry->object.vm_object->ref_count == 1)
1084069e9bc1SDoug Rabson 			vm_object_set_flag(new_entry->object.vm_object,
1085069e9bc1SDoug Rabson 					   OBJ_ONEMAPPING);
1086df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
1087df8bae1dSRodney W. Grimes 	}
1088c0877f10SJohn Dyson }
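
/*
 * Illustrative sketch (not in the original file): the clipping helpers
 * above are used by the routines below in a common pattern -- look up the
 * entry containing "start", clip its front, then walk forward clipping the
 * last affected entry at "end".  The function name is hypothetical and the
 * map is assumed to be locked by the caller.
 */
#if 0
static void
vm_map_clip_range_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t entry;

	if (vm_map_lookup_entry(map, start, &entry)) {
		/* "start" lies within an entry; split off its front. */
		vm_map_clip_start(map, entry, start);
	} else {
		/* "start" lies in a hole; begin with the next entry. */
		entry = entry->next;
	}

	while ((entry != &map->header) && (entry->start < end)) {
		/* Trim the last affected entry back to "end". */
		vm_map_clip_end(map, entry, end);
		/* ... per-entry work would go here ... */
		entry = entry->next;
	}
}
#endif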
1089df8bae1dSRodney W. Grimes 
1090df8bae1dSRodney W. Grimes /*
1091df8bae1dSRodney W. Grimes  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1092df8bae1dSRodney W. Grimes  *
1093df8bae1dSRodney W. Grimes  *	Clamps the given starting and ending region
1094df8bae1dSRodney W. Grimes  *	addresses to fall within the valid range of the map.
1095df8bae1dSRodney W. Grimes  */
1096df8bae1dSRodney W. Grimes #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1097df8bae1dSRodney W. Grimes 		{					\
1098df8bae1dSRodney W. Grimes 		if (start < vm_map_min(map))		\
1099df8bae1dSRodney W. Grimes 			start = vm_map_min(map);	\
1100df8bae1dSRodney W. Grimes 		if (end > vm_map_max(map))		\
1101df8bae1dSRodney W. Grimes 			end = vm_map_max(map);		\
1102df8bae1dSRodney W. Grimes 		if (start > end)			\
1103df8bae1dSRodney W. Grimes 			start = end;			\
1104df8bae1dSRodney W. Grimes 		}
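
/*
 * A minimal usage sketch (not in the original file): VM_MAP_RANGE_CHECK
 * silently clamps its arguments rather than failing, so callers pass
 * lvalues and use the possibly narrowed values afterwards.  The function
 * name and the initial values below are hypothetical.
 */
#if 0
static void
vm_map_range_check_example(vm_map_t map)
{
	vm_offset_t start = 0;			/* possibly below vm_map_min() */
	vm_offset_t end = (vm_offset_t) -1;	/* possibly above vm_map_max() */

	VM_MAP_RANGE_CHECK(map, start, end);
	/* Here [start, end) lies entirely within the map's valid range. */
}
#endif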
1105df8bae1dSRodney W. Grimes 
1106df8bae1dSRodney W. Grimes /*
1107df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
1108df8bae1dSRodney W. Grimes  *
1109df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
1110df8bae1dSRodney W. Grimes  *
1111df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
1112df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
1113df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
1114df8bae1dSRodney W. Grimes  *
1115df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
1116df8bae1dSRodney W. Grimes  *	within this range after calling vm_map_submap:
1117df8bae1dSRodney W. Grimes  *		vm_fault
1118df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
1119df8bae1dSRodney W. Grimes  *
1120df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
1121df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
1122df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
1123df8bae1dSRodney W. Grimes  */
1124df8bae1dSRodney W. Grimes int
1125df8bae1dSRodney W. Grimes vm_map_submap(map, start, end, submap)
1126c0877f10SJohn Dyson 	vm_map_t map;
1127c0877f10SJohn Dyson 	vm_offset_t start;
1128c0877f10SJohn Dyson 	vm_offset_t end;
1129df8bae1dSRodney W. Grimes 	vm_map_t submap;
1130df8bae1dSRodney W. Grimes {
1131df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1132c0877f10SJohn Dyson 	int result = KERN_INVALID_ARGUMENT;
1133df8bae1dSRodney W. Grimes 
1134df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1135df8bae1dSRodney W. Grimes 
1136df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1137df8bae1dSRodney W. Grimes 
1138df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1139df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
11400d94caffSDavid Greenman 	} else
1141df8bae1dSRodney W. Grimes 		entry = entry->next;
1142df8bae1dSRodney W. Grimes 
1143df8bae1dSRodney W. Grimes 	vm_map_clip_end(map, entry, end);
1144df8bae1dSRodney W. Grimes 
1145df8bae1dSRodney W. Grimes 	if ((entry->start == start) && (entry->end == end) &&
1146afa07f7eSJohn Dyson 	    ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
1147afa07f7eSJohn Dyson 	    (entry->object.vm_object == NULL)) {
11482d8acc0fSJohn Dyson 		entry->object.sub_map = submap;
1149afa07f7eSJohn Dyson 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1150df8bae1dSRodney W. Grimes 		result = KERN_SUCCESS;
1151df8bae1dSRodney W. Grimes 	}
1152df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1153df8bae1dSRodney W. Grimes 
1154df8bae1dSRodney W. Grimes 	return (result);
1155df8bae1dSRodney W. Grimes }
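
/*
 * Illustrative sketch (not in the original file): installing a submap over
 * a range reserved as described above -- created with vm_map_find and
 * otherwise untouched, so the entry spans exactly [start, end) and has no
 * backing object.  All names are hypothetical.
 */
#if 0
static int
vm_map_submap_example(vm_map_t parent, vm_offset_t start, vm_offset_t end,
    vm_map_t submap)
{
	int rv;

	rv = vm_map_submap(parent, start, end, submap);
	if (rv != KERN_SUCCESS) {
		/* The range was not a pristine, object-less reservation. */
		return (rv);
	}
	return (KERN_SUCCESS);
}
#endif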
1156df8bae1dSRodney W. Grimes 
1157df8bae1dSRodney W. Grimes /*
1158df8bae1dSRodney W. Grimes  *	vm_map_protect:
1159df8bae1dSRodney W. Grimes  *
1160df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
1161df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
1162df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
1163df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
1164df8bae1dSRodney W. Grimes  */
1165df8bae1dSRodney W. Grimes int
1166b9dcd593SBruce Evans vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1167b9dcd593SBruce Evans 	       vm_prot_t new_prot, boolean_t set_max)
1168df8bae1dSRodney W. Grimes {
1169c0877f10SJohn Dyson 	vm_map_entry_t current;
1170df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1171df8bae1dSRodney W. Grimes 
1172df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1173df8bae1dSRodney W. Grimes 
1174df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1175df8bae1dSRodney W. Grimes 
1176df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1177df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1178b7b2aac2SJohn Dyson 	} else {
1179df8bae1dSRodney W. Grimes 		entry = entry->next;
1180b7b2aac2SJohn Dyson 	}
1181df8bae1dSRodney W. Grimes 
1182df8bae1dSRodney W. Grimes 	/*
11830d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
1184df8bae1dSRodney W. Grimes 	 */
1185df8bae1dSRodney W. Grimes 
1186df8bae1dSRodney W. Grimes 	current = entry;
1187df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1188afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1189a1f6d91cSDavid Greenman 			vm_map_unlock(map);
1190df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1191a1f6d91cSDavid Greenman 		}
1192df8bae1dSRodney W. Grimes 		if ((new_prot & current->max_protection) != new_prot) {
1193df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1194df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
1195df8bae1dSRodney W. Grimes 		}
1196df8bae1dSRodney W. Grimes 		current = current->next;
1197df8bae1dSRodney W. Grimes 	}
1198df8bae1dSRodney W. Grimes 
1199df8bae1dSRodney W. Grimes 	/*
12000d94caffSDavid Greenman 	 * Go back and fix up protections. [Note that clipping is not
12010d94caffSDavid Greenman 	 * necessary the second time.]
1202df8bae1dSRodney W. Grimes 	 */
1203df8bae1dSRodney W. Grimes 
1204df8bae1dSRodney W. Grimes 	current = entry;
1205df8bae1dSRodney W. Grimes 
1206df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1207df8bae1dSRodney W. Grimes 		vm_prot_t old_prot;
1208df8bae1dSRodney W. Grimes 
1209df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, current, end);
1210df8bae1dSRodney W. Grimes 
1211df8bae1dSRodney W. Grimes 		old_prot = current->protection;
1212df8bae1dSRodney W. Grimes 		if (set_max)
1213df8bae1dSRodney W. Grimes 			current->protection =
1214df8bae1dSRodney W. Grimes 			    (current->max_protection = new_prot) &
1215df8bae1dSRodney W. Grimes 			    old_prot;
1216df8bae1dSRodney W. Grimes 		else
1217df8bae1dSRodney W. Grimes 			current->protection = new_prot;
1218df8bae1dSRodney W. Grimes 
1219df8bae1dSRodney W. Grimes 		/*
12200d94caffSDavid Greenman 		 * Update physical map if necessary. Worry about copy-on-write
12210d94caffSDavid Greenman 		 * here -- CHECK THIS XXX
1222df8bae1dSRodney W. Grimes 		 */
1223df8bae1dSRodney W. Grimes 
1224df8bae1dSRodney W. Grimes 		if (current->protection != old_prot) {
1225afa07f7eSJohn Dyson #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1226df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
1227df8bae1dSRodney W. Grimes 
1228afa07f7eSJohn Dyson 			if (current->eflags & MAP_ENTRY_IS_A_MAP) {
1229df8bae1dSRodney W. Grimes 				vm_map_entry_t share_entry;
1230df8bae1dSRodney W. Grimes 				vm_offset_t share_end;
1231df8bae1dSRodney W. Grimes 
1232df8bae1dSRodney W. Grimes 				vm_map_lock(current->object.share_map);
1233df8bae1dSRodney W. Grimes 				(void) vm_map_lookup_entry(
1234df8bae1dSRodney W. Grimes 				    current->object.share_map,
1235df8bae1dSRodney W. Grimes 				    current->offset,
1236df8bae1dSRodney W. Grimes 				    &share_entry);
1237df8bae1dSRodney W. Grimes 				share_end = current->offset +
1238df8bae1dSRodney W. Grimes 				    (current->end - current->start);
1239df8bae1dSRodney W. Grimes 				while ((share_entry !=
1240df8bae1dSRodney W. Grimes 					&current->object.share_map->header) &&
1241df8bae1dSRodney W. Grimes 				    (share_entry->start < share_end)) {
1242df8bae1dSRodney W. Grimes 
1243df8bae1dSRodney W. Grimes 					pmap_protect(map->pmap,
12445270eceaSBruce Evans 					    (qmax(share_entry->start,
1245df8bae1dSRodney W. Grimes 						    current->offset) -
1246df8bae1dSRodney W. Grimes 						current->offset +
1247df8bae1dSRodney W. Grimes 						current->start),
1248df8bae1dSRodney W. Grimes 					    min(share_entry->end,
1249df8bae1dSRodney W. Grimes 						share_end) -
1250df8bae1dSRodney W. Grimes 					    current->offset +
1251df8bae1dSRodney W. Grimes 					    current->start,
1252df8bae1dSRodney W. Grimes 					    current->protection &
1253df8bae1dSRodney W. Grimes 					    MASK(share_entry));
1254df8bae1dSRodney W. Grimes 
1255df8bae1dSRodney W. Grimes 					share_entry = share_entry->next;
1256df8bae1dSRodney W. Grimes 				}
1257df8bae1dSRodney W. Grimes 				vm_map_unlock(current->object.share_map);
12580d94caffSDavid Greenman 			} else
1259df8bae1dSRodney W. Grimes 				pmap_protect(map->pmap, current->start,
1260df8bae1dSRodney W. Grimes 				    current->end,
1261df8bae1dSRodney W. Grimes 				    current->protection & MASK(entry));
1262df8bae1dSRodney W. Grimes #undef	MASK
1263df8bae1dSRodney W. Grimes 		}
12647d78abc9SJohn Dyson 
12657d78abc9SJohn Dyson 		vm_map_simplify_entry(map, current);
12667d78abc9SJohn Dyson 
1267df8bae1dSRodney W. Grimes 		current = current->next;
1268df8bae1dSRodney W. Grimes 	}
1269df8bae1dSRodney W. Grimes 
12702d8acc0fSJohn Dyson 	map->timestamp++;
1271df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1272df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1273df8bae1dSRodney W. Grimes }
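
/*
 * Illustrative sketch (not in the original file): the two flavors of
 * vm_map_protect.  With set_max FALSE only the current protection changes;
 * with set_max TRUE the maximum is replaced and the current protection is
 * re-derived as (new maximum & old current).  In both cases the new
 * protection must be a subset of each entry's existing max_protection.
 * VM_PROT_READ is assumed to come from the VM headers; the function name
 * is hypothetical.
 */
#if 0
static int
vm_map_protect_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int rv;

	/* Drop the current protection to read-only. */
	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* Also cap the maximum protection at read-only. */
	return (vm_map_protect(map, start, end, VM_PROT_READ, TRUE));
}
#endif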
1274df8bae1dSRodney W. Grimes 
1275df8bae1dSRodney W. Grimes /*
1276867a482dSJohn Dyson  *	vm_map_madvise:
1277867a482dSJohn Dyson  *
1278867a482dSJohn Dyson  * 	This routine traverses a process's map, handling the madvise
1279867a482dSJohn Dyson  *	system call.
1280867a482dSJohn Dyson  */
1281867a482dSJohn Dyson void
1282867a482dSJohn Dyson vm_map_madvise(map, pmap, start, end, advise)
1283867a482dSJohn Dyson 	vm_map_t map;
1284867a482dSJohn Dyson 	pmap_t pmap;
1285867a482dSJohn Dyson 	vm_offset_t start, end;
1286867a482dSJohn Dyson 	int advise;
1287867a482dSJohn Dyson {
1288c0877f10SJohn Dyson 	vm_map_entry_t current;
1289867a482dSJohn Dyson 	vm_map_entry_t entry;
1290867a482dSJohn Dyson 
1291867a482dSJohn Dyson 	vm_map_lock(map);
1292867a482dSJohn Dyson 
1293867a482dSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
1294867a482dSJohn Dyson 
1295867a482dSJohn Dyson 	if (vm_map_lookup_entry(map, start, &entry)) {
1296867a482dSJohn Dyson 		vm_map_clip_start(map, entry, start);
1297867a482dSJohn Dyson 	} else
1298867a482dSJohn Dyson 		entry = entry->next;
1299867a482dSJohn Dyson 
1300867a482dSJohn Dyson 	for(current = entry;
1301867a482dSJohn Dyson 		(current != &map->header) && (current->start < end);
1302867a482dSJohn Dyson 		current = current->next) {
130347221757SJohn Dyson 		vm_size_t size;
1304fed9a903SJohn Dyson 
1305afa07f7eSJohn Dyson 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1306867a482dSJohn Dyson 			continue;
1307867a482dSJohn Dyson 		}
1308fed9a903SJohn Dyson 
130947221757SJohn Dyson 		vm_map_clip_end(map, current, end);
131047221757SJohn Dyson 		size = current->end - current->start;
131147221757SJohn Dyson 
1312fed9a903SJohn Dyson 		/*
1313fed9a903SJohn Dyson 		 * Create an object if needed
1314fed9a903SJohn Dyson 		 */
1315fed9a903SJohn Dyson 		if (current->object.vm_object == NULL) {
1316fed9a903SJohn Dyson 			vm_object_t object;
131747221757SJohn Dyson 			if ((advise == MADV_FREE) || (advise == MADV_DONTNEED))
131847221757SJohn Dyson 				continue;
1319fed9a903SJohn Dyson 			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
1320fed9a903SJohn Dyson 			current->object.vm_object = object;
1321fed9a903SJohn Dyson 			current->offset = 0;
1322fed9a903SJohn Dyson 		}
1323fed9a903SJohn Dyson 
1324867a482dSJohn Dyson 		switch (advise) {
1325867a482dSJohn Dyson 		case MADV_NORMAL:
1326867a482dSJohn Dyson 			current->object.vm_object->behavior = OBJ_NORMAL;
1327867a482dSJohn Dyson 			break;
1328867a482dSJohn Dyson 		case MADV_SEQUENTIAL:
1329867a482dSJohn Dyson 			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
1330867a482dSJohn Dyson 			break;
1331867a482dSJohn Dyson 		case MADV_RANDOM:
1332867a482dSJohn Dyson 			current->object.vm_object->behavior = OBJ_RANDOM;
1333867a482dSJohn Dyson 			break;
1334867a482dSJohn Dyson 		/*
1335867a482dSJohn Dyson 		 * Right now, we could handle DONTNEED and WILLNEED with common code.
1336867a482dSJohn Dyson 		 * They are mostly the same, except for the potential async reads (NYI).
1337867a482dSJohn Dyson 		 */
13380a47b48bSJohn Dyson 		case MADV_FREE:
1339867a482dSJohn Dyson 		case MADV_DONTNEED:
1340867a482dSJohn Dyson 			{
1341867a482dSJohn Dyson 				vm_pindex_t pindex;
1342867a482dSJohn Dyson 				int count;
134347221757SJohn Dyson 				pindex = OFF_TO_IDX(current->offset);
1344867a482dSJohn Dyson 				count = OFF_TO_IDX(size);
1345867a482dSJohn Dyson 				/*
1346867a482dSJohn Dyson 				 * MADV_DONTNEED removes the page from all
1347867a482dSJohn Dyson 				 * pmaps, so pmap_remove is not necessary.
1348867a482dSJohn Dyson 				 */
1349867a482dSJohn Dyson 				vm_object_madvise(current->object.vm_object,
1350867a482dSJohn Dyson 					pindex, count, advise);
1351867a482dSJohn Dyson 			}
1352867a482dSJohn Dyson 			break;
1353867a482dSJohn Dyson 
1354867a482dSJohn Dyson 		case MADV_WILLNEED:
1355867a482dSJohn Dyson 			{
1356867a482dSJohn Dyson 				vm_pindex_t pindex;
1357867a482dSJohn Dyson 				int count;
13584334b0d8SJohn Dyson 				pindex = OFF_TO_IDX(current->offset);
1359867a482dSJohn Dyson 				count = OFF_TO_IDX(size);
1360867a482dSJohn Dyson 				vm_object_madvise(current->object.vm_object,
1361867a482dSJohn Dyson 					pindex, count, advise);
1362867a482dSJohn Dyson 				pmap_object_init_pt(pmap, current->start,
1363867a482dSJohn Dyson 					current->object.vm_object, pindex,
1364867a482dSJohn Dyson 					(count << PAGE_SHIFT), 0);
1365867a482dSJohn Dyson 			}
1366867a482dSJohn Dyson 			break;
1367867a482dSJohn Dyson 
1368867a482dSJohn Dyson 		default:
1369867a482dSJohn Dyson 			break;
1370867a482dSJohn Dyson 		}
1371867a482dSJohn Dyson 	}
1372867a482dSJohn Dyson 
137347221757SJohn Dyson 	map->timestamp++;
1374867a482dSJohn Dyson 	vm_map_simplify_entry(map, entry);
1375867a482dSJohn Dyson 	vm_map_unlock(map);
1376867a482dSJohn Dyson 	return;
1377867a482dSJohn Dyson }
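
/*
 * Illustrative sketch (not in the original file): a typical advice
 * sequence for a range that is streamed through once.  The function name
 * is hypothetical; the map and pmap are assumed to belong to the process
 * issuing the advice.
 */
#if 0
static void
vm_map_madvise_example(vm_map_t map, pmap_t pmap, vm_offset_t start,
    vm_offset_t end)
{
	/* Hint that the range will be read sequentially. */
	vm_map_madvise(map, pmap, start, end, MADV_SEQUENTIAL);

	/* ... access the range ... */

	/* Afterwards, tell the VM system the pages are no longer needed. */
	vm_map_madvise(map, pmap, start, end, MADV_DONTNEED);
}
#endif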
1378867a482dSJohn Dyson 
1379867a482dSJohn Dyson 
1380867a482dSJohn Dyson /*
1381df8bae1dSRodney W. Grimes  *	vm_map_inherit:
1382df8bae1dSRodney W. Grimes  *
1383df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
1384df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
1385df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
1386df8bae1dSRodney W. Grimes  *	child maps at the time of vm_map_fork.
1387df8bae1dSRodney W. Grimes  */
1388df8bae1dSRodney W. Grimes int
1389b9dcd593SBruce Evans vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1390b9dcd593SBruce Evans 	       vm_inherit_t new_inheritance)
1391df8bae1dSRodney W. Grimes {
1392c0877f10SJohn Dyson 	vm_map_entry_t entry;
1393df8bae1dSRodney W. Grimes 	vm_map_entry_t temp_entry;
1394df8bae1dSRodney W. Grimes 
1395df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
1396df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
1397df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
1398df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
1399df8bae1dSRodney W. Grimes 		break;
1400df8bae1dSRodney W. Grimes 	default:
1401df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
1402df8bae1dSRodney W. Grimes 	}
1403df8bae1dSRodney W. Grimes 
1404df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1405df8bae1dSRodney W. Grimes 
1406df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1407df8bae1dSRodney W. Grimes 
1408df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1409df8bae1dSRodney W. Grimes 		entry = temp_entry;
1410df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
14110d94caffSDavid Greenman 	} else
1412df8bae1dSRodney W. Grimes 		entry = temp_entry->next;
1413df8bae1dSRodney W. Grimes 
1414df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1415df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1416df8bae1dSRodney W. Grimes 
1417df8bae1dSRodney W. Grimes 		entry->inheritance = new_inheritance;
1418df8bae1dSRodney W. Grimes 
1419df8bae1dSRodney W. Grimes 		entry = entry->next;
1420df8bae1dSRodney W. Grimes 	}
1421df8bae1dSRodney W. Grimes 
1422f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, temp_entry);
142347221757SJohn Dyson 	map->timestamp++;
1424df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1425df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1426df8bae1dSRodney W. Grimes }
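
/*
 * A minimal usage sketch (not in the original file): marking a range to be
 * shared with child maps created at vm_map_fork time.  The function name
 * is hypothetical.
 */
#if 0
static int
vm_map_inherit_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	/* VM_INHERIT_NONE and VM_INHERIT_COPY are the other valid values. */
	return (vm_map_inherit(map, start, end, VM_INHERIT_SHARE));
}
#endif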
1427df8bae1dSRodney W. Grimes 
1428df8bae1dSRodney W. Grimes /*
14297aaaa4fdSJohn Dyson  * Implement the semantics of mlock
14307aaaa4fdSJohn Dyson  */
14317aaaa4fdSJohn Dyson int
14327aaaa4fdSJohn Dyson vm_map_user_pageable(map, start, end, new_pageable)
1433c0877f10SJohn Dyson 	vm_map_t map;
1434c0877f10SJohn Dyson 	vm_offset_t start;
1435c0877f10SJohn Dyson 	vm_offset_t end;
1436c0877f10SJohn Dyson 	boolean_t new_pageable;
14377aaaa4fdSJohn Dyson {
1438b44959ceSTor Egge 	vm_map_entry_t entry;
14397aaaa4fdSJohn Dyson 	vm_map_entry_t start_entry;
1440b44959ceSTor Egge 	vm_offset_t estart;
14417aaaa4fdSJohn Dyson 	int rv;
14427aaaa4fdSJohn Dyson 
14437aaaa4fdSJohn Dyson 	vm_map_lock(map);
14447aaaa4fdSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
14457aaaa4fdSJohn Dyson 
14467aaaa4fdSJohn Dyson 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
14477aaaa4fdSJohn Dyson 		vm_map_unlock(map);
14487aaaa4fdSJohn Dyson 		return (KERN_INVALID_ADDRESS);
14497aaaa4fdSJohn Dyson 	}
14507aaaa4fdSJohn Dyson 
14517aaaa4fdSJohn Dyson 	if (new_pageable) {
14527aaaa4fdSJohn Dyson 
14537aaaa4fdSJohn Dyson 		entry = start_entry;
14547aaaa4fdSJohn Dyson 		vm_map_clip_start(map, entry, start);
14557aaaa4fdSJohn Dyson 
14567aaaa4fdSJohn Dyson 		/*
14577aaaa4fdSJohn Dyson 		 * Now decrement the wiring count for each region. If a region
14587aaaa4fdSJohn Dyson 		 * becomes completely unwired, unwire its physical pages and
14597aaaa4fdSJohn Dyson 		 * mappings.
14607aaaa4fdSJohn Dyson 		 */
1461996c772fSJohn Dyson 		vm_map_set_recursive(map);
14627aaaa4fdSJohn Dyson 
14637aaaa4fdSJohn Dyson 		entry = start_entry;
14647aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
1465afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
14667aaaa4fdSJohn Dyson 				vm_map_clip_end(map, entry, end);
1467afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
14687aaaa4fdSJohn Dyson 				entry->wired_count--;
14697aaaa4fdSJohn Dyson 				if (entry->wired_count == 0)
14707aaaa4fdSJohn Dyson 					vm_fault_unwire(map, entry->start, entry->end);
14717aaaa4fdSJohn Dyson 			}
1472b44959ceSTor Egge 			vm_map_simplify_entry(map,entry);
14737aaaa4fdSJohn Dyson 			entry = entry->next;
14747aaaa4fdSJohn Dyson 		}
1475996c772fSJohn Dyson 		vm_map_clear_recursive(map);
14767aaaa4fdSJohn Dyson 	} else {
14777aaaa4fdSJohn Dyson 
14787aaaa4fdSJohn Dyson 		entry = start_entry;
14797aaaa4fdSJohn Dyson 
14807aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
14817aaaa4fdSJohn Dyson 
1482afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
14837aaaa4fdSJohn Dyson 				entry = entry->next;
14847aaaa4fdSJohn Dyson 				continue;
14857aaaa4fdSJohn Dyson 			}
14867aaaa4fdSJohn Dyson 
14877aaaa4fdSJohn Dyson 			if (entry->wired_count != 0) {
14887aaaa4fdSJohn Dyson 				entry->wired_count++;
1489afa07f7eSJohn Dyson 				entry->eflags |= MAP_ENTRY_USER_WIRED;
14907aaaa4fdSJohn Dyson 				entry = entry->next;
14917aaaa4fdSJohn Dyson 				continue;
14927aaaa4fdSJohn Dyson 			}
14937aaaa4fdSJohn Dyson 
14947aaaa4fdSJohn Dyson 			/* Here on entry being newly wired */
14957aaaa4fdSJohn Dyson 
1496afa07f7eSJohn Dyson 			if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1497afa07f7eSJohn Dyson 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
14987aaaa4fdSJohn Dyson 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
14997aaaa4fdSJohn Dyson 
15007aaaa4fdSJohn Dyson 					vm_object_shadow(&entry->object.vm_object,
15017aaaa4fdSJohn Dyson 					    &entry->offset,
1502c2e11a03SJohn Dyson 					    atop(entry->end - entry->start));
1503afa07f7eSJohn Dyson 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
15047aaaa4fdSJohn Dyson 
15057aaaa4fdSJohn Dyson 				} else if (entry->object.vm_object == NULL) {
15067aaaa4fdSJohn Dyson 
15077aaaa4fdSJohn Dyson 					entry->object.vm_object =
15087aaaa4fdSJohn Dyson 					    vm_object_allocate(OBJT_DEFAULT,
1509c2e11a03SJohn Dyson 						atop(entry->end - entry->start));
15107aaaa4fdSJohn Dyson 					entry->offset = (vm_offset_t) 0;
15117aaaa4fdSJohn Dyson 
15127aaaa4fdSJohn Dyson 				}
15131c7c3c6aSMatthew Dillon #if 0
15141c7c3c6aSMatthew Dillon 				/*
15151c7c3c6aSMatthew Dillon 				 * (no longer applies)
15161c7c3c6aSMatthew Dillon 				 */
15177aaaa4fdSJohn Dyson 				default_pager_convert_to_swapq(entry->object.vm_object);
15181c7c3c6aSMatthew Dillon #endif
15197aaaa4fdSJohn Dyson 			}
15207aaaa4fdSJohn Dyson 
15217aaaa4fdSJohn Dyson 			vm_map_clip_start(map, entry, start);
15227aaaa4fdSJohn Dyson 			vm_map_clip_end(map, entry, end);
15237aaaa4fdSJohn Dyson 
15247aaaa4fdSJohn Dyson 			entry->wired_count++;
1525afa07f7eSJohn Dyson 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1526b44959ceSTor Egge 			estart = entry->start;
15277aaaa4fdSJohn Dyson 
15287aaaa4fdSJohn Dyson 			/* First we need to allow map modifications */
1529996c772fSJohn Dyson 			vm_map_set_recursive(map);
153003e9c6c1SJohn Dyson 			vm_map_lock_downgrade(map);
153147221757SJohn Dyson 			map->timestamp++;
15327aaaa4fdSJohn Dyson 
15337aaaa4fdSJohn Dyson 			rv = vm_fault_user_wire(map, entry->start, entry->end);
15347aaaa4fdSJohn Dyson 			if (rv) {
15357aaaa4fdSJohn Dyson 
15367aaaa4fdSJohn Dyson 				entry->wired_count--;
1537afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
15387aaaa4fdSJohn Dyson 
1539996c772fSJohn Dyson 				vm_map_clear_recursive(map);
15407aaaa4fdSJohn Dyson 				vm_map_unlock(map);
15417aaaa4fdSJohn Dyson 
15427aaaa4fdSJohn Dyson 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
15437aaaa4fdSJohn Dyson 				return rv;
15447aaaa4fdSJohn Dyson 			}
15457aaaa4fdSJohn Dyson 
1546996c772fSJohn Dyson 			vm_map_clear_recursive(map);
1547b44959ceSTor Egge 			if (vm_map_lock_upgrade(map)) {
1548b44959ceSTor Egge 				vm_map_lock(map);
1549b44959ceSTor Egge 				if (vm_map_lookup_entry(map, estart, &entry)
1550b44959ceSTor Egge 				    == FALSE) {
1551b44959ceSTor Egge 					vm_map_unlock(map);
1552b44959ceSTor Egge 					(void) vm_map_user_pageable(map,
1553b44959ceSTor Egge 								    start,
1554b44959ceSTor Egge 								    estart,
1555b44959ceSTor Egge 								    TRUE);
1556b44959ceSTor Egge 					return (KERN_INVALID_ADDRESS);
1557b44959ceSTor Egge 				}
1558b44959ceSTor Egge 			}
1559b44959ceSTor Egge 			vm_map_simplify_entry(map,entry);
15607aaaa4fdSJohn Dyson 		}
15617aaaa4fdSJohn Dyson 	}
156247221757SJohn Dyson 	map->timestamp++;
15637aaaa4fdSJohn Dyson 	vm_map_unlock(map);
15647aaaa4fdSJohn Dyson 	return KERN_SUCCESS;
15657aaaa4fdSJohn Dyson }
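
/*
 * Illustrative sketch (not in the original file): the mlock/munlock
 * pattern built on vm_map_user_pageable.  Passing new_pageable == FALSE
 * wires the range (mlock); passing TRUE unwires it again (munlock).  All
 * names are hypothetical.
 */
#if 0
static int
vm_map_user_wire_example(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	int rv;

	/* mlock(): wire the user pages down. */
	rv = vm_map_user_pageable(map, addr, addr + len, FALSE);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* munlock(): allow them to be paged again. */
	return (vm_map_user_pageable(map, addr, addr + len, TRUE));
}
#endif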
15667aaaa4fdSJohn Dyson 
15677aaaa4fdSJohn Dyson /*
1568df8bae1dSRodney W. Grimes  *	vm_map_pageable:
1569df8bae1dSRodney W. Grimes  *
1570df8bae1dSRodney W. Grimes  *	Sets the pageability of the specified address
1571df8bae1dSRodney W. Grimes  *	range in the target map.  Regions specified
1572df8bae1dSRodney W. Grimes  *	as not pageable require locked-down physical
1573df8bae1dSRodney W. Grimes  *	memory and physical page maps.
1574df8bae1dSRodney W. Grimes  *
1575df8bae1dSRodney W. Grimes  *	The map must not be locked, but a reference
1576df8bae1dSRodney W. Grimes  *	must remain to the map throughout the call.
1577df8bae1dSRodney W. Grimes  */
1578df8bae1dSRodney W. Grimes int
1579df8bae1dSRodney W. Grimes vm_map_pageable(map, start, end, new_pageable)
1580c0877f10SJohn Dyson 	vm_map_t map;
1581c0877f10SJohn Dyson 	vm_offset_t start;
1582c0877f10SJohn Dyson 	vm_offset_t end;
1583c0877f10SJohn Dyson 	boolean_t new_pageable;
1584df8bae1dSRodney W. Grimes {
1585c0877f10SJohn Dyson 	vm_map_entry_t entry;
1586df8bae1dSRodney W. Grimes 	vm_map_entry_t start_entry;
1587c0877f10SJohn Dyson 	vm_offset_t failed = 0;
1588df8bae1dSRodney W. Grimes 	int rv;
1589df8bae1dSRodney W. Grimes 
1590df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1591df8bae1dSRodney W. Grimes 
1592df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1593df8bae1dSRodney W. Grimes 
1594df8bae1dSRodney W. Grimes 	/*
15950d94caffSDavid Greenman 	 * Only one pageability change may take place at one time, since
15960d94caffSDavid Greenman 	 * vm_fault assumes it will be called only once for each
15970d94caffSDavid Greenman 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
15980d94caffSDavid Greenman 	 * changing the pageability for the entire region.  We do so before
15990d94caffSDavid Greenman 	 * making any changes.
1600df8bae1dSRodney W. Grimes 	 */
1601df8bae1dSRodney W. Grimes 
1602df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1603df8bae1dSRodney W. Grimes 		vm_map_unlock(map);
1604df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1605df8bae1dSRodney W. Grimes 	}
1606df8bae1dSRodney W. Grimes 	entry = start_entry;
1607df8bae1dSRodney W. Grimes 
1608df8bae1dSRodney W. Grimes 	/*
16090d94caffSDavid Greenman 	 * Actions are rather different for wiring and unwiring, so we have
16100d94caffSDavid Greenman 	 * two separate cases.
1611df8bae1dSRodney W. Grimes 	 */
1612df8bae1dSRodney W. Grimes 
1613df8bae1dSRodney W. Grimes 	if (new_pageable) {
1614df8bae1dSRodney W. Grimes 
1615df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1616df8bae1dSRodney W. Grimes 
1617df8bae1dSRodney W. Grimes 		/*
16180d94caffSDavid Greenman 		 * Unwiring.  First ensure that the range to be unwired is
16190d94caffSDavid Greenman 		 * really wired down and that there are no holes.
1620df8bae1dSRodney W. Grimes 		 */
1621df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1622df8bae1dSRodney W. Grimes 
1623df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0 ||
1624df8bae1dSRodney W. Grimes 			    (entry->end < end &&
1625df8bae1dSRodney W. Grimes 				(entry->next == &map->header ||
1626df8bae1dSRodney W. Grimes 				    entry->next->start > entry->end))) {
1627df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1628df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1629df8bae1dSRodney W. Grimes 			}
1630df8bae1dSRodney W. Grimes 			entry = entry->next;
1631df8bae1dSRodney W. Grimes 		}
1632df8bae1dSRodney W. Grimes 
1633df8bae1dSRodney W. Grimes 		/*
16340d94caffSDavid Greenman 		 * Now decrement the wiring count for each region. If a region
16350d94caffSDavid Greenman 		 * becomes completely unwired, unwire its physical pages and
16360d94caffSDavid Greenman 		 * mappings.
1637df8bae1dSRodney W. Grimes 		 */
1638996c772fSJohn Dyson 		vm_map_set_recursive(map);
1639df8bae1dSRodney W. Grimes 
1640df8bae1dSRodney W. Grimes 		entry = start_entry;
1641df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1642df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1643df8bae1dSRodney W. Grimes 
1644df8bae1dSRodney W. Grimes 			entry->wired_count--;
1645df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0)
1646df8bae1dSRodney W. Grimes 				vm_fault_unwire(map, entry->start, entry->end);
1647df8bae1dSRodney W. Grimes 
1648df8bae1dSRodney W. Grimes 			entry = entry->next;
1649df8bae1dSRodney W. Grimes 		}
1650f32dbbeeSJohn Dyson 		vm_map_simplify_entry(map, start_entry);
1651996c772fSJohn Dyson 		vm_map_clear_recursive(map);
16520d94caffSDavid Greenman 	} else {
1653df8bae1dSRodney W. Grimes 		/*
1654df8bae1dSRodney W. Grimes 		 * Wiring.  We must do this in two passes:
1655df8bae1dSRodney W. Grimes 		 *
16560d94caffSDavid Greenman 		 * 1.  Holding the write lock, we create any shadow or zero-fill
16570d94caffSDavid Greenman 		 * objects that need to be created. Then we clip each map
16580d94caffSDavid Greenman 		 * entry to the region to be wired and increment its wiring
16590d94caffSDavid Greenman 		 * count.  We create objects before clipping the map entries
1660df8bae1dSRodney W. Grimes 		 * to avoid object proliferation.
1661df8bae1dSRodney W. Grimes 		 *
16620d94caffSDavid Greenman 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
16630d94caffSDavid Greenman 		 * fault in the pages for any newly wired area (wired_count is
16640d94caffSDavid Greenman 		 * 1).
1665df8bae1dSRodney W. Grimes 		 *
16660d94caffSDavid Greenman 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
166724a1cce3SDavid Greenman 		 * deadlock with another process that may have faulted on one
16680d94caffSDavid Greenman 		 * of the pages to be wired (it would mark the page busy,
16690d94caffSDavid Greenman 		 * blocking us, then in turn block on the map lock that we
16700d94caffSDavid Greenman 		 * hold).  Because of problems in the recursive lock package,
16710d94caffSDavid Greenman 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
16720d94caffSDavid Greenman 		 * any actions that require the write lock must be done
16730d94caffSDavid Greenman 		 * beforehand.  Because we keep the read lock on the map, the
16740d94caffSDavid Greenman 		 * copy-on-write status of the entries we modify here cannot
16750d94caffSDavid Greenman 		 * change.
1676df8bae1dSRodney W. Grimes 		 */
1677df8bae1dSRodney W. Grimes 
1678df8bae1dSRodney W. Grimes 		/*
1679df8bae1dSRodney W. Grimes 		 * Pass 1.
1680df8bae1dSRodney W. Grimes 		 */
1681df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1682df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0) {
1683df8bae1dSRodney W. Grimes 
1684df8bae1dSRodney W. Grimes 				/*
1685df8bae1dSRodney W. Grimes 				 * Perform actions of vm_map_lookup that need
1686df8bae1dSRodney W. Grimes 				 * the write lock on the map: create a shadow
1687df8bae1dSRodney W. Grimes 				 * object for a copy-on-write region, or an
1688df8bae1dSRodney W. Grimes 				 * object for a zero-fill region.
1689df8bae1dSRodney W. Grimes 				 *
1690df8bae1dSRodney W. Grimes 				 * We don't have to do this for entries that
16910d94caffSDavid Greenman 				 * point to sharing maps, because we won't
16920d94caffSDavid Greenman 				 * hold the lock on the sharing map.
1693df8bae1dSRodney W. Grimes 				 */
1694afa07f7eSJohn Dyson 				if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1695afa07f7eSJohn Dyson 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1696b5b40fa6SJohn Dyson 					if (copyflag &&
1697df8bae1dSRodney W. Grimes 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1698df8bae1dSRodney W. Grimes 
1699df8bae1dSRodney W. Grimes 						vm_object_shadow(&entry->object.vm_object,
1700df8bae1dSRodney W. Grimes 						    &entry->offset,
1701c2e11a03SJohn Dyson 						    atop(entry->end - entry->start));
1702afa07f7eSJohn Dyson 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
17030d94caffSDavid Greenman 					} else if (entry->object.vm_object == NULL) {
1704df8bae1dSRodney W. Grimes 						entry->object.vm_object =
1705a316d390SJohn Dyson 						    vm_object_allocate(OBJT_DEFAULT,
1706c2e11a03SJohn Dyson 							atop(entry->end - entry->start));
1707df8bae1dSRodney W. Grimes 						entry->offset = (vm_offset_t) 0;
1708df8bae1dSRodney W. Grimes 					}
17091c7c3c6aSMatthew Dillon #if 0
17101c7c3c6aSMatthew Dillon 					/*
17111c7c3c6aSMatthew Dillon 					 * (no longer applies)
17121c7c3c6aSMatthew Dillon 					 */
1713b5b40fa6SJohn Dyson 					default_pager_convert_to_swapq(entry->object.vm_object);
17141c7c3c6aSMatthew Dillon #endif
1715df8bae1dSRodney W. Grimes 				}
1716df8bae1dSRodney W. Grimes 			}
1717df8bae1dSRodney W. Grimes 			vm_map_clip_start(map, entry, start);
1718df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1719df8bae1dSRodney W. Grimes 			entry->wired_count++;
1720df8bae1dSRodney W. Grimes 
1721df8bae1dSRodney W. Grimes 			/*
1722df8bae1dSRodney W. Grimes 			 * Check for holes
1723df8bae1dSRodney W. Grimes 			 */
1724df8bae1dSRodney W. Grimes 			if (entry->end < end &&
1725df8bae1dSRodney W. Grimes 			    (entry->next == &map->header ||
1726df8bae1dSRodney W. Grimes 				entry->next->start > entry->end)) {
1727df8bae1dSRodney W. Grimes 				/*
17280d94caffSDavid Greenman 				 * Found one.  Object creation actions do not
17290d94caffSDavid Greenman 				 * need to be undone, but the wired counts
17300d94caffSDavid Greenman 				 * need to be restored.
1731df8bae1dSRodney W. Grimes 				 */
1732df8bae1dSRodney W. Grimes 				while (entry != &map->header && entry->end > start) {
1733df8bae1dSRodney W. Grimes 					entry->wired_count--;
1734df8bae1dSRodney W. Grimes 					entry = entry->prev;
1735df8bae1dSRodney W. Grimes 				}
173647221757SJohn Dyson 				map->timestamp++;
1737df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1738df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1739df8bae1dSRodney W. Grimes 			}
1740df8bae1dSRodney W. Grimes 			entry = entry->next;
1741df8bae1dSRodney W. Grimes 		}
1742df8bae1dSRodney W. Grimes 
1743df8bae1dSRodney W. Grimes 		/*
1744df8bae1dSRodney W. Grimes 		 * Pass 2.
1745df8bae1dSRodney W. Grimes 		 */
1746df8bae1dSRodney W. Grimes 
1747df8bae1dSRodney W. Grimes 		/*
1748df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1749df8bae1dSRodney W. Grimes 		 *
175024a1cce3SDavid Greenman 		 * If we are wiring in the kernel map or a submap of it,
175124a1cce3SDavid Greenman 		 * unlock the map to avoid deadlocks.  We trust that the
175224a1cce3SDavid Greenman 		 * kernel is well-behaved, and therefore will not do
175324a1cce3SDavid Greenman 		 * anything destructive to this region of the map while
175424a1cce3SDavid Greenman 		 * we have it unlocked.  We cannot trust user processes
175524a1cce3SDavid Greenman 		 * to do the same.
1756df8bae1dSRodney W. Grimes 		 *
1757df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1758df8bae1dSRodney W. Grimes 		 */
1759df8bae1dSRodney W. Grimes 		if (vm_map_pmap(map) == kernel_pmap) {
1760df8bae1dSRodney W. Grimes 			vm_map_unlock(map);	/* trust me ... */
17610d94caffSDavid Greenman 		} else {
1762996c772fSJohn Dyson 			vm_map_set_recursive(map);
176303e9c6c1SJohn Dyson 			vm_map_lock_downgrade(map);
1764df8bae1dSRodney W. Grimes 		}
1765df8bae1dSRodney W. Grimes 
1766df8bae1dSRodney W. Grimes 		rv = 0;
1767df8bae1dSRodney W. Grimes 		entry = start_entry;
1768df8bae1dSRodney W. Grimes 		while (entry != &map->header && entry->start < end) {
1769df8bae1dSRodney W. Grimes 			/*
17700d94caffSDavid Greenman 			 * If vm_fault_wire fails for any page we need to undo
17710d94caffSDavid Greenman 			 * what has been done.  We decrement the wiring count
17720d94caffSDavid Greenman 			 * for those pages which have not yet been wired (now)
17730d94caffSDavid Greenman 			 * and unwire those that have (later).
1774df8bae1dSRodney W. Grimes 			 *
1775df8bae1dSRodney W. Grimes 			 * XXX this violates the locking protocol on the map,
1776df8bae1dSRodney W. Grimes 			 * needs to be fixed.
1777df8bae1dSRodney W. Grimes 			 */
1778df8bae1dSRodney W. Grimes 			if (rv)
1779df8bae1dSRodney W. Grimes 				entry->wired_count--;
1780df8bae1dSRodney W. Grimes 			else if (entry->wired_count == 1) {
1781df8bae1dSRodney W. Grimes 				rv = vm_fault_wire(map, entry->start, entry->end);
1782df8bae1dSRodney W. Grimes 				if (rv) {
1783df8bae1dSRodney W. Grimes 					failed = entry->start;
1784df8bae1dSRodney W. Grimes 					entry->wired_count--;
1785df8bae1dSRodney W. Grimes 				}
1786df8bae1dSRodney W. Grimes 			}
1787df8bae1dSRodney W. Grimes 			entry = entry->next;
1788df8bae1dSRodney W. Grimes 		}
1789df8bae1dSRodney W. Grimes 
1790df8bae1dSRodney W. Grimes 		if (vm_map_pmap(map) == kernel_pmap) {
1791df8bae1dSRodney W. Grimes 			vm_map_lock(map);
17920d94caffSDavid Greenman 		} else {
1793996c772fSJohn Dyson 			vm_map_clear_recursive(map);
1794df8bae1dSRodney W. Grimes 		}
1795df8bae1dSRodney W. Grimes 		if (rv) {
1796df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1797df8bae1dSRodney W. Grimes 			(void) vm_map_pageable(map, start, failed, TRUE);
1798df8bae1dSRodney W. Grimes 			return (rv);
1799df8bae1dSRodney W. Grimes 		}
1800b7b2aac2SJohn Dyson 		vm_map_simplify_entry(map, start_entry);
1801df8bae1dSRodney W. Grimes 	}
1802df8bae1dSRodney W. Grimes 
1803df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1804df8bae1dSRodney W. Grimes 
180547221757SJohn Dyson 	map->timestamp++;
1806df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1807df8bae1dSRodney W. Grimes }
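
/*
 * Illustrative sketch (not in the original file): kernel-level wiring with
 * vm_map_pageable.  As with the user variant, new_pageable == FALSE wires
 * the range and TRUE unwires it; the map must be unlocked but referenced
 * across the call.  All names are hypothetical.
 */
#if 0
static int
vm_map_wire_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int rv;

	/* Wire the range so its pages cannot be paged out. */
	rv = vm_map_pageable(map, start, end, FALSE);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* ... use the wired range ... */

	/* Unwire it when done. */
	return (vm_map_pageable(map, start, end, TRUE));
}
#endif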
1808df8bae1dSRodney W. Grimes 
1809df8bae1dSRodney W. Grimes /*
1810df8bae1dSRodney W. Grimes  * vm_map_clean
1811df8bae1dSRodney W. Grimes  *
1812df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
1813df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
1814df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
1815df8bae1dSRodney W. Grimes  *
1816df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
1817df8bae1dSRodney W. Grimes  */
1818df8bae1dSRodney W. Grimes int
1819df8bae1dSRodney W. Grimes vm_map_clean(map, start, end, syncio, invalidate)
1820df8bae1dSRodney W. Grimes 	vm_map_t map;
1821df8bae1dSRodney W. Grimes 	vm_offset_t start;
1822df8bae1dSRodney W. Grimes 	vm_offset_t end;
1823df8bae1dSRodney W. Grimes 	boolean_t syncio;
1824df8bae1dSRodney W. Grimes 	boolean_t invalidate;
1825df8bae1dSRodney W. Grimes {
1826c0877f10SJohn Dyson 	vm_map_entry_t current;
1827df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1828df8bae1dSRodney W. Grimes 	vm_size_t size;
1829df8bae1dSRodney W. Grimes 	vm_object_t object;
1830a316d390SJohn Dyson 	vm_ooffset_t offset;
1831df8bae1dSRodney W. Grimes 
1832df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
1833df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1834df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &entry)) {
1835df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
1836df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1837df8bae1dSRodney W. Grimes 	}
1838df8bae1dSRodney W. Grimes 	/*
1839df8bae1dSRodney W. Grimes 	 * Make a first pass to check for holes.
1840df8bae1dSRodney W. Grimes 	 */
1841df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1842afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1843df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1844df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1845df8bae1dSRodney W. Grimes 		}
1846df8bae1dSRodney W. Grimes 		if (end > current->end &&
1847df8bae1dSRodney W. Grimes 		    (current->next == &map->header ||
1848df8bae1dSRodney W. Grimes 			current->end != current->next->start)) {
1849df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1850df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ADDRESS);
1851df8bae1dSRodney W. Grimes 		}
1852df8bae1dSRodney W. Grimes 	}
1853df8bae1dSRodney W. Grimes 
1854cf2819ccSJohn Dyson 	if (invalidate)
1855cf2819ccSJohn Dyson 		pmap_remove(vm_map_pmap(map), start, end);
1856df8bae1dSRodney W. Grimes 	/*
1857df8bae1dSRodney W. Grimes 	 * Make a second pass, cleaning/uncaching pages from the indicated
1858df8bae1dSRodney W. Grimes 	 * objects as we go.
1859df8bae1dSRodney W. Grimes 	 */
1860df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1861df8bae1dSRodney W. Grimes 		offset = current->offset + (start - current->start);
1862df8bae1dSRodney W. Grimes 		size = (end <= current->end ? end : current->end) - start;
1863afa07f7eSJohn Dyson 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1864c0877f10SJohn Dyson 			vm_map_t smap;
1865df8bae1dSRodney W. Grimes 			vm_map_entry_t tentry;
1866df8bae1dSRodney W. Grimes 			vm_size_t tsize;
1867df8bae1dSRodney W. Grimes 
1868df8bae1dSRodney W. Grimes 			smap = current->object.share_map;
1869df8bae1dSRodney W. Grimes 			vm_map_lock_read(smap);
1870df8bae1dSRodney W. Grimes 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1871df8bae1dSRodney W. Grimes 			tsize = tentry->end - offset;
1872df8bae1dSRodney W. Grimes 			if (tsize < size)
1873df8bae1dSRodney W. Grimes 				size = tsize;
1874df8bae1dSRodney W. Grimes 			object = tentry->object.vm_object;
1875df8bae1dSRodney W. Grimes 			offset = tentry->offset + (offset - tentry->start);
1876df8bae1dSRodney W. Grimes 			vm_map_unlock_read(smap);
1877df8bae1dSRodney W. Grimes 		} else {
1878df8bae1dSRodney W. Grimes 			object = current->object.vm_object;
1879df8bae1dSRodney W. Grimes 		}
18808a02c104SJohn Dyson 		/*
18818a02c104SJohn Dyson 		 * Note that there is absolutely no sense in writing out
18828a02c104SJohn Dyson 		 * anonymous objects, so we track down the vnode object
18838a02c104SJohn Dyson 		 * to write out.
18848a02c104SJohn Dyson 		 * We invalidate (remove) all pages from the address space
18858a02c104SJohn Dyson 		 * anyway, for semantic correctness.
18868a02c104SJohn Dyson 		 */
18878a02c104SJohn Dyson 		while (object->backing_object) {
18888a02c104SJohn Dyson 			object = object->backing_object;
18898a02c104SJohn Dyson 			offset += object->backing_object_offset;
18908a02c104SJohn Dyson 			if (object->size < OFF_TO_IDX( offset + size))
18918a02c104SJohn Dyson 			if (object->size < OFF_TO_IDX(offset + size))
18928a02c104SJohn Dyson 		}
189324a1cce3SDavid Greenman 		if (object && (object->type == OBJT_VNODE)) {
1894df8bae1dSRodney W. Grimes 			/*
18950d94caffSDavid Greenman 			 * Flush pages if writing is allowed. XXX should we continue
18960d94caffSDavid Greenman 			 * on an error?
1897f5cf85d4SDavid Greenman 			 *
1898f5cf85d4SDavid Greenman 			 * XXX Doing async I/O and then removing all the pages from
1899f5cf85d4SDavid Greenman 			 *     the object before it completes is probably a very bad
1900f5cf85d4SDavid Greenman 			 *     idea.
1901df8bae1dSRodney W. Grimes 			 */
1902a02051c3SJohn Dyson 			if (current->protection & VM_PROT_WRITE) {
19038f9110f6SJohn Dyson 				int flags;
19042be70f79SJohn Dyson 				if (object->type == OBJT_VNODE)
1905157ac55fSJohn Dyson 					vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
19068f9110f6SJohn Dyson 				flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
19078f9110f6SJohn Dyson 				flags |= invalidate ? OBJPC_INVAL : 0;
1908a316d390SJohn Dyson 				vm_object_page_clean(object,
1909a316d390SJohn Dyson 					OFF_TO_IDX(offset),
19102be70f79SJohn Dyson 					OFF_TO_IDX(offset + size + PAGE_MASK),
19118f9110f6SJohn Dyson 					flags);
1912cf2819ccSJohn Dyson 				if (invalidate) {
1913cf2819ccSJohn Dyson 					vm_object_pip_wait(object, "objmcl");
1914a316d390SJohn Dyson 					vm_object_page_remove(object,
1915a316d390SJohn Dyson 						OFF_TO_IDX(offset),
19162be70f79SJohn Dyson 						OFF_TO_IDX(offset + size + PAGE_MASK),
1917a316d390SJohn Dyson 						FALSE);
1918cf2819ccSJohn Dyson 				}
19192be70f79SJohn Dyson 				if (object->type == OBJT_VNODE)
19202be70f79SJohn Dyson 					VOP_UNLOCK(object->handle, 0, curproc);
1921bf4bd9bdSDavid Greenman 			}
1922a02051c3SJohn Dyson 		}
1923df8bae1dSRodney W. Grimes 		start += size;
1924df8bae1dSRodney W. Grimes 	}
1925df8bae1dSRodney W. Grimes 
1926df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
1927df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1928df8bae1dSRodney W. Grimes }
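
/*
 * A minimal usage sketch (not in the original file): msync-style flushing
 * with vm_map_clean.  syncio == TRUE makes the writes synchronous;
 * invalidate == TRUE additionally removes the cached pages.  All names are
 * hypothetical.
 */
#if 0
static int
vm_map_clean_example(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	/* Synchronously write back dirty pages and drop the cached copies. */
	return (vm_map_clean(map, addr, addr + len, TRUE, TRUE));
}
#endif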
1929df8bae1dSRodney W. Grimes 
1930df8bae1dSRodney W. Grimes /*
1931df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
1932df8bae1dSRodney W. Grimes  *
1933df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
1934df8bae1dSRodney W. Grimes  *
1935df8bae1dSRodney W. Grimes  *	The map in question should be locked.
1936df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
1937df8bae1dSRodney W. Grimes  */
19380362d7d7SJohn Dyson static void
19390d94caffSDavid Greenman vm_map_entry_unwire(map, entry)
1940df8bae1dSRodney W. Grimes 	vm_map_t map;
1941c0877f10SJohn Dyson 	vm_map_entry_t entry;
1942df8bae1dSRodney W. Grimes {
1943df8bae1dSRodney W. Grimes 	vm_fault_unwire(map, entry->start, entry->end);
1944df8bae1dSRodney W. Grimes 	entry->wired_count = 0;
1945df8bae1dSRodney W. Grimes }
1946df8bae1dSRodney W. Grimes 
1947df8bae1dSRodney W. Grimes /*
1948df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
1949df8bae1dSRodney W. Grimes  *
1950df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
1951df8bae1dSRodney W. Grimes  */
19520362d7d7SJohn Dyson static void
19530d94caffSDavid Greenman vm_map_entry_delete(map, entry)
1954c0877f10SJohn Dyson 	vm_map_t map;
1955c0877f10SJohn Dyson 	vm_map_entry_t entry;
1956df8bae1dSRodney W. Grimes {
1957df8bae1dSRodney W. Grimes 	vm_map_entry_unlink(map, entry);
1958df8bae1dSRodney W. Grimes 	map->size -= entry->end - entry->start;
1959df8bae1dSRodney W. Grimes 
19602d8acc0fSJohn Dyson 	if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1961df8bae1dSRodney W. Grimes 		vm_object_deallocate(entry->object.vm_object);
1962b5b40fa6SJohn Dyson 	}
1963df8bae1dSRodney W. Grimes 
1964df8bae1dSRodney W. Grimes 	vm_map_entry_dispose(map, entry);
1965df8bae1dSRodney W. Grimes }
1966df8bae1dSRodney W. Grimes 
1967df8bae1dSRodney W. Grimes /*
1968df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
1969df8bae1dSRodney W. Grimes  *
1970df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
1971df8bae1dSRodney W. Grimes  *	map.
1972df8bae1dSRodney W. Grimes  *
1973df8bae1dSRodney W. Grimes  *	When called with a sharing map, removes pages from
1974df8bae1dSRodney W. Grimes  *	that region from all physical maps.
1975df8bae1dSRodney W. Grimes  */
1976df8bae1dSRodney W. Grimes int
1977df8bae1dSRodney W. Grimes vm_map_delete(map, start, end)
1978c0877f10SJohn Dyson 	vm_map_t map;
1979df8bae1dSRodney W. Grimes 	vm_offset_t start;
1980c0877f10SJohn Dyson 	vm_offset_t end;
1981df8bae1dSRodney W. Grimes {
1982cbd8ec09SJohn Dyson 	vm_object_t object;
1983c0877f10SJohn Dyson 	vm_map_entry_t entry;
1984df8bae1dSRodney W. Grimes 	vm_map_entry_t first_entry;
1985df8bae1dSRodney W. Grimes 
1986df8bae1dSRodney W. Grimes 	/*
1987df8bae1dSRodney W. Grimes 	 * Find the start of the region, and clip it
1988df8bae1dSRodney W. Grimes 	 */
1989df8bae1dSRodney W. Grimes 
19902dbea5d2SJohn Dyson 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
1991df8bae1dSRodney W. Grimes 		entry = first_entry->next;
1992cbd8ec09SJohn Dyson 		object = entry->object.vm_object;
1993cbd8ec09SJohn Dyson 		if (object && (object->ref_count == 1) && (object->shadow_count == 0))
1994069e9bc1SDoug Rabson 			vm_object_set_flag(object, OBJ_ONEMAPPING);
19952dbea5d2SJohn Dyson 	} else {
1996df8bae1dSRodney W. Grimes 		entry = first_entry;
1997df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1998df8bae1dSRodney W. Grimes 		/*
19990d94caffSDavid Greenman 		 * Fix the lookup hint now, rather than each time through the
20000d94caffSDavid Greenman 		 * loop.
2001df8bae1dSRodney W. Grimes 		 */
2002df8bae1dSRodney W. Grimes 		SAVE_HINT(map, entry->prev);
2003df8bae1dSRodney W. Grimes 	}
2004df8bae1dSRodney W. Grimes 
2005df8bae1dSRodney W. Grimes 	/*
2006df8bae1dSRodney W. Grimes 	 * Save the free space hint
2007df8bae1dSRodney W. Grimes 	 */
2008df8bae1dSRodney W. Grimes 
2009b18bfc3dSJohn Dyson 	if (entry == &map->header) {
2010b18bfc3dSJohn Dyson 		map->first_free = &map->header;
20112dbea5d2SJohn Dyson 	} else if (map->first_free->start >= start) {
2012df8bae1dSRodney W. Grimes 		map->first_free = entry->prev;
20132dbea5d2SJohn Dyson 	}
2014df8bae1dSRodney W. Grimes 
2015df8bae1dSRodney W. Grimes 	/*
2016df8bae1dSRodney W. Grimes 	 * Step through all entries in this region
2017df8bae1dSRodney W. Grimes 	 */
2018df8bae1dSRodney W. Grimes 
2019df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
2020df8bae1dSRodney W. Grimes 		vm_map_entry_t next;
2021b18bfc3dSJohn Dyson 		vm_offset_t s, e;
2022cbd8ec09SJohn Dyson 		vm_pindex_t offidxstart, offidxend, count;
2023df8bae1dSRodney W. Grimes 
2024df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
2025df8bae1dSRodney W. Grimes 
2026df8bae1dSRodney W. Grimes 		s = entry->start;
2027df8bae1dSRodney W. Grimes 		e = entry->end;
2028c0877f10SJohn Dyson 		next = entry->next;
2029df8bae1dSRodney W. Grimes 
2030cbd8ec09SJohn Dyson 		offidxstart = OFF_TO_IDX(entry->offset);
2031cbd8ec09SJohn Dyson 		count = OFF_TO_IDX(e - s);
2032cbd8ec09SJohn Dyson 		object = entry->object.vm_object;
20332dbea5d2SJohn Dyson 
2034df8bae1dSRodney W. Grimes 		/*
20350d94caffSDavid Greenman 		 * Unwire before removing addresses from the pmap; otherwise,
20360d94caffSDavid Greenman 		 * unwiring will put the entries back in the pmap.
2037df8bae1dSRodney W. Grimes 		 */
2038c0877f10SJohn Dyson 		if (entry->wired_count != 0) {
2039df8bae1dSRodney W. Grimes 			vm_map_entry_unwire(map, entry);
2040c0877f10SJohn Dyson 		}
2041df8bae1dSRodney W. Grimes 
2042cbd8ec09SJohn Dyson 		offidxend = offidxstart + count;
2043df8bae1dSRodney W. Grimes 		/*
20440d94caffSDavid Greenman 		 * If this is a sharing map, we must remove *all* references
20450d94caffSDavid Greenman 		 * to this data, since we can't find all of the physical maps
20460d94caffSDavid Greenman 		 * which are sharing it.
2047df8bae1dSRodney W. Grimes 		 */
2048df8bae1dSRodney W. Grimes 
2049c0877f10SJohn Dyson 		if ((object == kernel_object) || (object == kmem_object)) {
20502dbea5d2SJohn Dyson 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2051b18bfc3dSJohn Dyson 		} else if (!map->is_main_map) {
20522dbea5d2SJohn Dyson 			vm_object_pmap_remove(object, offidxstart, offidxend);
2053b18bfc3dSJohn Dyson 		} else {
2054df8bae1dSRodney W. Grimes 			pmap_remove(map->pmap, s, e);
2055c0877f10SJohn Dyson 			if (object &&
205696fb8cf2SJohn Dyson 				((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) &&
2057c0877f10SJohn Dyson 				((object->type == OBJT_SWAP) || (object->type == OBJT_DEFAULT))) {
20582dbea5d2SJohn Dyson 				vm_object_collapse(object);
20592dbea5d2SJohn Dyson 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
20602dbea5d2SJohn Dyson 				if (object->type == OBJT_SWAP) {
2061cbd8ec09SJohn Dyson 					swap_pager_freespace(object, offidxstart, count);
20622dbea5d2SJohn Dyson 				}
2063cbd8ec09SJohn Dyson 
2064cbd8ec09SJohn Dyson 				if ((offidxend >= object->size) &&
2065cbd8ec09SJohn Dyson 					(offidxstart < object->size)) {
2066c0877f10SJohn Dyson 						object->size = offidxstart;
2067c0877f10SJohn Dyson 				}
20682dbea5d2SJohn Dyson 			}
2069b18bfc3dSJohn Dyson 		}
2070df8bae1dSRodney W. Grimes 
2071df8bae1dSRodney W. Grimes 		/*
20720d94caffSDavid Greenman 		 * Delete the entry (which may delete the object) only after
20730d94caffSDavid Greenman 		 * removing all pmap entries pointing to its pages.
20740d94caffSDavid Greenman 		 * (Otherwise, its page frames may be reallocated, and any
20750d94caffSDavid Greenman 		 * modify bits will be set in the wrong object!)
2076df8bae1dSRodney W. Grimes 		 */
2077df8bae1dSRodney W. Grimes 		vm_map_entry_delete(map, entry);
2078df8bae1dSRodney W. Grimes 		entry = next;
2079df8bae1dSRodney W. Grimes 	}
2080df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2081df8bae1dSRodney W. Grimes }
2082df8bae1dSRodney W. Grimes 
2083df8bae1dSRodney W. Grimes /*
2084df8bae1dSRodney W. Grimes  *	vm_map_remove:
2085df8bae1dSRodney W. Grimes  *
2086df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
2087df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
2088df8bae1dSRodney W. Grimes  */
2089df8bae1dSRodney W. Grimes int
2090df8bae1dSRodney W. Grimes vm_map_remove(map, start, end)
2091c0877f10SJohn Dyson 	vm_map_t map;
2092c0877f10SJohn Dyson 	vm_offset_t start;
2093c0877f10SJohn Dyson 	vm_offset_t end;
2094df8bae1dSRodney W. Grimes {
2095c0877f10SJohn Dyson 	int result, s = 0;
20968d6e8edeSDavid Greenman 
20979579ee64SDavid Greenman 	if (map == kmem_map || map == mb_map)
2098b18bfc3dSJohn Dyson 		s = splvm();
2099df8bae1dSRodney W. Grimes 
2100df8bae1dSRodney W. Grimes 	vm_map_lock(map);
2101df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
2102df8bae1dSRodney W. Grimes 	result = vm_map_delete(map, start, end);
2103df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
2104df8bae1dSRodney W. Grimes 
21059579ee64SDavid Greenman 	if (map == kmem_map || map == mb_map)
21068d6e8edeSDavid Greenman 		splx(s);
21078d6e8edeSDavid Greenman 
2108df8bae1dSRodney W. Grimes 	return (result);
2109df8bae1dSRodney W. Grimes }
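
/*
 * A minimal usage sketch (not in the original file): external callers use
 * vm_map_remove, which takes the map lock and applies VM_MAP_RANGE_CHECK
 * before calling vm_map_delete; vm_map_delete itself assumes the lock is
 * already held.  All names are hypothetical.
 */
#if 0
static int
vm_map_remove_example(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	/* Tear down every mapping in [addr, addr + len). */
	return (vm_map_remove(map, addr, addr + len));
}
#endif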
2110df8bae1dSRodney W. Grimes 
2111df8bae1dSRodney W. Grimes /*
2112df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
2113df8bae1dSRodney W. Grimes  *
2114df8bae1dSRodney W. Grimes  *	Assert that the target map allows the specified
2115df8bae1dSRodney W. Grimes  *	privilege on the entire address region given.
2116df8bae1dSRodney W. Grimes  *	The entire region must be allocated.
2117df8bae1dSRodney W. Grimes  */
21180d94caffSDavid Greenman boolean_t
2119b9dcd593SBruce Evans vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2120b9dcd593SBruce Evans 			vm_prot_t protection)
2121df8bae1dSRodney W. Grimes {
2122c0877f10SJohn Dyson 	vm_map_entry_t entry;
2123df8bae1dSRodney W. Grimes 	vm_map_entry_t tmp_entry;
2124df8bae1dSRodney W. Grimes 
2125df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2126df8bae1dSRodney W. Grimes 		return (FALSE);
2127df8bae1dSRodney W. Grimes 	}
2128df8bae1dSRodney W. Grimes 	entry = tmp_entry;
2129df8bae1dSRodney W. Grimes 
2130df8bae1dSRodney W. Grimes 	while (start < end) {
2131df8bae1dSRodney W. Grimes 		if (entry == &map->header) {
2132df8bae1dSRodney W. Grimes 			return (FALSE);
2133df8bae1dSRodney W. Grimes 		}
2134df8bae1dSRodney W. Grimes 		/*
2135df8bae1dSRodney W. Grimes 		 * No holes allowed!
2136df8bae1dSRodney W. Grimes 		 */
2137df8bae1dSRodney W. Grimes 
2138df8bae1dSRodney W. Grimes 		if (start < entry->start) {
2139df8bae1dSRodney W. Grimes 			return (FALSE);
2140df8bae1dSRodney W. Grimes 		}
2141df8bae1dSRodney W. Grimes 		/*
2142df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
2143df8bae1dSRodney W. Grimes 		 */
2144df8bae1dSRodney W. Grimes 
2145df8bae1dSRodney W. Grimes 		if ((entry->protection & protection) != protection) {
2146df8bae1dSRodney W. Grimes 			return (FALSE);
2147df8bae1dSRodney W. Grimes 		}
2148df8bae1dSRodney W. Grimes 		/* go to next entry */
2149df8bae1dSRodney W. Grimes 
2150df8bae1dSRodney W. Grimes 		start = entry->end;
2151df8bae1dSRodney W. Grimes 		entry = entry->next;
2152df8bae1dSRodney W. Grimes 	}
2153df8bae1dSRodney W. Grimes 	return (TRUE);
2154df8bae1dSRodney W. Grimes }
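
/*
 * Example (illustrative sketch): verifying that an entire user range is
 * mapped with the needed permission before operating on it, in the style
 * of useracc().  The wrapper name and the read-locking discipline shown
 * here are assumptions, not code taken from a caller.
 */
#if 0
static boolean_t
example_range_accessible(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot)
{
	boolean_t rv;

	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page(addr),
	    round_page(addr + len), prot);
	vm_map_unlock_read(map);
	return (rv);
}
#endif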
2155df8bae1dSRodney W. Grimes 
215686524867SJohn Dyson /*
215786524867SJohn Dyson  * Split the pages in a map entry into a new object.  This affords
215886524867SJohn Dyson  * easier removal of unused pages, and keeps object inheritance from
215986524867SJohn Dyson  * having a negative impact on memory usage.
216086524867SJohn Dyson  */
2161c0877f10SJohn Dyson static void
2162c0877f10SJohn Dyson vm_map_split(entry)
2163c0877f10SJohn Dyson 	vm_map_entry_t entry;
2164c0877f10SJohn Dyson {
216586524867SJohn Dyson 	vm_page_t m;
2166bd6be915SJohn Dyson 	vm_object_t orig_object, new_object, source;
2167c0877f10SJohn Dyson 	vm_offset_t s, e;
2168c0877f10SJohn Dyson 	vm_pindex_t offidxstart, offidxend, idx;
2169c0877f10SJohn Dyson 	vm_size_t size;
2170c0877f10SJohn Dyson 	vm_ooffset_t offset;
2171c0877f10SJohn Dyson 
2172c0877f10SJohn Dyson 	orig_object = entry->object.vm_object;
2173c0877f10SJohn Dyson 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2174c0877f10SJohn Dyson 		return;
2175c0877f10SJohn Dyson 	if (orig_object->ref_count <= 1)
2176c0877f10SJohn Dyson 		return;
2177c0877f10SJohn Dyson 
2178c0877f10SJohn Dyson 	offset = entry->offset;
2179c0877f10SJohn Dyson 	s = entry->start;
2180c0877f10SJohn Dyson 	e = entry->end;
2181c0877f10SJohn Dyson 
2182c0877f10SJohn Dyson 	offidxstart = OFF_TO_IDX(offset);
2183c0877f10SJohn Dyson 	offidxend = offidxstart + OFF_TO_IDX(e - s);
2184c0877f10SJohn Dyson 	size = offidxend - offidxstart;
2185c0877f10SJohn Dyson 
2186c0877f10SJohn Dyson 	new_object = vm_pager_allocate(orig_object->type,
21876cde7a16SDavid Greenman 		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2188c0877f10SJohn Dyson 	if (new_object == NULL)
2189c0877f10SJohn Dyson 		return;
2190c0877f10SJohn Dyson 
2191bd6be915SJohn Dyson 	source = orig_object->backing_object;
2192bd6be915SJohn Dyson 	if (source != NULL) {
2193bd6be915SJohn Dyson 		vm_object_reference(source);	/* Referenced by new_object */
2194bd6be915SJohn Dyson 		TAILQ_INSERT_TAIL(&source->shadow_head,
2195bd6be915SJohn Dyson 				  new_object, shadow_list);
2196069e9bc1SDoug Rabson 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
2197bd6be915SJohn Dyson 		new_object->backing_object_offset =
2198a0fce827SJohn Polstra 			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2199bd6be915SJohn Dyson 		new_object->backing_object = source;
2200bd6be915SJohn Dyson 		source->shadow_count++;
2201bd6be915SJohn Dyson 		source->generation++;
2202bd6be915SJohn Dyson 	}
2203bd6be915SJohn Dyson 
2204c0877f10SJohn Dyson 	for (idx = 0; idx < size; idx++) {
2205c0877f10SJohn Dyson 		vm_page_t m;
2206c0877f10SJohn Dyson 
2207c0877f10SJohn Dyson 	retry:
2208c0877f10SJohn Dyson 		m = vm_page_lookup(orig_object, offidxstart + idx);
2209c0877f10SJohn Dyson 		if (m == NULL)
2210c0877f10SJohn Dyson 			continue;
22111c7c3c6aSMatthew Dillon 
22121c7c3c6aSMatthew Dillon 		/*
22131c7c3c6aSMatthew Dillon 		 * We must wait for pending I/O to complete before we can
22141c7c3c6aSMatthew Dillon 		 * rename the page.
22151c7c3c6aSMatthew Dillon 		 */
22161c7c3c6aSMatthew Dillon 		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2217c0877f10SJohn Dyson 			goto retry;
2218c0877f10SJohn Dyson 
2219e69763a3SDoug Rabson 		vm_page_busy(m);
2220c0877f10SJohn Dyson 		vm_page_protect(m, VM_PROT_NONE);
2221c0877f10SJohn Dyson 		vm_page_rename(m, new_object, idx);
22227dbf82dcSMatthew Dillon 		/* page automatically made dirty by rename and cache handled */
2223e69763a3SDoug Rabson 		vm_page_busy(m);
2224c0877f10SJohn Dyson 	}
2225c0877f10SJohn Dyson 
2226c0877f10SJohn Dyson 	if (orig_object->type == OBJT_SWAP) {
2227d474eaaaSDoug Rabson 		vm_object_pip_add(orig_object, 1);
2228c0877f10SJohn Dyson 		/*
2229c0877f10SJohn Dyson 		 * copy orig_object pages into new_object
2230c0877f10SJohn Dyson 		 * and destroy unneeded pages in
2231c0877f10SJohn Dyson 		 * shadow object.
2232c0877f10SJohn Dyson 		 */
22331c7c3c6aSMatthew Dillon 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
2234c0877f10SJohn Dyson 		vm_object_pip_wakeup(orig_object);
2235c0877f10SJohn Dyson 	}
2236c0877f10SJohn Dyson 
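	/*
	 * Wake up (unbusy) the pages that now live in new_object; they
	 * were left busy across the rename and the swap copy above.
	 */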
223786524867SJohn Dyson 	for (idx = 0; idx < size; idx++) {
223886524867SJohn Dyson 		m = vm_page_lookup(new_object, idx);
223986524867SJohn Dyson 		if (m) {
2240e69763a3SDoug Rabson 			vm_page_wakeup(m);
224186524867SJohn Dyson 		}
224286524867SJohn Dyson 	}
224386524867SJohn Dyson 
2244c0877f10SJohn Dyson 	entry->object.vm_object = new_object;
2245c0877f10SJohn Dyson 	entry->offset = 0LL;
2246c0877f10SJohn Dyson 	vm_object_deallocate(orig_object);
2247c0877f10SJohn Dyson }
2248c0877f10SJohn Dyson 
2249df8bae1dSRodney W. Grimes /*
2250df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
2251df8bae1dSRodney W. Grimes  *
2252df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
2253df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
2254df8bae1dSRodney W. Grimes  */
2255f708ef1bSPoul-Henning Kamp static void
22560d94caffSDavid Greenman vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2257df8bae1dSRodney W. Grimes 	vm_map_t src_map, dst_map;
2258c0877f10SJohn Dyson 	vm_map_entry_t src_entry, dst_entry;
2259df8bae1dSRodney W. Grimes {
2260c0877f10SJohn Dyson 	vm_object_t src_object;
2261c0877f10SJohn Dyson 
2262afa07f7eSJohn Dyson 	if ((dst_entry->eflags|src_entry->eflags) &
2263afa07f7eSJohn Dyson 		(MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
2264df8bae1dSRodney W. Grimes 		return;
2265df8bae1dSRodney W. Grimes 
2266df8bae1dSRodney W. Grimes 	if (src_entry->wired_count == 0) {
2267df8bae1dSRodney W. Grimes 
2268df8bae1dSRodney W. Grimes 		/*
22690d94caffSDavid Greenman 		 * If the source entry is marked needs_copy, it is already
22700d94caffSDavid Greenman 		 * write-protected.
2271df8bae1dSRodney W. Grimes 		 */
2272afa07f7eSJohn Dyson 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2273df8bae1dSRodney W. Grimes 			pmap_protect(src_map->pmap,
2274df8bae1dSRodney W. Grimes 			    src_entry->start,
2275df8bae1dSRodney W. Grimes 			    src_entry->end,
2276df8bae1dSRodney W. Grimes 			    src_entry->protection & ~VM_PROT_WRITE);
2277df8bae1dSRodney W. Grimes 		}
2278b18bfc3dSJohn Dyson 
2279df8bae1dSRodney W. Grimes 		/*
2280df8bae1dSRodney W. Grimes 		 * Make a copy of the object.
2281df8bae1dSRodney W. Grimes 		 */
22828aef1712SMatthew Dillon 		if ((src_object = src_entry->object.vm_object) != NULL) {
2283c0877f10SJohn Dyson 
2284c0877f10SJohn Dyson 			if ((src_object->handle == NULL) &&
2285c0877f10SJohn Dyson 				(src_object->type == OBJT_DEFAULT ||
2286c0877f10SJohn Dyson 				 src_object->type == OBJT_SWAP)) {
2287c0877f10SJohn Dyson 				vm_object_collapse(src_object);
228896fb8cf2SJohn Dyson 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2289c0877f10SJohn Dyson 					vm_map_split(src_entry);
2290cbd8ec09SJohn Dyson 					src_map->timestamp++;
2291c0877f10SJohn Dyson 					src_object = src_entry->object.vm_object;
2292c0877f10SJohn Dyson 				}
2293c0877f10SJohn Dyson 			}
2294c0877f10SJohn Dyson 
2295c0877f10SJohn Dyson 			vm_object_reference(src_object);
2296069e9bc1SDoug Rabson 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2297c0877f10SJohn Dyson 			dst_entry->object.vm_object = src_object;
2298afa07f7eSJohn Dyson 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2299afa07f7eSJohn Dyson 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2300b18bfc3dSJohn Dyson 			dst_entry->offset = src_entry->offset;
2301b18bfc3dSJohn Dyson 		} else {
2302b18bfc3dSJohn Dyson 			dst_entry->object.vm_object = NULL;
2303b18bfc3dSJohn Dyson 			dst_entry->offset = 0;
2304b18bfc3dSJohn Dyson 		}
2305df8bae1dSRodney W. Grimes 
2306df8bae1dSRodney W. Grimes 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2307df8bae1dSRodney W. Grimes 		    dst_entry->end - dst_entry->start, src_entry->start);
23080d94caffSDavid Greenman 	} else {
2309df8bae1dSRodney W. Grimes 		/*
2310df8bae1dSRodney W. Grimes 		 * Of course, wired down pages can't be set copy-on-write.
23110d94caffSDavid Greenman 		 * Cause wired pages to be copied into the new map by
23120d94caffSDavid Greenman 		 * simulating faults (the new pages are pageable)
2313df8bae1dSRodney W. Grimes 		 */
2314df8bae1dSRodney W. Grimes 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2315df8bae1dSRodney W. Grimes 	}
2316df8bae1dSRodney W. Grimes }
2317df8bae1dSRodney W. Grimes 
2318df8bae1dSRodney W. Grimes /*
2319df8bae1dSRodney W. Grimes  * vmspace_fork:
2320df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
2321df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
2322df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
2323df8bae1dSRodney W. Grimes  * values on the regions in that map.
2324df8bae1dSRodney W. Grimes  *
2325df8bae1dSRodney W. Grimes  * The source map must not be locked.
2326df8bae1dSRodney W. Grimes  */
2327df8bae1dSRodney W. Grimes struct vmspace *
2328df8bae1dSRodney W. Grimes vmspace_fork(vm1)
2329c0877f10SJohn Dyson 	struct vmspace *vm1;
2330df8bae1dSRodney W. Grimes {
2331c0877f10SJohn Dyson 	struct vmspace *vm2;
2332df8bae1dSRodney W. Grimes 	vm_map_t old_map = &vm1->vm_map;
2333df8bae1dSRodney W. Grimes 	vm_map_t new_map;
2334df8bae1dSRodney W. Grimes 	vm_map_entry_t old_entry;
2335df8bae1dSRodney W. Grimes 	vm_map_entry_t new_entry;
2336df8bae1dSRodney W. Grimes 	pmap_t new_pmap;
2337de5f6a77SJohn Dyson 	vm_object_t object;
2338df8bae1dSRodney W. Grimes 
2339df8bae1dSRodney W. Grimes 	vm_map_lock(old_map);
2340df8bae1dSRodney W. Grimes 
23412d8acc0fSJohn Dyson 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2342df8bae1dSRodney W. Grimes 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2343df8bae1dSRodney W. Grimes 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2344df8bae1dSRodney W. Grimes 	new_pmap = &vm2->vm_pmap;	/* XXX */
2345df8bae1dSRodney W. Grimes 	new_map = &vm2->vm_map;	/* XXX */
234647221757SJohn Dyson 	new_map->timestamp = 1;
2347df8bae1dSRodney W. Grimes 
2348df8bae1dSRodney W. Grimes 	old_entry = old_map->header.next;
2349df8bae1dSRodney W. Grimes 
2350df8bae1dSRodney W. Grimes 	while (old_entry != &old_map->header) {
2351afa07f7eSJohn Dyson 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2352df8bae1dSRodney W. Grimes 			panic("vm_map_fork: encountered a submap");
2353df8bae1dSRodney W. Grimes 
2354df8bae1dSRodney W. Grimes 		switch (old_entry->inheritance) {
2355df8bae1dSRodney W. Grimes 		case VM_INHERIT_NONE:
2356df8bae1dSRodney W. Grimes 			break;
2357df8bae1dSRodney W. Grimes 
2358df8bae1dSRodney W. Grimes 		case VM_INHERIT_SHARE:
2359df8bae1dSRodney W. Grimes 			/*
2360fed9a903SJohn Dyson 			 * Clone the entry, creating the shared object if necessary.
2361fed9a903SJohn Dyson 			 */
2362fed9a903SJohn Dyson 			object = old_entry->object.vm_object;
2363fed9a903SJohn Dyson 			if (object == NULL) {
2364fed9a903SJohn Dyson 				object = vm_object_allocate(OBJT_DEFAULT,
2365c2e11a03SJohn Dyson 					atop(old_entry->end - old_entry->start));
2366fed9a903SJohn Dyson 				old_entry->object.vm_object = object;
2367fed9a903SJohn Dyson 				old_entry->offset = (vm_offset_t) 0;
23685069bf57SJohn Dyson 			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
23695069bf57SJohn Dyson 				vm_object_shadow(&old_entry->object.vm_object,
23705069bf57SJohn Dyson 					&old_entry->offset,
2371c2e11a03SJohn Dyson 					atop(old_entry->end - old_entry->start));
23725069bf57SJohn Dyson 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
23735069bf57SJohn Dyson 				object = old_entry->object.vm_object;
2374fed9a903SJohn Dyson 			}
2375069e9bc1SDoug Rabson 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2376fed9a903SJohn Dyson 
2377fed9a903SJohn Dyson 			/*
2378df8bae1dSRodney W. Grimes 			 * Clone the entry, referencing the sharing map.
2379df8bae1dSRodney W. Grimes 			 */
2380df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2381df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
2382df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
238395e5e988SJohn Dyson 			vm_object_reference(object);
2384df8bae1dSRodney W. Grimes 
2385df8bae1dSRodney W. Grimes 			/*
23860d94caffSDavid Greenman 			 * Insert the entry into the new map -- we know we're
23870d94caffSDavid Greenman 			 * inserting at the end of the new map.
2388df8bae1dSRodney W. Grimes 			 */
2389df8bae1dSRodney W. Grimes 
2390df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2391df8bae1dSRodney W. Grimes 			    new_entry);
2392df8bae1dSRodney W. Grimes 
2393df8bae1dSRodney W. Grimes 			/*
2394df8bae1dSRodney W. Grimes 			 * Update the physical map
2395df8bae1dSRodney W. Grimes 			 */
2396df8bae1dSRodney W. Grimes 
2397df8bae1dSRodney W. Grimes 			pmap_copy(new_map->pmap, old_map->pmap,
2398df8bae1dSRodney W. Grimes 			    new_entry->start,
2399df8bae1dSRodney W. Grimes 			    (old_entry->end - old_entry->start),
2400df8bae1dSRodney W. Grimes 			    old_entry->start);
2401df8bae1dSRodney W. Grimes 			break;
2402df8bae1dSRodney W. Grimes 
2403df8bae1dSRodney W. Grimes 		case VM_INHERIT_COPY:
2404df8bae1dSRodney W. Grimes 			/*
2405df8bae1dSRodney W. Grimes 			 * Clone the entry and link into the map.
2406df8bae1dSRodney W. Grimes 			 */
2407df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2408df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
2409df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
2410df8bae1dSRodney W. Grimes 			new_entry->object.vm_object = NULL;
2411afa07f7eSJohn Dyson 			new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
2412df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2413df8bae1dSRodney W. Grimes 			    new_entry);
2414bd7e5f99SJohn Dyson 			vm_map_copy_entry(old_map, new_map, old_entry,
2415bd7e5f99SJohn Dyson 			    new_entry);
2416df8bae1dSRodney W. Grimes 			break;
2417df8bae1dSRodney W. Grimes 		}
2418df8bae1dSRodney W. Grimes 		old_entry = old_entry->next;
2419df8bae1dSRodney W. Grimes 	}
2420df8bae1dSRodney W. Grimes 
2421df8bae1dSRodney W. Grimes 	new_map->size = old_map->size;
2422df8bae1dSRodney W. Grimes 	vm_map_unlock(old_map);
2423cbd8ec09SJohn Dyson 	old_map->timestamp++;
2424df8bae1dSRodney W. Grimes 
2425df8bae1dSRodney W. Grimes 	return (vm2);
2426df8bae1dSRodney W. Grimes }
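
/*
 * Example (illustrative sketch): the fork path hands the child a copied
 * address space in a single call; "p1" (parent) and "p2" (child) are
 * hypothetical names, in the style of vm_fork().
 */
#if 0
static void
example_fork_vm(struct proc *p1, struct proc *p2)
{
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
}
#endif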
2427df8bae1dSRodney W. Grimes 
2428df8bae1dSRodney W. Grimes /*
24295856e12eSJohn Dyson  * Unshare the specified VM space for exec.  If other processes are
24305856e12eSJohn Dyson  * sharing it, create a new one.  The new vmspace starts out empty.
24315856e12eSJohn Dyson  */
24325856e12eSJohn Dyson 
24335856e12eSJohn Dyson void
24345856e12eSJohn Dyson vmspace_exec(struct proc *p) {
24355856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
24365856e12eSJohn Dyson 	struct vmspace *newvmspace;
24375856e12eSJohn Dyson 	vm_map_t map = &p->p_vmspace->vm_map;
24385856e12eSJohn Dyson 
24392d8acc0fSJohn Dyson 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
24405856e12eSJohn Dyson 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
24415856e12eSJohn Dyson 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
24425856e12eSJohn Dyson 	/*
24435856e12eSJohn Dyson 	 * This code is written like this for prototype purposes.  The
24445856e12eSJohn Dyson 	 * goal is to avoid running down the vmspace here, but to let the
24455856e12eSJohn Dyson 	 * other processes that are still using the vmspace finally
24465856e12eSJohn Dyson 	 * run it down.  Even though there is little or no chance of blocking
24475856e12eSJohn Dyson 	 * here, it is a good idea to keep this form for future mods.
24485856e12eSJohn Dyson 	 */
24495856e12eSJohn Dyson 	vmspace_free(oldvmspace);
24505856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
24515856e12eSJohn Dyson 	if (p == curproc)
24525856e12eSJohn Dyson 		pmap_activate(p);
24535856e12eSJohn Dyson }
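
/*
 * Example (illustrative sketch): a caller that switches to a fresh vmspace
 * only when the current one is shared with another process (an unshared
 * one could instead be cleared in place).  The refcount test and wrapper
 * name are assumptions, not code from an actual caller.
 */
#if 0
static void
example_exec_vm(struct proc *p)
{
	if (p->p_vmspace->vm_refcnt > 1)
		vmspace_exec(p);
}
#endif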
24545856e12eSJohn Dyson 
24555856e12eSJohn Dyson /*
24565856e12eSJohn Dyson  * Unshare the specified VM space for forcing COW.  This
24575856e12eSJohn Dyson  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
24585856e12eSJohn Dyson  */
24595856e12eSJohn Dyson 
24605856e12eSJohn Dyson void
24615856e12eSJohn Dyson vmspace_unshare(struct proc *p) {
24625856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
24635856e12eSJohn Dyson 	struct vmspace *newvmspace;
24645856e12eSJohn Dyson 
24655856e12eSJohn Dyson 	if (oldvmspace->vm_refcnt == 1)
24665856e12eSJohn Dyson 		return;
24675856e12eSJohn Dyson 	newvmspace = vmspace_fork(oldvmspace);
24685856e12eSJohn Dyson 	vmspace_free(oldvmspace);
24695856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
24705856e12eSJohn Dyson 	if (p == curproc)
24715856e12eSJohn Dyson 		pmap_activate(p);
24725856e12eSJohn Dyson }
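
/*
 * Example (illustrative sketch): the rfork case described in the comment
 * above, paraphrased as code.  The "flags" variable and wrapper name are
 * hypothetical; RFMEM/RFPROC come from <sys/unistd.h>.
 */
#if 0
static void
example_rfork_vm(struct proc *p, int flags)
{
	if ((flags & (RFMEM | RFPROC)) == 0)
		vmspace_unshare(p);
}
#endif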
24735856e12eSJohn Dyson 
24745856e12eSJohn Dyson 
24755856e12eSJohn Dyson /*
2476df8bae1dSRodney W. Grimes  *	vm_map_lookup:
2477df8bae1dSRodney W. Grimes  *
2478df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
2479df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
2480df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
2481df8bae1dSRodney W. Grimes  *	type specified.
2482df8bae1dSRodney W. Grimes  *
2483df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
2484df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
2485df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
2486df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
2487df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
2488df8bae1dSRodney W. Grimes  *
2489df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
2490df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
2491df8bae1dSRodney W. Grimes  *
2492df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
2493df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
2494df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
2495df8bae1dSRodney W. Grimes  *	remain the same.
2496df8bae1dSRodney W. Grimes  */
2497df8bae1dSRodney W. Grimes int
2498b9dcd593SBruce Evans vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2499b9dcd593SBruce Evans 	      vm_offset_t vaddr,
250047221757SJohn Dyson 	      vm_prot_t fault_typea,
2501b9dcd593SBruce Evans 	      vm_map_entry_t *out_entry,	/* OUT */
2502b9dcd593SBruce Evans 	      vm_object_t *object,		/* OUT */
2503b9dcd593SBruce Evans 	      vm_pindex_t *pindex,		/* OUT */
2504b9dcd593SBruce Evans 	      vm_prot_t *out_prot,		/* OUT */
25052d8acc0fSJohn Dyson 	      boolean_t *wired)			/* OUT */
2506df8bae1dSRodney W. Grimes {
2507df8bae1dSRodney W. Grimes 	vm_map_t share_map;
2508df8bae1dSRodney W. Grimes 	vm_offset_t share_offset;
2509c0877f10SJohn Dyson 	vm_map_entry_t entry;
2510c0877f10SJohn Dyson 	vm_map_t map = *var_map;
2511c0877f10SJohn Dyson 	vm_prot_t prot;
2512c0877f10SJohn Dyson 	boolean_t su;
251347221757SJohn Dyson 	vm_prot_t fault_type = fault_typea;
2514df8bae1dSRodney W. Grimes 
2515df8bae1dSRodney W. Grimes RetryLookup:;
2516df8bae1dSRodney W. Grimes 
2517df8bae1dSRodney W. Grimes 	/*
2518df8bae1dSRodney W. Grimes 	 * Lookup the faulting address.
2519df8bae1dSRodney W. Grimes 	 */
2520df8bae1dSRodney W. Grimes 
2521df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
2522df8bae1dSRodney W. Grimes 
2523df8bae1dSRodney W. Grimes #define	RETURN(why) \
2524df8bae1dSRodney W. Grimes 		{ \
2525df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map); \
2526df8bae1dSRodney W. Grimes 		return(why); \
2527df8bae1dSRodney W. Grimes 		}
2528df8bae1dSRodney W. Grimes 
2529df8bae1dSRodney W. Grimes 	/*
25300d94caffSDavid Greenman 	 * If the map has an interesting hint, try it before calling full
25310d94caffSDavid Greenman 	 * blown lookup routine.
2532df8bae1dSRodney W. Grimes 	 */
2533df8bae1dSRodney W. Grimes 
2534df8bae1dSRodney W. Grimes 	entry = map->hint;
2535df8bae1dSRodney W. Grimes 
2536df8bae1dSRodney W. Grimes 	*out_entry = entry;
2537df8bae1dSRodney W. Grimes 
2538df8bae1dSRodney W. Grimes 	if ((entry == &map->header) ||
2539df8bae1dSRodney W. Grimes 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2540df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp_entry;
2541df8bae1dSRodney W. Grimes 
2542df8bae1dSRodney W. Grimes 		/*
25430d94caffSDavid Greenman 		 * Entry was either not a valid hint, or the vaddr was not
25440d94caffSDavid Greenman 		 * contained in the entry, so do a full lookup.
2545df8bae1dSRodney W. Grimes 		 */
2546df8bae1dSRodney W. Grimes 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2547df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2548df8bae1dSRodney W. Grimes 
2549df8bae1dSRodney W. Grimes 		entry = tmp_entry;
2550df8bae1dSRodney W. Grimes 		*out_entry = entry;
2551df8bae1dSRodney W. Grimes 	}
2552b7b2aac2SJohn Dyson 
2553df8bae1dSRodney W. Grimes 	/*
2554df8bae1dSRodney W. Grimes 	 * Handle submaps.
2555df8bae1dSRodney W. Grimes 	 */
2556df8bae1dSRodney W. Grimes 
2557afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2558df8bae1dSRodney W. Grimes 		vm_map_t old_map = map;
2559df8bae1dSRodney W. Grimes 
2560df8bae1dSRodney W. Grimes 		*var_map = map = entry->object.sub_map;
2561df8bae1dSRodney W. Grimes 		vm_map_unlock_read(old_map);
2562df8bae1dSRodney W. Grimes 		goto RetryLookup;
2563df8bae1dSRodney W. Grimes 	}
2564a04c970aSJohn Dyson 
2565df8bae1dSRodney W. Grimes 	/*
25660d94caffSDavid Greenman 	 * Check whether this task is allowed to have this page.
2567a04c970aSJohn Dyson 	 * Note the special case for MAP_ENTRY_COW
2568a04c970aSJohn Dyson 	 * pages with an override.  This is to implement a forced
2569a04c970aSJohn Dyson 	 * COW for debuggers.
2570df8bae1dSRodney W. Grimes 	 */
2571df8bae1dSRodney W. Grimes 
2572480ba2f5SJohn Dyson 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2573480ba2f5SJohn Dyson 		prot = entry->max_protection;
2574480ba2f5SJohn Dyson 	else
2575df8bae1dSRodney W. Grimes 		prot = entry->protection;
257647221757SJohn Dyson 
257747221757SJohn Dyson 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
257847221757SJohn Dyson 	if ((fault_type & prot) != fault_type) {
257947221757SJohn Dyson 			RETURN(KERN_PROTECTION_FAILURE);
258047221757SJohn Dyson 	}
258147221757SJohn Dyson 
2582480ba2f5SJohn Dyson 	if (entry->wired_count && (fault_type & VM_PROT_WRITE) &&
258347221757SJohn Dyson 			(entry->eflags & MAP_ENTRY_COW) &&
258447221757SJohn Dyson 			(fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2585df8bae1dSRodney W. Grimes 			RETURN(KERN_PROTECTION_FAILURE);
2586a04c970aSJohn Dyson 	}
2587df8bae1dSRodney W. Grimes 
2588df8bae1dSRodney W. Grimes 	/*
25890d94caffSDavid Greenman 	 * If this page is not pageable, we have to get it for all possible
25900d94caffSDavid Greenman 	 * accesses.
2591df8bae1dSRodney W. Grimes 	 */
2592df8bae1dSRodney W. Grimes 
259305f0fdd2SPoul-Henning Kamp 	*wired = (entry->wired_count != 0);
259405f0fdd2SPoul-Henning Kamp 	if (*wired)
2595df8bae1dSRodney W. Grimes 		prot = fault_type = entry->protection;
2596df8bae1dSRodney W. Grimes 
2597df8bae1dSRodney W. Grimes 	/*
25980d94caffSDavid Greenman 	 * If we don't already have a VM object, track it down.
2599df8bae1dSRodney W. Grimes 	 */
2600df8bae1dSRodney W. Grimes 
2601afa07f7eSJohn Dyson 	su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
260205f0fdd2SPoul-Henning Kamp 	if (su) {
2603df8bae1dSRodney W. Grimes 		share_map = map;
2604df8bae1dSRodney W. Grimes 		share_offset = vaddr;
26050d94caffSDavid Greenman 	} else {
2606df8bae1dSRodney W. Grimes 		vm_map_entry_t share_entry;
2607df8bae1dSRodney W. Grimes 
2608df8bae1dSRodney W. Grimes 		/*
2609df8bae1dSRodney W. Grimes 		 * Compute the sharing map, and offset into it.
2610df8bae1dSRodney W. Grimes 		 */
2611df8bae1dSRodney W. Grimes 
2612df8bae1dSRodney W. Grimes 		share_map = entry->object.share_map;
2613df8bae1dSRodney W. Grimes 		share_offset = (vaddr - entry->start) + entry->offset;
2614df8bae1dSRodney W. Grimes 
2615df8bae1dSRodney W. Grimes 		/*
2616df8bae1dSRodney W. Grimes 		 * Look for the backing store object and offset
2617df8bae1dSRodney W. Grimes 		 */
2618df8bae1dSRodney W. Grimes 
2619df8bae1dSRodney W. Grimes 		vm_map_lock_read(share_map);
2620df8bae1dSRodney W. Grimes 
2621df8bae1dSRodney W. Grimes 		if (!vm_map_lookup_entry(share_map, share_offset,
2622df8bae1dSRodney W. Grimes 			&share_entry)) {
2623df8bae1dSRodney W. Grimes 			vm_map_unlock_read(share_map);
2624df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2625df8bae1dSRodney W. Grimes 		}
2626df8bae1dSRodney W. Grimes 		entry = share_entry;
2627df8bae1dSRodney W. Grimes 	}
2628df8bae1dSRodney W. Grimes 
2629df8bae1dSRodney W. Grimes 	/*
2630df8bae1dSRodney W. Grimes 	 * If the entry was copy-on-write, we either ...
2631df8bae1dSRodney W. Grimes 	 */
2632df8bae1dSRodney W. Grimes 
2633afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2634df8bae1dSRodney W. Grimes 		/*
26350d94caffSDavid Greenman 		 * If we want to write the page, we may as well handle that
26360d94caffSDavid Greenman 		 * now since we've got the sharing map locked.
2637df8bae1dSRodney W. Grimes 		 *
26380d94caffSDavid Greenman 		 * If we don't need to write the page, we just demote the
26390d94caffSDavid Greenman 		 * permissions allowed.
2640df8bae1dSRodney W. Grimes 		 */
2641df8bae1dSRodney W. Grimes 
2642df8bae1dSRodney W. Grimes 		if (fault_type & VM_PROT_WRITE) {
2643df8bae1dSRodney W. Grimes 			/*
26440d94caffSDavid Greenman 			 * Make a new object, and place it in the object
26450d94caffSDavid Greenman 			 * chain.  Note that no new references have appeared
26460d94caffSDavid Greenman 			 * -- one just moved from the share map to the new
26470d94caffSDavid Greenman 			 * object.
2648df8bae1dSRodney W. Grimes 			 */
2649df8bae1dSRodney W. Grimes 
265003e9c6c1SJohn Dyson 			if (vm_map_lock_upgrade(share_map)) {
2651df8bae1dSRodney W. Grimes 				if (share_map != map)
2652df8bae1dSRodney W. Grimes 					vm_map_unlock_read(map);
2653dbc806e7SJohn Dyson 
2654df8bae1dSRodney W. Grimes 				goto RetryLookup;
2655df8bae1dSRodney W. Grimes 			}
2656df8bae1dSRodney W. Grimes 			vm_object_shadow(
2657df8bae1dSRodney W. Grimes 			    &entry->object.vm_object,
2658df8bae1dSRodney W. Grimes 			    &entry->offset,
2659c2e11a03SJohn Dyson 			    atop(entry->end - entry->start));
2660df8bae1dSRodney W. Grimes 
2661afa07f7eSJohn Dyson 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
266203e9c6c1SJohn Dyson 			vm_map_lock_downgrade(share_map);
26630d94caffSDavid Greenman 		} else {
2664df8bae1dSRodney W. Grimes 			/*
26650d94caffSDavid Greenman 			 * We're attempting to read a copy-on-write page --
26660d94caffSDavid Greenman 			 * don't allow writes.
2667df8bae1dSRodney W. Grimes 			 */
2668df8bae1dSRodney W. Grimes 
26692d8acc0fSJohn Dyson 			prot &= ~VM_PROT_WRITE;
2670df8bae1dSRodney W. Grimes 		}
2671df8bae1dSRodney W. Grimes 	}
26722d8acc0fSJohn Dyson 
2673df8bae1dSRodney W. Grimes 	/*
2674df8bae1dSRodney W. Grimes 	 * Create an object if necessary.
2675df8bae1dSRodney W. Grimes 	 */
2676df8bae1dSRodney W. Grimes 	if (entry->object.vm_object == NULL) {
2677df8bae1dSRodney W. Grimes 
267803e9c6c1SJohn Dyson 		if (vm_map_lock_upgrade(share_map)) {
2679df8bae1dSRodney W. Grimes 			if (share_map != map)
2680df8bae1dSRodney W. Grimes 				vm_map_unlock_read(map);
2681df8bae1dSRodney W. Grimes 			goto RetryLookup;
2682df8bae1dSRodney W. Grimes 		}
268324a1cce3SDavid Greenman 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2684c2e11a03SJohn Dyson 		    atop(entry->end - entry->start));
2685df8bae1dSRodney W. Grimes 		entry->offset = 0;
268603e9c6c1SJohn Dyson 		vm_map_lock_downgrade(share_map);
2687df8bae1dSRodney W. Grimes 	}
2688b5b40fa6SJohn Dyson 
26891c7c3c6aSMatthew Dillon #if 0
26901c7c3c6aSMatthew Dillon 	/*
26911c7c3c6aSMatthew Dillon 	 * (no longer applies)
26921c7c3c6aSMatthew Dillon 	 */
2693925a3a41SJohn Dyson 	if (entry->object.vm_object->type == OBJT_DEFAULT)
2694b5b40fa6SJohn Dyson 		default_pager_convert_to_swapq(entry->object.vm_object);
26951c7c3c6aSMatthew Dillon #endif
2696df8bae1dSRodney W. Grimes 	/*
26970d94caffSDavid Greenman 	 * Return the object/offset from this entry.  If the entry was
26980d94caffSDavid Greenman 	 * copy-on-write or empty, it has been fixed up.
2699df8bae1dSRodney W. Grimes 	 */
2700df8bae1dSRodney W. Grimes 
2701a316d390SJohn Dyson 	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
2702df8bae1dSRodney W. Grimes 	*object = entry->object.vm_object;
2703df8bae1dSRodney W. Grimes 
2704df8bae1dSRodney W. Grimes 	/*
2705df8bae1dSRodney W. Grimes 	 * Return whether this is the only map sharing this data.
2706df8bae1dSRodney W. Grimes 	 */
2707df8bae1dSRodney W. Grimes 
2708df8bae1dSRodney W. Grimes 	*out_prot = prot;
2709df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2710df8bae1dSRodney W. Grimes 
2711df8bae1dSRodney W. Grimes #undef	RETURN
2712df8bae1dSRodney W. Grimes }
2713df8bae1dSRodney W. Grimes 
2714df8bae1dSRodney W. Grimes /*
2715df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
2716df8bae1dSRodney W. Grimes  *
2717df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
2718df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
2719df8bae1dSRodney W. Grimes  */
2720df8bae1dSRodney W. Grimes 
27210d94caffSDavid Greenman void
27220d94caffSDavid Greenman vm_map_lookup_done(map, entry)
2723c0877f10SJohn Dyson 	vm_map_t map;
2724df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
2725df8bae1dSRodney W. Grimes {
2726df8bae1dSRodney W. Grimes 	/*
2727df8bae1dSRodney W. Grimes 	 * If this entry references a map, unlock it first.
2728df8bae1dSRodney W. Grimes 	 */
2729df8bae1dSRodney W. Grimes 
2730afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_A_MAP)
2731df8bae1dSRodney W. Grimes 		vm_map_unlock_read(entry->object.share_map);
2732df8bae1dSRodney W. Grimes 
2733df8bae1dSRodney W. Grimes 	/*
2734df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
2735df8bae1dSRodney W. Grimes 	 */
2736df8bae1dSRodney W. Grimes 
2737df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
2738df8bae1dSRodney W. Grimes }
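
/*
 * Example (illustrative sketch): the lookup/use/done pattern.  The map
 * argument is in/out, so the value written back by vm_map_lookup() (which
 * may be a submap) is the one that must be handed to vm_map_lookup_done().
 * The wrapper name is hypothetical.
 */
#if 0
static int
example_resolve_page(vm_map_t map, vm_offset_t va, vm_prot_t fault_type)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, va, fault_type,
	    &entry, &object, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* ... use object + pindex while the returned map stays read-locked ... */

	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif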
2739df8bae1dSRodney W. Grimes 
27401efb74fbSJohn Dyson /*
27411efb74fbSJohn Dyson  * Implement uiomove with VM operations.  This handles (and collateral changes)
27421efb74fbSJohn Dyson  * Implement uiomove with VM operations.  This, and its collateral changes,
27431efb74fbSJohn Dyson  * support every combination of source object modification and COW type
27441efb74fbSJohn Dyson  */
27451efb74fbSJohn Dyson int
274647221757SJohn Dyson vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
27471efb74fbSJohn Dyson 	vm_map_t mapa;
27481efb74fbSJohn Dyson 	vm_object_t srcobject;
27491efb74fbSJohn Dyson 	off_t cp;
275047221757SJohn Dyson 	int cnta;
27511efb74fbSJohn Dyson 	vm_offset_t uaddra;
275295e5e988SJohn Dyson 	int *npages;
27531efb74fbSJohn Dyson {
27541efb74fbSJohn Dyson 	vm_map_t map;
275547221757SJohn Dyson 	vm_object_t first_object, oldobject, object;
27562d8acc0fSJohn Dyson 	vm_map_entry_t entry;
27571efb74fbSJohn Dyson 	vm_prot_t prot;
27582d8acc0fSJohn Dyson 	boolean_t wired;
27591efb74fbSJohn Dyson 	int tcnt, rv;
27602d8acc0fSJohn Dyson 	vm_offset_t uaddr, start, end, tend;
27611efb74fbSJohn Dyson 	vm_pindex_t first_pindex, osize, oindex;
27621efb74fbSJohn Dyson 	off_t ooffset;
276347221757SJohn Dyson 	int cnt;
27641efb74fbSJohn Dyson 
276595e5e988SJohn Dyson 	if (npages)
276695e5e988SJohn Dyson 		*npages = 0;
276795e5e988SJohn Dyson 
276847221757SJohn Dyson 	cnt = cnta;
27692d8acc0fSJohn Dyson 	uaddr = uaddra;
27702d8acc0fSJohn Dyson 
27711efb74fbSJohn Dyson 	while (cnt > 0) {
27721efb74fbSJohn Dyson 		map = mapa;
27731efb74fbSJohn Dyson 
27741efb74fbSJohn Dyson 		if ((vm_map_lookup(&map, uaddr,
27752d8acc0fSJohn Dyson 			VM_PROT_READ, &entry, &first_object,
27762d8acc0fSJohn Dyson 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
27771efb74fbSJohn Dyson 			return EFAULT;
27781efb74fbSJohn Dyson 		}
27791efb74fbSJohn Dyson 
27802d8acc0fSJohn Dyson 		vm_map_clip_start(map, entry, uaddr);
27811efb74fbSJohn Dyson 
27821efb74fbSJohn Dyson 		tcnt = cnt;
27832d8acc0fSJohn Dyson 		tend = uaddr + tcnt;
27842d8acc0fSJohn Dyson 		if (tend > entry->end) {
27852d8acc0fSJohn Dyson 			tcnt = entry->end - uaddr;
27862d8acc0fSJohn Dyson 			tend = entry->end;
27872d8acc0fSJohn Dyson 		}
27881efb74fbSJohn Dyson 
27892d8acc0fSJohn Dyson 		vm_map_clip_end(map, entry, tend);
27901efb74fbSJohn Dyson 
27912d8acc0fSJohn Dyson 		start = entry->start;
27922d8acc0fSJohn Dyson 		end = entry->end;
27931efb74fbSJohn Dyson 
2794c2e11a03SJohn Dyson 		osize = atop(tcnt);
279595e5e988SJohn Dyson 
2796925a3a41SJohn Dyson 		oindex = OFF_TO_IDX(cp);
279795e5e988SJohn Dyson 		if (npages) {
2798925a3a41SJohn Dyson 			vm_pindex_t idx;
279995e5e988SJohn Dyson 			for (idx = 0; idx < osize; idx++) {
280095e5e988SJohn Dyson 				vm_page_t m;
2801925a3a41SJohn Dyson 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
28022d8acc0fSJohn Dyson 					vm_map_lookup_done(map, entry);
280395e5e988SJohn Dyson 					return 0;
280495e5e988SJohn Dyson 				}
28051c7c3c6aSMatthew Dillon 				/*
28061c7c3c6aSMatthew Dillon 				 * disallow busy or invalid pages, but allow
28071c7c3c6aSMatthew Dillon 				 * m->busy pages if they are entirely valid.
28081c7c3c6aSMatthew Dillon 				 */
2809925a3a41SJohn Dyson 				if ((m->flags & PG_BUSY) ||
281095e5e988SJohn Dyson 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
28112d8acc0fSJohn Dyson 					vm_map_lookup_done(map, entry);
281295e5e988SJohn Dyson 					return 0;
281395e5e988SJohn Dyson 				}
281495e5e988SJohn Dyson 			}
281595e5e988SJohn Dyson 		}
281695e5e988SJohn Dyson 
28171efb74fbSJohn Dyson /*
28181efb74fbSJohn Dyson  * If we are changing an existing map entry, just redirect
28191efb74fbSJohn Dyson  * the object, and change mappings.
28201efb74fbSJohn Dyson  */
28212d8acc0fSJohn Dyson 		if ((first_object->type == OBJT_VNODE) &&
28222d8acc0fSJohn Dyson 			((oldobject = entry->object.vm_object) == first_object)) {
28232d8acc0fSJohn Dyson 
28242d8acc0fSJohn Dyson 			if ((entry->offset != cp) || (oldobject != srcobject)) {
28252d8acc0fSJohn Dyson 				/*
28262d8acc0fSJohn Dyson 				 * Remove old window into the file
28272d8acc0fSJohn Dyson 				 */
28282d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
28292d8acc0fSJohn Dyson 
28302d8acc0fSJohn Dyson 				/*
28312d8acc0fSJohn Dyson 				 * Force copy on write for mmapped regions
28322d8acc0fSJohn Dyson 				 */
28332d8acc0fSJohn Dyson 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
28342d8acc0fSJohn Dyson 
28352d8acc0fSJohn Dyson 				/*
28362d8acc0fSJohn Dyson 				 * Point the object appropriately
28372d8acc0fSJohn Dyson 				 */
28382d8acc0fSJohn Dyson 				if (oldobject != srcobject) {
28392d8acc0fSJohn Dyson 
28402d8acc0fSJohn Dyson 				/*
28412d8acc0fSJohn Dyson 				 * Set the object optimization hint flag
28422d8acc0fSJohn Dyson 				 */
2843069e9bc1SDoug Rabson 					vm_object_set_flag(srcobject, OBJ_OPT);
28442d8acc0fSJohn Dyson 					vm_object_reference(srcobject);
28452d8acc0fSJohn Dyson 					entry->object.vm_object = srcobject;
28462d8acc0fSJohn Dyson 
28472d8acc0fSJohn Dyson 					if (oldobject) {
28482d8acc0fSJohn Dyson 						vm_object_deallocate(oldobject);
28492d8acc0fSJohn Dyson 					}
28502d8acc0fSJohn Dyson 				}
28512d8acc0fSJohn Dyson 
28522d8acc0fSJohn Dyson 				entry->offset = cp;
28532d8acc0fSJohn Dyson 				map->timestamp++;
28542d8acc0fSJohn Dyson 			} else {
28552d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
28562d8acc0fSJohn Dyson 			}
28572d8acc0fSJohn Dyson 
28582d8acc0fSJohn Dyson 		} else if ((first_object->ref_count == 1) &&
2859925a3a41SJohn Dyson 			(first_object->size == osize) &&
286047221757SJohn Dyson 			((first_object->type == OBJT_DEFAULT) ||
286147221757SJohn Dyson 				(first_object->type == OBJT_SWAP)) ) {
2862925a3a41SJohn Dyson 
2863925a3a41SJohn Dyson 			oldobject = first_object->backing_object;
2864925a3a41SJohn Dyson 
2865925a3a41SJohn Dyson 			if ((first_object->backing_object_offset != cp) ||
2866925a3a41SJohn Dyson 				(oldobject != srcobject)) {
2867925a3a41SJohn Dyson 				/*
2868925a3a41SJohn Dyson 				 * Remove old window into the file
2869925a3a41SJohn Dyson 				 */
28702d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
2871925a3a41SJohn Dyson 
2872925a3a41SJohn Dyson 				/*
287347221757SJohn Dyson 				 * Remove unneeded old pages
287447221757SJohn Dyson 				 */
287547221757SJohn Dyson 				if (first_object->resident_page_count) {
287647221757SJohn Dyson 					vm_object_page_remove (first_object, 0, 0, 0);
287747221757SJohn Dyson 				}
287847221757SJohn Dyson 
287947221757SJohn Dyson 				/*
288047221757SJohn Dyson 				 * Invalidate swap space
288147221757SJohn Dyson 				 */
288247221757SJohn Dyson 				if (first_object->type == OBJT_SWAP) {
288347221757SJohn Dyson 					swap_pager_freespace(first_object,
28841c7c3c6aSMatthew Dillon 						0,
288547221757SJohn Dyson 						first_object->size);
288647221757SJohn Dyson 				}
288747221757SJohn Dyson 
288847221757SJohn Dyson 				/*
2889925a3a41SJohn Dyson 				 * Force copy on write for mmapped regions
2890925a3a41SJohn Dyson 				 */
289147221757SJohn Dyson 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
28921efb74fbSJohn Dyson 
28931efb74fbSJohn Dyson 				/*
28941efb74fbSJohn Dyson 				 * Point the object appropriately
28951efb74fbSJohn Dyson 				 */
2896925a3a41SJohn Dyson 				if (oldobject != srcobject) {
289747221757SJohn Dyson 
2898925a3a41SJohn Dyson 				/*
2899925a3a41SJohn Dyson 				 * Set the object optimization hint flag
2900925a3a41SJohn Dyson 				 */
2901069e9bc1SDoug Rabson 					vm_object_set_flag(srcobject, OBJ_OPT);
2902925a3a41SJohn Dyson 					vm_object_reference(srcobject);
2903925a3a41SJohn Dyson 
2904925a3a41SJohn Dyson 					if (oldobject) {
2905925a3a41SJohn Dyson 						TAILQ_REMOVE(&oldobject->shadow_head,
2906925a3a41SJohn Dyson 							first_object, shadow_list);
2907925a3a41SJohn Dyson 						oldobject->shadow_count--;
2908925a3a41SJohn Dyson 						vm_object_deallocate(oldobject);
2909925a3a41SJohn Dyson 					}
2910925a3a41SJohn Dyson 
2911925a3a41SJohn Dyson 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
2912925a3a41SJohn Dyson 						first_object, shadow_list);
2913925a3a41SJohn Dyson 					srcobject->shadow_count++;
2914925a3a41SJohn Dyson 
2915925a3a41SJohn Dyson 					first_object->backing_object = srcobject;
2916925a3a41SJohn Dyson 				}
29171efb74fbSJohn Dyson 				first_object->backing_object_offset = cp;
29182d8acc0fSJohn Dyson 				map->timestamp++;
2919925a3a41SJohn Dyson 			} else {
29202d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
2921925a3a41SJohn Dyson 			}
29221efb74fbSJohn Dyson /*
29231efb74fbSJohn Dyson  * Otherwise, we have to do a logical mmap.
29241efb74fbSJohn Dyson  */
29251efb74fbSJohn Dyson 		} else {
29261efb74fbSJohn Dyson 
2927069e9bc1SDoug Rabson 			vm_object_set_flag(srcobject, OBJ_OPT);
2928925a3a41SJohn Dyson 			vm_object_reference(srcobject);
29291efb74fbSJohn Dyson 
29302d8acc0fSJohn Dyson 			pmap_remove (map->pmap, uaddr, tend);
29311efb74fbSJohn Dyson 
293247221757SJohn Dyson 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
293347221757SJohn Dyson 			vm_map_lock_upgrade(map);
29341efb74fbSJohn Dyson 
29352d8acc0fSJohn Dyson 			if (entry == &map->header) {
29361efb74fbSJohn Dyson 				map->first_free = &map->header;
29371efb74fbSJohn Dyson 			} else if (map->first_free->start >= start) {
29382d8acc0fSJohn Dyson 				map->first_free = entry->prev;
29391efb74fbSJohn Dyson 			}
29401efb74fbSJohn Dyson 
29412d8acc0fSJohn Dyson 			SAVE_HINT(map, entry->prev);
29422d8acc0fSJohn Dyson 			vm_map_entry_delete(map, entry);
29431efb74fbSJohn Dyson 
29442d8acc0fSJohn Dyson 			object = srcobject;
29452d8acc0fSJohn Dyson 			ooffset = cp;
29462d8acc0fSJohn Dyson #if 0
29472d8acc0fSJohn Dyson 			vm_object_shadow(&object, &ooffset, osize);
29482d8acc0fSJohn Dyson #endif
29492d8acc0fSJohn Dyson 
29502d8acc0fSJohn Dyson 			rv = vm_map_insert(map, object, ooffset, start, tend,
29512d8acc0fSJohn Dyson 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
29521efb74fbSJohn Dyson 
29531efb74fbSJohn Dyson 			if (rv != KERN_SUCCESS)
29541efb74fbSJohn Dyson 				panic("vm_uiomove: could not insert new entry: %d", rv);
29551efb74fbSJohn Dyson 		}
29561efb74fbSJohn Dyson 
29571efb74fbSJohn Dyson /*
29581efb74fbSJohn Dyson  * Map the window directly, if it is already in memory
29591efb74fbSJohn Dyson  */
29602d8acc0fSJohn Dyson 		pmap_object_init_pt(map->pmap, uaddr,
29612d8acc0fSJohn Dyson 			srcobject, oindex, tcnt, 0);
29621efb74fbSJohn Dyson 
296347221757SJohn Dyson 		map->timestamp++;
29641efb74fbSJohn Dyson 		vm_map_unlock(map);
29651efb74fbSJohn Dyson 
29661efb74fbSJohn Dyson 		cnt -= tcnt;
29672d8acc0fSJohn Dyson 		uaddr += tcnt;
29681efb74fbSJohn Dyson 		cp += tcnt;
296995e5e988SJohn Dyson 		if (npages)
297095e5e988SJohn Dyson 			*npages += osize;
29711efb74fbSJohn Dyson 	}
29721efb74fbSJohn Dyson 	return 0;
29731efb74fbSJohn Dyson }
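
/*
 * Example (illustrative sketch): a read path could use this to map file
 * pages straight into the caller's buffer instead of copying them.  All
 * of the names here (vp, foff, xfersize, uva) are assumptions about the
 * caller, not code from this file.
 */
#if 0
static int
example_map_window(struct vnode *vp, off_t foff, int xfersize,
    vm_offset_t uva)
{
	int npages;

	return (vm_uiomove(&curproc->p_vmspace->vm_map, vp->v_object,
	    foff, xfersize, uva, &npages));
}
#endif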
29741efb74fbSJohn Dyson 
29751efb74fbSJohn Dyson /*
29761efb74fbSJohn Dyson  * Performs the copy_on_write operations necessary to allow the virtual copies
29761efb74fbSJohn Dyson  * Performs the copy-on-write operations necessary to allow the virtual copies
29781efb74fbSJohn Dyson  * from other processes, file unlinking, and file size shrinkage.
29791efb74fbSJohn Dyson  */
29801efb74fbSJohn Dyson void
29811efb74fbSJohn Dyson vm_freeze_copyopts(object, froma, toa)
29821efb74fbSJohn Dyson 	vm_object_t object;
29831efb74fbSJohn Dyson 	vm_pindex_t froma, toa;
29841efb74fbSJohn Dyson {
2985f5ef029eSPoul-Henning Kamp 	int rv;
2986f5ef029eSPoul-Henning Kamp 	vm_object_t robject;
2987f5ef029eSPoul-Henning Kamp 	vm_pindex_t idx;
29881efb74fbSJohn Dyson 
29892d8acc0fSJohn Dyson 	if ((object == NULL) ||
299095e5e988SJohn Dyson 		((object->flags & OBJ_OPT) == 0))
299195e5e988SJohn Dyson 		return;
29921efb74fbSJohn Dyson 
29931efb74fbSJohn Dyson 	if (object->shadow_count > object->ref_count)
29941efb74fbSJohn Dyson 		panic("vm_freeze_copyopts: sc > rc");
29951efb74fbSJohn Dyson 
29968aef1712SMatthew Dillon 	while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
29971efb74fbSJohn Dyson 		vm_pindex_t bo_pindex;
29981efb74fbSJohn Dyson 		vm_page_t m_in, m_out;
29991efb74fbSJohn Dyson 
30001efb74fbSJohn Dyson 		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
30011efb74fbSJohn Dyson 
300295e5e988SJohn Dyson 		vm_object_reference(robject);
3003925a3a41SJohn Dyson 
300466095752SJohn Dyson 		vm_object_pip_wait(robject, "objfrz");
3005925a3a41SJohn Dyson 
30061efb74fbSJohn Dyson 		if (robject->ref_count == 1) {
30071efb74fbSJohn Dyson 			vm_object_deallocate(robject);
30081efb74fbSJohn Dyson 			continue;
30091efb74fbSJohn Dyson 		}
30101efb74fbSJohn Dyson 
3011d474eaaaSDoug Rabson 		vm_object_pip_add(robject, 1);
30121efb74fbSJohn Dyson 
301347221757SJohn Dyson 		for (idx = 0; idx < robject->size; idx++) {
30141efb74fbSJohn Dyson 
301595461b45SJohn Dyson 			m_out = vm_page_grab(robject, idx,
301695461b45SJohn Dyson 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
30171efb74fbSJohn Dyson 
30181efb74fbSJohn Dyson 			if (m_out->valid == 0) {
301995461b45SJohn Dyson 				m_in = vm_page_grab(object, bo_pindex + idx,
302095461b45SJohn Dyson 						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
302195461b45SJohn Dyson 				if (m_in->valid == 0) {
302247221757SJohn Dyson 					rv = vm_pager_get_pages(object, &m_in, 1, 0);
302347221757SJohn Dyson 					if (rv != VM_PAGER_OK) {
302447221757SJohn Dyson 						printf("vm_freeze_copyopts: cannot read page from file: %x\n", m_in->pindex);
302547221757SJohn Dyson 						continue;
30261efb74fbSJohn Dyson 					}
302795461b45SJohn Dyson 					vm_page_deactivate(m_in);
302847221757SJohn Dyson 				}
302947221757SJohn Dyson 
303047221757SJohn Dyson 				vm_page_protect(m_in, VM_PROT_NONE);
303147221757SJohn Dyson 				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
303295461b45SJohn Dyson 				m_out->valid = m_in->valid;
30337dbf82dcSMatthew Dillon 				vm_page_dirty(m_out);
303495461b45SJohn Dyson 				vm_page_activate(m_out);
3035e69763a3SDoug Rabson 				vm_page_wakeup(m_in);
30361efb74fbSJohn Dyson 			}
3037e69763a3SDoug Rabson 			vm_page_wakeup(m_out);
303847221757SJohn Dyson 		}
3039925a3a41SJohn Dyson 
30401efb74fbSJohn Dyson 		object->shadow_count--;
304147221757SJohn Dyson 		object->ref_count--;
30421efb74fbSJohn Dyson 		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
30431efb74fbSJohn Dyson 		robject->backing_object = NULL;
30441efb74fbSJohn Dyson 		robject->backing_object_offset = 0;
30451efb74fbSJohn Dyson 
304647221757SJohn Dyson 		vm_object_pip_wakeup(robject);
30471efb74fbSJohn Dyson 		vm_object_deallocate(robject);
30481efb74fbSJohn Dyson 	}
304947221757SJohn Dyson 
3050069e9bc1SDoug Rabson 	vm_object_clear_flag(object, OBJ_OPT);
30511efb74fbSJohn Dyson }
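
/*
 * Example (illustrative sketch): before modifying a range of a vnode's VM
 * object (write, truncate, unlink), any outstanding optimized copies are
 * frozen.  "vp", "offset" and "length" are assumed names.
 */
#if 0
static void
example_freeze_before_write(struct vnode *vp, off_t offset, off_t length)
{
	if (vp->v_object != NULL)
		vm_freeze_copyopts(vp->v_object,
		    OFF_TO_IDX(offset), OFF_TO_IDX(offset + length));
}
#endif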
30521efb74fbSJohn Dyson 
3053c7c34a24SBruce Evans #include "opt_ddb.h"
3054c3cb3e12SDavid Greenman #ifdef DDB
3055c7c34a24SBruce Evans #include <sys/kernel.h>
3056c7c34a24SBruce Evans 
3057c7c34a24SBruce Evans #include <ddb/ddb.h>
3058c7c34a24SBruce Evans 
3059df8bae1dSRodney W. Grimes /*
3060df8bae1dSRodney W. Grimes  *	vm_map_print:	[ debug ]
3061df8bae1dSRodney W. Grimes  */
3062c7c34a24SBruce Evans DB_SHOW_COMMAND(map, vm_map_print)
3063df8bae1dSRodney W. Grimes {
306495e5e988SJohn Dyson 	static int nlines;
3065c7c34a24SBruce Evans 	/* XXX convert args. */
3066c0877f10SJohn Dyson 	vm_map_t map = (vm_map_t)addr;
3067c7c34a24SBruce Evans 	boolean_t full = have_addr;
3068df8bae1dSRodney W. Grimes 
3069c0877f10SJohn Dyson 	vm_map_entry_t entry;
3070c7c34a24SBruce Evans 
3071101eeb7fSBruce Evans 	db_iprintf("%s map %p: pmap=%p, nentries=%d, version=%u\n",
3072101eeb7fSBruce Evans 	    (map->is_main_map ? "Task" : "Share"), (void *)map,
3073101eeb7fSBruce Evans 	    (void *)map->pmap, map->nentries, map->timestamp);
307495e5e988SJohn Dyson 	nlines++;
3075df8bae1dSRodney W. Grimes 
3076c7c34a24SBruce Evans 	if (!full && db_indent)
3077df8bae1dSRodney W. Grimes 		return;
3078df8bae1dSRodney W. Grimes 
3079c7c34a24SBruce Evans 	db_indent += 2;
3080df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
3081df8bae1dSRodney W. Grimes 	    entry = entry->next) {
308295e5e988SJohn Dyson #if 0
308395e5e988SJohn Dyson 		if (nlines > 18) {
308495e5e988SJohn Dyson 			db_printf("--More--");
308595e5e988SJohn Dyson 			cngetc();
308695e5e988SJohn Dyson 			db_printf("\r");
308795e5e988SJohn Dyson 			nlines = 0;
308895e5e988SJohn Dyson 		}
308995e5e988SJohn Dyson #endif
309095e5e988SJohn Dyson 
3091fc62ef1fSBruce Evans 		db_iprintf("map entry %p: start=%p, end=%p\n",
3092fc62ef1fSBruce Evans 		    (void *)entry, (void *)entry->start, (void *)entry->end);
309395e5e988SJohn Dyson 		nlines++;
3094df8bae1dSRodney W. Grimes 		if (map->is_main_map) {
3095df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
3096df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
30970d94caffSDavid Greenman 
309895e5e988SJohn Dyson 			db_iprintf(" prot=%x/%x/%s",
3099df8bae1dSRodney W. Grimes 			    entry->protection,
3100df8bae1dSRodney W. Grimes 			    entry->max_protection,
31018aef1712SMatthew Dillon 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3102df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
310395e5e988SJohn Dyson 				db_printf(", wired");
3104df8bae1dSRodney W. Grimes 		}
3105afa07f7eSJohn Dyson 		if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
3106101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3107101eeb7fSBruce Evans 			db_printf(", share=%p, offset=0x%lx\n",
3108101eeb7fSBruce Evans 			    (void *)entry->object.share_map,
3109ecbb00a2SDoug Rabson 			    (long)entry->offset);
311095e5e988SJohn Dyson 			nlines++;
3111df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
3112afa07f7eSJohn Dyson 			    ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
3113df8bae1dSRodney W. Grimes 			    (entry->prev->object.share_map !=
3114df8bae1dSRodney W. Grimes 				entry->object.share_map)) {
3115c7c34a24SBruce Evans 				db_indent += 2;
3116101eeb7fSBruce Evans 				vm_map_print((db_expr_t)(intptr_t)
3117101eeb7fSBruce Evans 					     entry->object.share_map,
3118914181e7SBruce Evans 					     full, 0, (char *)0);
3119c7c34a24SBruce Evans 				db_indent -= 2;
3120df8bae1dSRodney W. Grimes 			}
31210d94caffSDavid Greenman 		} else {
3122101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3123101eeb7fSBruce Evans 			db_printf(", object=%p, offset=0x%lx",
3124101eeb7fSBruce Evans 			    (void *)entry->object.vm_object,
3125ecbb00a2SDoug Rabson 			    (long)entry->offset);
3126afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
3127c7c34a24SBruce Evans 				db_printf(", copy (%s)",
3128afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3129c7c34a24SBruce Evans 			db_printf("\n");
313095e5e988SJohn Dyson 			nlines++;
3131df8bae1dSRodney W. Grimes 
3132df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
3133afa07f7eSJohn Dyson 			    (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
3134df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
3135df8bae1dSRodney W. Grimes 				entry->object.vm_object)) {
3136c7c34a24SBruce Evans 				db_indent += 2;
3137101eeb7fSBruce Evans 				vm_object_print((db_expr_t)(intptr_t)
3138101eeb7fSBruce Evans 						entry->object.vm_object,
3139914181e7SBruce Evans 						full, 0, (char *)0);
314095e5e988SJohn Dyson 				nlines += 4;
3141c7c34a24SBruce Evans 				db_indent -= 2;
3142df8bae1dSRodney W. Grimes 			}
3143df8bae1dSRodney W. Grimes 		}
3144df8bae1dSRodney W. Grimes 	}
3145c7c34a24SBruce Evans 	db_indent -= 2;
314695e5e988SJohn Dyson 	if (db_indent == 0)
314795e5e988SJohn Dyson 		nlines = 0;
3148df8bae1dSRodney W. Grimes }
314995e5e988SJohn Dyson 
315095e5e988SJohn Dyson 
315195e5e988SJohn Dyson DB_SHOW_COMMAND(procvm, procvm)
315295e5e988SJohn Dyson {
315395e5e988SJohn Dyson 	struct proc *p;
315495e5e988SJohn Dyson 
315595e5e988SJohn Dyson 	if (have_addr) {
315695e5e988SJohn Dyson 		p = (struct proc *) addr;
315795e5e988SJohn Dyson 	} else {
315895e5e988SJohn Dyson 		p = curproc;
315995e5e988SJohn Dyson 	}
316095e5e988SJohn Dyson 
3161ac1e407bSBruce Evans 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3162ac1e407bSBruce Evans 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3163ac1e407bSBruce Evans 	    (void *)&p->p_vmspace->vm_pmap);
316495e5e988SJohn Dyson 
3165101eeb7fSBruce Evans 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
316695e5e988SJohn Dyson }
316795e5e988SJohn Dyson 
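/*
 * Example (illustrative): the commands defined above are invoked from the
 * DDB prompt as "show map <addr>" and "show procvm [<proc addr>]"; the
 * address below is hypothetical.
 *
 *	db> show procvm
 *	db> show map 0xc0b67800
 */
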
3168c7c34a24SBruce Evans #endif /* DDB */
3169