xref: /freebsd/sys/vm/vm_map.c (revision 5069bf5747992c376cc7ad1ad03a3b0f0d069ccd)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
17df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
18df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
19df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
20df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
21df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
22df8bae1dSRodney W. Grimes  *    without specific prior written permission.
23df8bae1dSRodney W. Grimes  *
24df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
35df8bae1dSRodney W. Grimes  *
363c4dd356SDavid Greenman  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  *
39df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40df8bae1dSRodney W. Grimes  * All rights reserved.
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43df8bae1dSRodney W. Grimes  *
44df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
45df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
46df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
47df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
48df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
55df8bae1dSRodney W. Grimes  *
56df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57df8bae1dSRodney W. Grimes  *  School of Computer Science
58df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
59df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
60df8bae1dSRodney W. Grimes  *
61df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
62df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
633c4dd356SDavid Greenman  *
641130b656SJordan K. Hubbard  * $FreeBSD$
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes /*
68df8bae1dSRodney W. Grimes  *	Virtual memory mapping module.
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes #include <sys/param.h>
72df8bae1dSRodney W. Grimes #include <sys/systm.h>
73df8bae1dSRodney W. Grimes #include <sys/malloc.h>
74b5e8ce9fSBruce Evans #include <sys/proc.h>
75efeaf95aSDavid Greenman #include <sys/queue.h>
76efeaf95aSDavid Greenman #include <sys/vmmeter.h>
77867a482dSJohn Dyson #include <sys/mman.h>
78df8bae1dSRodney W. Grimes 
79df8bae1dSRodney W. Grimes #include <vm/vm.h>
80efeaf95aSDavid Greenman #include <vm/vm_param.h>
81efeaf95aSDavid Greenman #include <vm/vm_prot.h>
82efeaf95aSDavid Greenman #include <vm/vm_inherit.h>
83efeaf95aSDavid Greenman #include <vm/lock.h>
84efeaf95aSDavid Greenman #include <vm/pmap.h>
85efeaf95aSDavid Greenman #include <vm/vm_map.h>
86df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
87df8bae1dSRodney W. Grimes #include <vm/vm_object.h>
8826f9a767SRodney W. Grimes #include <vm/vm_kern.h>
8924a1cce3SDavid Greenman #include <vm/vm_pager.h>
90efeaf95aSDavid Greenman #include <vm/vm_extern.h>
91f35329acSJohn Dyson #include <vm/default_pager.h>
92df8bae1dSRodney W. Grimes 
93df8bae1dSRodney W. Grimes /*
94df8bae1dSRodney W. Grimes  *	Virtual memory maps provide for the mapping, protection,
95df8bae1dSRodney W. Grimes  *	and sharing of virtual memory objects.  In addition,
96df8bae1dSRodney W. Grimes  *	this module provides for an efficient virtual copy of
97df8bae1dSRodney W. Grimes  *	memory from one map to another.
98df8bae1dSRodney W. Grimes  *
99df8bae1dSRodney W. Grimes  *	Synchronization is required prior to most operations.
100df8bae1dSRodney W. Grimes  *
101df8bae1dSRodney W. Grimes  *	Maps consist of an ordered doubly-linked list of simple
102df8bae1dSRodney W. Grimes  *	entries; a single hint is used to speed up lookups.
103df8bae1dSRodney W. Grimes  *
104df8bae1dSRodney W. Grimes  *	In order to properly represent the sharing of virtual
105df8bae1dSRodney W. Grimes  *	memory regions among maps, the map structure is bi-level.
106df8bae1dSRodney W. Grimes  *	Top-level ("address") maps refer to regions of sharable
107df8bae1dSRodney W. Grimes  *	virtual memory.  These regions are implemented as
108df8bae1dSRodney W. Grimes  *	("sharing") maps, which then refer to the actual virtual
109df8bae1dSRodney W. Grimes  *	memory objects.  When two address maps "share" memory,
110df8bae1dSRodney W. Grimes  *	their top-level maps both have references to the same
111df8bae1dSRodney W. Grimes  *	sharing map.  When memory is virtual-copied from one
112df8bae1dSRodney W. Grimes  *	address map to another, the references in the sharing
113df8bae1dSRodney W. Grimes  *	maps are actually copied -- no copying occurs at the
114df8bae1dSRodney W. Grimes  *	virtual memory object level.
115df8bae1dSRodney W. Grimes  *
116df8bae1dSRodney W. Grimes  *	Since portions of maps are specified by start/end addresses,
117df8bae1dSRodney W. Grimes  *	which may not align with existing map entries, all
118df8bae1dSRodney W. Grimes  *	routines merely "clip" entries to these start/end values.
119df8bae1dSRodney W. Grimes  *	[That is, an entry is split into two, bordering at a
120df8bae1dSRodney W. Grimes  *	start or end value.]  Note that these clippings may not
121df8bae1dSRodney W. Grimes  *	always be necessary (as the two resulting entries are then
122df8bae1dSRodney W. Grimes  *	not changed); however, the clipping is done for convenience.
123df8bae1dSRodney W. Grimes  *	No attempt is currently made to "glue back together" two
124df8bae1dSRodney W. Grimes  *	abutting entries.
125df8bae1dSRodney W. Grimes  *
126df8bae1dSRodney W. Grimes  *	As mentioned above, virtual copy operations are performed
127df8bae1dSRodney W. Grimes  *	by copying VM object references from one sharing map to
128df8bae1dSRodney W. Grimes  *	another, and then marking both regions as copy-on-write.
129df8bae1dSRodney W. Grimes  *	It is important to note that only one writeable reference
130df8bae1dSRodney W. Grimes  *	to a VM object region exists in any map -- this means that
131df8bae1dSRodney W. Grimes  *	shadow object creation can be delayed until a write operation
132df8bae1dSRodney W. Grimes  *	occurs.
133df8bae1dSRodney W. Grimes  */
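
/*
 * Illustrative sketch only (kept as a comment, not compiled): the pattern
 * that most of the exported routines below follow when operating on a
 * caller-supplied [start, end) range.  The identifiers are generic
 * placeholders.
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	while ((entry != &map->header) && (entry->start < end)) {
 *		vm_map_clip_end(map, entry, end);
 *		... operate on this entry ...
 *		entry = entry->next;
 *	}
 *	vm_map_unlock(map);
 */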
134df8bae1dSRodney W. Grimes 
135df8bae1dSRodney W. Grimes /*
136df8bae1dSRodney W. Grimes  *	vm_map_startup:
137df8bae1dSRodney W. Grimes  *
138df8bae1dSRodney W. Grimes  *	Initialize the vm_map module.  Must be called before
139df8bae1dSRodney W. Grimes  *	any other vm_map routines.
140df8bae1dSRodney W. Grimes  *
141df8bae1dSRodney W. Grimes  *	Map and entry structures are allocated from the general
142df8bae1dSRodney W. Grimes  *	purpose memory pool with some exceptions:
143df8bae1dSRodney W. Grimes  *
144df8bae1dSRodney W. Grimes  *	- The kernel map and kmem submap are allocated statically.
145df8bae1dSRodney W. Grimes  *	- Kernel map entries are allocated out of a static pool.
146df8bae1dSRodney W. Grimes  *
147df8bae1dSRodney W. Grimes  *	These restrictions are necessary since malloc() uses the
148df8bae1dSRodney W. Grimes  *	maps and requires map entries.
149df8bae1dSRodney W. Grimes  */
150df8bae1dSRodney W. Grimes 
151df8bae1dSRodney W. Grimes vm_offset_t kentry_data;
152df8bae1dSRodney W. Grimes vm_size_t kentry_data_size;
153f708ef1bSPoul-Henning Kamp static vm_map_entry_t kentry_free;
154f708ef1bSPoul-Henning Kamp static vm_map_t kmap_free;
155bd7e5f99SJohn Dyson extern char kstack[];
156b7b2aac2SJohn Dyson extern int inmprotect;
157df8bae1dSRodney W. Grimes 
158f708ef1bSPoul-Henning Kamp static int kentry_count;
159c3cb3e12SDavid Greenman static vm_offset_t mapvm_start, mapvm, mapvmmax;
160c3cb3e12SDavid Greenman static int mapvmpgcnt;
16126f9a767SRodney W. Grimes 
162b18bfc3dSJohn Dyson static struct vm_map_entry *mappool;
163b18bfc3dSJohn Dyson static int mappoolcnt;
164b18bfc3dSJohn Dyson #define KENTRY_LOW_WATER 128
165b18bfc3dSJohn Dyson 
166df8bae1dSRodney W. Grimes static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
167df8bae1dSRodney W. Grimes static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
168f708ef1bSPoul-Henning Kamp static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
169f708ef1bSPoul-Henning Kamp static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
1700362d7d7SJohn Dyson static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
171f708ef1bSPoul-Henning Kamp static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
172f708ef1bSPoul-Henning Kamp static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
173f708ef1bSPoul-Henning Kamp 		vm_map_entry_t));
174df8bae1dSRodney W. Grimes 
1750d94caffSDavid Greenman void
1760d94caffSDavid Greenman vm_map_startup()
177df8bae1dSRodney W. Grimes {
178df8bae1dSRodney W. Grimes 	register int i;
179df8bae1dSRodney W. Grimes 	register vm_map_entry_t mep;
180df8bae1dSRodney W. Grimes 	vm_map_t mp;
181df8bae1dSRodney W. Grimes 
182df8bae1dSRodney W. Grimes 	/*
183df8bae1dSRodney W. Grimes 	 * Static map structures for allocation before initialization of
184df8bae1dSRodney W. Grimes 	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
185df8bae1dSRodney W. Grimes 	 */
186df8bae1dSRodney W. Grimes 	kmap_free = mp = (vm_map_t) kentry_data;
187df8bae1dSRodney W. Grimes 	i = MAX_KMAP;
188df8bae1dSRodney W. Grimes 	while (--i > 0) {
189df8bae1dSRodney W. Grimes 		mp->header.next = (vm_map_entry_t) (mp + 1);
190df8bae1dSRodney W. Grimes 		mp++;
191df8bae1dSRodney W. Grimes 	}
192df8bae1dSRodney W. Grimes 	mp++->header.next = NULL;
193df8bae1dSRodney W. Grimes 
194df8bae1dSRodney W. Grimes 	/*
1950d94caffSDavid Greenman 	 * Form a free list of statically allocated kernel map entries with
1960d94caffSDavid Greenman 	 * the rest.
197df8bae1dSRodney W. Grimes 	 */
198df8bae1dSRodney W. Grimes 	kentry_free = mep = (vm_map_entry_t) mp;
19966ecebedSDavid Greenman 	kentry_count = i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
200df8bae1dSRodney W. Grimes 	while (--i > 0) {
201df8bae1dSRodney W. Grimes 		mep->next = mep + 1;
202df8bae1dSRodney W. Grimes 		mep++;
203df8bae1dSRodney W. Grimes 	}
204df8bae1dSRodney W. Grimes 	mep->next = NULL;
205df8bae1dSRodney W. Grimes }
206df8bae1dSRodney W. Grimes 
207df8bae1dSRodney W. Grimes /*
208df8bae1dSRodney W. Grimes  * Allocate a vmspace structure, including a vm_map and pmap,
209df8bae1dSRodney W. Grimes  * and initialize those structures.  The refcnt is set to 1.
210df8bae1dSRodney W. Grimes  * The remaining fields must be initialized by the caller.
211df8bae1dSRodney W. Grimes  */
212df8bae1dSRodney W. Grimes struct vmspace *
213df8bae1dSRodney W. Grimes vmspace_alloc(min, max, pageable)
214df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
215df8bae1dSRodney W. Grimes 	int pageable;
216df8bae1dSRodney W. Grimes {
217df8bae1dSRodney W. Grimes 	register struct vmspace *vm;
2180d94caffSDavid Greenman 
219d6a6c0f6SDavid Greenman 	if (mapvmpgcnt == 0 && mapvm == 0) {
220b18bfc3dSJohn Dyson 		mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
221b18bfc3dSJohn Dyson 		mapvm_start = mapvm = kmem_alloc_pageable(kernel_map,
222b18bfc3dSJohn Dyson 			mapvmpgcnt * PAGE_SIZE);
22366ecebedSDavid Greenman 		mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
224d6a6c0f6SDavid Greenman 		if (!mapvm)
225d6a6c0f6SDavid Greenman 			mapvmpgcnt = 0;
226d6a6c0f6SDavid Greenman 	}
227df8bae1dSRodney W. Grimes 	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
228df8bae1dSRodney W. Grimes 	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
229df8bae1dSRodney W. Grimes 	vm_map_init(&vm->vm_map, min, max, pageable);
230df8bae1dSRodney W. Grimes 	pmap_pinit(&vm->vm_pmap);
231df8bae1dSRodney W. Grimes 	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
232df8bae1dSRodney W. Grimes 	vm->vm_refcnt = 1;
233df8bae1dSRodney W. Grimes 	return (vm);
234df8bae1dSRodney W. Grimes }
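
/*
 * Usage sketch for the vmspace routines (the bounds and the pageable flag
 * shown are placeholders for whatever the caller actually wants; any fields
 * below vm_startcopy are left for the caller to fill in, as noted above):
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(min_addr, max_addr, 1);
 *	... use vm->vm_map and vm->vm_pmap ...
 *	vmspace_free(vm);	releases the reference taken at allocation
 */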
235df8bae1dSRodney W. Grimes 
236df8bae1dSRodney W. Grimes void
237df8bae1dSRodney W. Grimes vmspace_free(vm)
238df8bae1dSRodney W. Grimes 	register struct vmspace *vm;
239df8bae1dSRodney W. Grimes {
240df8bae1dSRodney W. Grimes 
241a1f6d91cSDavid Greenman 	if (vm->vm_refcnt == 0)
242a1f6d91cSDavid Greenman 		panic("vmspace_free: attempt to free already freed vmspace");
243a1f6d91cSDavid Greenman 
244df8bae1dSRodney W. Grimes 	if (--vm->vm_refcnt == 0) {
245bd7e5f99SJohn Dyson 
24630dcfc09SJohn Dyson 		/*
247df8bae1dSRodney W. Grimes 		 * Lock the map, to wait out all other references to it.
2480d94caffSDavid Greenman 		 * Delete all of the mappings and pages they hold, then call
2490d94caffSDavid Greenman 		 * the pmap module to reclaim anything left.
250df8bae1dSRodney W. Grimes 		 */
251df8bae1dSRodney W. Grimes 		vm_map_lock(&vm->vm_map);
252df8bae1dSRodney W. Grimes 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
253df8bae1dSRodney W. Grimes 		    vm->vm_map.max_offset);
254a1f6d91cSDavid Greenman 		vm_map_unlock(&vm->vm_map);
255b18bfc3dSJohn Dyson 
256a1f6d91cSDavid Greenman 		while( vm->vm_map.ref_count != 1)
257a1f6d91cSDavid Greenman 			tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
258a1f6d91cSDavid Greenman 		--vm->vm_map.ref_count;
259b18bfc3dSJohn Dyson 		vm_object_pmap_remove(vm->vm_upages_obj,
260b18bfc3dSJohn Dyson 			0, vm->vm_upages_obj->size);
261b18bfc3dSJohn Dyson 		vm_object_deallocate(vm->vm_upages_obj);
262df8bae1dSRodney W. Grimes 		pmap_release(&vm->vm_pmap);
263df8bae1dSRodney W. Grimes 		FREE(vm, M_VMMAP);
264b18bfc3dSJohn Dyson 	} else {
265b18bfc3dSJohn Dyson 		wakeup(&vm->vm_map.ref_count);
266df8bae1dSRodney W. Grimes 	}
267df8bae1dSRodney W. Grimes }
268df8bae1dSRodney W. Grimes 
269df8bae1dSRodney W. Grimes /*
270df8bae1dSRodney W. Grimes  *	vm_map_create:
271df8bae1dSRodney W. Grimes  *
272df8bae1dSRodney W. Grimes  *	Creates and returns a new empty VM map with
273df8bae1dSRodney W. Grimes  *	the given physical map structure, and having
274df8bae1dSRodney W. Grimes  *	the given lower and upper address bounds.
275df8bae1dSRodney W. Grimes  */
2760d94caffSDavid Greenman vm_map_t
2770d94caffSDavid Greenman vm_map_create(pmap, min, max, pageable)
278df8bae1dSRodney W. Grimes 	pmap_t pmap;
279df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
280df8bae1dSRodney W. Grimes 	boolean_t pageable;
281df8bae1dSRodney W. Grimes {
282df8bae1dSRodney W. Grimes 	register vm_map_t result;
283df8bae1dSRodney W. Grimes 
284df8bae1dSRodney W. Grimes 	if (kmem_map == NULL) {
285df8bae1dSRodney W. Grimes 		result = kmap_free;
286df8bae1dSRodney W. Grimes 		kmap_free = (vm_map_t) result->header.next;
287df8bae1dSRodney W. Grimes 		if (result == NULL)
288df8bae1dSRodney W. Grimes 			panic("vm_map_create: out of maps");
289df8bae1dSRodney W. Grimes 	} else
290df8bae1dSRodney W. Grimes 		MALLOC(result, vm_map_t, sizeof(struct vm_map),
291df8bae1dSRodney W. Grimes 		    M_VMMAP, M_WAITOK);
292df8bae1dSRodney W. Grimes 
293df8bae1dSRodney W. Grimes 	vm_map_init(result, min, max, pageable);
294df8bae1dSRodney W. Grimes 	result->pmap = pmap;
295df8bae1dSRodney W. Grimes 	return (result);
296df8bae1dSRodney W. Grimes }
297df8bae1dSRodney W. Grimes 
298df8bae1dSRodney W. Grimes /*
299df8bae1dSRodney W. Grimes  * Initialize an existing vm_map structure
300df8bae1dSRodney W. Grimes  * such as that in the vmspace structure.
301df8bae1dSRodney W. Grimes  * The pmap is set elsewhere.
302df8bae1dSRodney W. Grimes  */
303df8bae1dSRodney W. Grimes void
304df8bae1dSRodney W. Grimes vm_map_init(map, min, max, pageable)
305df8bae1dSRodney W. Grimes 	register struct vm_map *map;
306df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
307df8bae1dSRodney W. Grimes 	boolean_t pageable;
308df8bae1dSRodney W. Grimes {
309df8bae1dSRodney W. Grimes 	map->header.next = map->header.prev = &map->header;
310df8bae1dSRodney W. Grimes 	map->nentries = 0;
311df8bae1dSRodney W. Grimes 	map->size = 0;
312df8bae1dSRodney W. Grimes 	map->ref_count = 1;
313df8bae1dSRodney W. Grimes 	map->is_main_map = TRUE;
314df8bae1dSRodney W. Grimes 	map->min_offset = min;
315df8bae1dSRodney W. Grimes 	map->max_offset = max;
316df8bae1dSRodney W. Grimes 	map->entries_pageable = pageable;
317df8bae1dSRodney W. Grimes 	map->first_free = &map->header;
318df8bae1dSRodney W. Grimes 	map->hint = &map->header;
319df8bae1dSRodney W. Grimes 	map->timestamp = 0;
320df8bae1dSRodney W. Grimes 	lock_init(&map->lock, TRUE);
321df8bae1dSRodney W. Grimes }
322df8bae1dSRodney W. Grimes 
323df8bae1dSRodney W. Grimes /*
324b18bfc3dSJohn Dyson  *	vm_map_entry_dispose:	[ internal use only ]
325b18bfc3dSJohn Dyson  *
326b18bfc3dSJohn Dyson  *	Inverse of vm_map_entry_create.
327b18bfc3dSJohn Dyson  */
32862487bb4SJohn Dyson static void
329b18bfc3dSJohn Dyson vm_map_entry_dispose(map, entry)
330b18bfc3dSJohn Dyson 	vm_map_t map;
331b18bfc3dSJohn Dyson 	vm_map_entry_t entry;
332b18bfc3dSJohn Dyson {
333b18bfc3dSJohn Dyson 	int s;
334b18bfc3dSJohn Dyson 
33562487bb4SJohn Dyson 	if (map == kernel_map || map == kmem_map ||
33662487bb4SJohn Dyson 		map == mb_map || map == pager_map) {
337b18bfc3dSJohn Dyson 		s = splvm();
338b18bfc3dSJohn Dyson 		entry->next = kentry_free;
339b18bfc3dSJohn Dyson 		kentry_free = entry;
340b18bfc3dSJohn Dyson 		++kentry_count;
341b18bfc3dSJohn Dyson 		splx(s);
342b18bfc3dSJohn Dyson 	} else {
343b18bfc3dSJohn Dyson 		entry->next = mappool;
344b18bfc3dSJohn Dyson 		mappool = entry;
345b18bfc3dSJohn Dyson 		++mappoolcnt;
346b18bfc3dSJohn Dyson 	}
347b18bfc3dSJohn Dyson }
348b18bfc3dSJohn Dyson 
349b18bfc3dSJohn Dyson /*
350df8bae1dSRodney W. Grimes  *	vm_map_entry_create:	[ internal use only ]
351df8bae1dSRodney W. Grimes  *
352df8bae1dSRodney W. Grimes  *	Allocates a VM map entry for insertion.
353df8bae1dSRodney W. Grimes  *	No entry fields are filled in.
354df8bae1dSRodney W. Grimes  */
355f708ef1bSPoul-Henning Kamp static vm_map_entry_t
35626f9a767SRodney W. Grimes vm_map_entry_create(map)
357df8bae1dSRodney W. Grimes 	vm_map_t map;
358df8bae1dSRodney W. Grimes {
359df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
36026f9a767SRodney W. Grimes 	int i;
361b18bfc3dSJohn Dyson 	int s;
362df8bae1dSRodney W. Grimes 
36326f9a767SRodney W. Grimes 	/*
36426f9a767SRodney W. Grimes 	 * This is a *very* nasty (and sort of incomplete) hack!!!!
36526f9a767SRodney W. Grimes 	 */
36626f9a767SRodney W. Grimes 	if (kentry_count < KENTRY_LOW_WATER) {
367b18bfc3dSJohn Dyson 		s = splvm();
36826f9a767SRodney W. Grimes 		if (mapvmpgcnt && mapvm) {
36926f9a767SRodney W. Grimes 			vm_page_t m;
3700d94caffSDavid Greenman 
3713ea2f344SJohn Dyson 			m = vm_page_alloc(kernel_object,
372b18bfc3dSJohn Dyson 			        OFF_TO_IDX(mapvm - VM_MIN_KERNEL_ADDRESS),
3739579ee64SDavid Greenman 				    (map == kmem_map || map == mb_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);
374b18bfc3dSJohn Dyson 
37505f0fdd2SPoul-Henning Kamp 			if (m) {
37626f9a767SRodney W. Grimes 				int newentries;
3770d94caffSDavid Greenman 
378a91c5a7eSJohn Dyson 				newentries = (PAGE_SIZE / sizeof(struct vm_map_entry));
37926f9a767SRodney W. Grimes 				vm_page_wire(m);
380b18bfc3dSJohn Dyson 				PAGE_WAKEUP(m);
381d9459480SDavid Greenman 				m->valid = VM_PAGE_BITS_ALL;
382b18bfc3dSJohn Dyson 				pmap_kenter(mapvm, VM_PAGE_TO_PHYS(m));
383b18bfc3dSJohn Dyson 				m->flags |= PG_WRITEABLE;
38426f9a767SRodney W. Grimes 
38526f9a767SRodney W. Grimes 				entry = (vm_map_entry_t) mapvm;
386a91c5a7eSJohn Dyson 				mapvm += PAGE_SIZE;
38726f9a767SRodney W. Grimes 				--mapvmpgcnt;
38826f9a767SRodney W. Grimes 
38926f9a767SRodney W. Grimes 				for (i = 0; i < newentries; i++) {
39026f9a767SRodney W. Grimes 					vm_map_entry_dispose(kernel_map, entry);
39126f9a767SRodney W. Grimes 					entry++;
39226f9a767SRodney W. Grimes 				}
39326f9a767SRodney W. Grimes 			}
39426f9a767SRodney W. Grimes 		}
395b18bfc3dSJohn Dyson 		splx(s);
39626f9a767SRodney W. Grimes 	}
39726f9a767SRodney W. Grimes 
39862487bb4SJohn Dyson 	if (map == kernel_map || map == kmem_map ||
39962487bb4SJohn Dyson 		map == mb_map || map == pager_map) {
400b18bfc3dSJohn Dyson 		s = splvm();
40105f0fdd2SPoul-Henning Kamp 		entry = kentry_free;
40205f0fdd2SPoul-Henning Kamp 		if (entry) {
40326f9a767SRodney W. Grimes 			kentry_free = entry->next;
40426f9a767SRodney W. Grimes 			--kentry_count;
405b18bfc3dSJohn Dyson 		} else {
406b18bfc3dSJohn Dyson 			panic("vm_map_entry_create: out of map entries for kernel");
40726f9a767SRodney W. Grimes 		}
408b18bfc3dSJohn Dyson 		splx(s);
40926f9a767SRodney W. Grimes 	} else {
41005f0fdd2SPoul-Henning Kamp 		entry = mappool;
41105f0fdd2SPoul-Henning Kamp 		if (entry) {
41226f9a767SRodney W. Grimes 			mappool = entry->next;
41326f9a767SRodney W. Grimes 			--mappoolcnt;
414b18bfc3dSJohn Dyson 		} else {
415df8bae1dSRodney W. Grimes 			MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
416df8bae1dSRodney W. Grimes 			    M_VMMAPENT, M_WAITOK);
417df8bae1dSRodney W. Grimes 		}
418b18bfc3dSJohn Dyson 	}
419df8bae1dSRodney W. Grimes 
420df8bae1dSRodney W. Grimes 	return (entry);
421df8bae1dSRodney W. Grimes }
422df8bae1dSRodney W. Grimes 
423df8bae1dSRodney W. Grimes /*
424df8bae1dSRodney W. Grimes  *	vm_map_entry_{un,}link:
425df8bae1dSRodney W. Grimes  *
426df8bae1dSRodney W. Grimes  *	Insert/remove entries from maps.
427df8bae1dSRodney W. Grimes  */
428df8bae1dSRodney W. Grimes #define	vm_map_entry_link(map, after_where, entry) \
429df8bae1dSRodney W. Grimes 		{ \
430df8bae1dSRodney W. Grimes 		(map)->nentries++; \
431df8bae1dSRodney W. Grimes 		(entry)->prev = (after_where); \
432df8bae1dSRodney W. Grimes 		(entry)->next = (after_where)->next; \
433df8bae1dSRodney W. Grimes 		(entry)->prev->next = (entry); \
434df8bae1dSRodney W. Grimes 		(entry)->next->prev = (entry); \
435df8bae1dSRodney W. Grimes 		}
436df8bae1dSRodney W. Grimes #define	vm_map_entry_unlink(map, entry) \
437df8bae1dSRodney W. Grimes 		{ \
438df8bae1dSRodney W. Grimes 		(map)->nentries--; \
439df8bae1dSRodney W. Grimes 		(entry)->next->prev = (entry)->prev; \
440df8bae1dSRodney W. Grimes 		(entry)->prev->next = (entry)->next; \
441df8bae1dSRodney W. Grimes 		}
442df8bae1dSRodney W. Grimes 
443df8bae1dSRodney W. Grimes /*
444df8bae1dSRodney W. Grimes  *	vm_map_reference:
445df8bae1dSRodney W. Grimes  *
446df8bae1dSRodney W. Grimes  *	Creates another valid reference to the given map.
447df8bae1dSRodney W. Grimes  *
448df8bae1dSRodney W. Grimes  */
4490d94caffSDavid Greenman void
4500d94caffSDavid Greenman vm_map_reference(map)
451df8bae1dSRodney W. Grimes 	register vm_map_t map;
452df8bae1dSRodney W. Grimes {
453df8bae1dSRodney W. Grimes 	if (map == NULL)
454df8bae1dSRodney W. Grimes 		return;
455df8bae1dSRodney W. Grimes 
456df8bae1dSRodney W. Grimes 	map->ref_count++;
457df8bae1dSRodney W. Grimes }
458df8bae1dSRodney W. Grimes 
459df8bae1dSRodney W. Grimes /*
460df8bae1dSRodney W. Grimes  *	vm_map_deallocate:
461df8bae1dSRodney W. Grimes  *
462df8bae1dSRodney W. Grimes  *	Removes a reference from the specified map,
463df8bae1dSRodney W. Grimes  *	destroying it if no references remain.
464df8bae1dSRodney W. Grimes  *	The map should not be locked.
465df8bae1dSRodney W. Grimes  */
4660d94caffSDavid Greenman void
4670d94caffSDavid Greenman vm_map_deallocate(map)
468df8bae1dSRodney W. Grimes 	register vm_map_t map;
469df8bae1dSRodney W. Grimes {
470df8bae1dSRodney W. Grimes 	register int c;
471df8bae1dSRodney W. Grimes 
472df8bae1dSRodney W. Grimes 	if (map == NULL)
473df8bae1dSRodney W. Grimes 		return;
474df8bae1dSRodney W. Grimes 
475a1f6d91cSDavid Greenman 	c = map->ref_count;
476df8bae1dSRodney W. Grimes 
477a1f6d91cSDavid Greenman 	if (c == 0)
478a1f6d91cSDavid Greenman 		panic("vm_map_deallocate: deallocating already freed map");
479a1f6d91cSDavid Greenman 
480a1f6d91cSDavid Greenman 	if (c != 1) {
481a1f6d91cSDavid Greenman 		--map->ref_count;
48224a1cce3SDavid Greenman 		wakeup(&map->ref_count);
483df8bae1dSRodney W. Grimes 		return;
484df8bae1dSRodney W. Grimes 	}
485df8bae1dSRodney W. Grimes 	/*
4860d94caffSDavid Greenman 	 * Lock the map, to wait out all other references to it.
487df8bae1dSRodney W. Grimes 	 */
488df8bae1dSRodney W. Grimes 
489df8bae1dSRodney W. Grimes 	vm_map_lock(map);
490df8bae1dSRodney W. Grimes 	(void) vm_map_delete(map, map->min_offset, map->max_offset);
491a1f6d91cSDavid Greenman 	--map->ref_count;
492a1f6d91cSDavid Greenman 	if( map->ref_count != 0) {
493a1f6d91cSDavid Greenman 		vm_map_unlock(map);
494a1f6d91cSDavid Greenman 		return;
495a1f6d91cSDavid Greenman 	}
496df8bae1dSRodney W. Grimes 
497df8bae1dSRodney W. Grimes 	pmap_destroy(map->pmap);
498df8bae1dSRodney W. Grimes 	FREE(map, M_VMMAP);
499df8bae1dSRodney W. Grimes }
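
/*
 * A map reference obtained with vm_map_reference() is dropped with
 * vm_map_deallocate(); the final drop tears the map down.  Sketch:
 *
 *	vm_map_reference(map);
 *	... hand `map' to another consumer ...
 *	vm_map_deallocate(map);		destroys the map if this was the
 *					last reference
 */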
500df8bae1dSRodney W. Grimes 
501df8bae1dSRodney W. Grimes /*
502df8bae1dSRodney W. Grimes  *	SAVE_HINT:
503df8bae1dSRodney W. Grimes  *
504df8bae1dSRodney W. Grimes  *	Saves the specified entry as the hint for
50524a1cce3SDavid Greenman  *	future lookups.
506df8bae1dSRodney W. Grimes  */
507df8bae1dSRodney W. Grimes #define	SAVE_HINT(map,value) \
50824a1cce3SDavid Greenman 		(map)->hint = (value);
509df8bae1dSRodney W. Grimes 
510df8bae1dSRodney W. Grimes /*
511df8bae1dSRodney W. Grimes  *	vm_map_lookup_entry:	[ internal use only ]
512df8bae1dSRodney W. Grimes  *
513df8bae1dSRodney W. Grimes  *	Finds the map entry containing (or
514df8bae1dSRodney W. Grimes  *	immediately preceding) the specified address
515df8bae1dSRodney W. Grimes  *	in the given map; the entry is returned
516df8bae1dSRodney W. Grimes  *	in the "entry" parameter.  The boolean
517df8bae1dSRodney W. Grimes  *	result indicates whether the address is
518df8bae1dSRodney W. Grimes  *	actually contained in the map.
519df8bae1dSRodney W. Grimes  */
5200d94caffSDavid Greenman boolean_t
5210d94caffSDavid Greenman vm_map_lookup_entry(map, address, entry)
522df8bae1dSRodney W. Grimes 	register vm_map_t map;
523df8bae1dSRodney W. Grimes 	register vm_offset_t address;
524df8bae1dSRodney W. Grimes 	vm_map_entry_t *entry;	/* OUT */
525df8bae1dSRodney W. Grimes {
526df8bae1dSRodney W. Grimes 	register vm_map_entry_t cur;
527df8bae1dSRodney W. Grimes 	register vm_map_entry_t last;
528df8bae1dSRodney W. Grimes 
529df8bae1dSRodney W. Grimes 	/*
5300d94caffSDavid Greenman 	 * Start looking either from the head of the list, or from the hint.
531df8bae1dSRodney W. Grimes 	 */
532df8bae1dSRodney W. Grimes 
533df8bae1dSRodney W. Grimes 	cur = map->hint;
534df8bae1dSRodney W. Grimes 
535df8bae1dSRodney W. Grimes 	if (cur == &map->header)
536df8bae1dSRodney W. Grimes 		cur = cur->next;
537df8bae1dSRodney W. Grimes 
538df8bae1dSRodney W. Grimes 	if (address >= cur->start) {
539df8bae1dSRodney W. Grimes 		/*
540df8bae1dSRodney W. Grimes 		 * Go from hint to end of list.
541df8bae1dSRodney W. Grimes 		 *
5420d94caffSDavid Greenman 		 * But first, make a quick check to see if we are already looking
5430d94caffSDavid Greenman 		 * at the entry we want (which is usually the case). Note also
5440d94caffSDavid Greenman 		 * that we don't need to save the hint here... it is the same
5450d94caffSDavid Greenman 		 * hint (unless we are at the header, in which case the hint
5460d94caffSDavid Greenman 		 * didn't buy us anything anyway).
547df8bae1dSRodney W. Grimes 		 */
548df8bae1dSRodney W. Grimes 		last = &map->header;
549df8bae1dSRodney W. Grimes 		if ((cur != last) && (cur->end > address)) {
550df8bae1dSRodney W. Grimes 			*entry = cur;
551df8bae1dSRodney W. Grimes 			return (TRUE);
552df8bae1dSRodney W. Grimes 		}
5530d94caffSDavid Greenman 	} else {
554df8bae1dSRodney W. Grimes 		/*
555df8bae1dSRodney W. Grimes 		 * Go from start to hint, *inclusively*
556df8bae1dSRodney W. Grimes 		 */
557df8bae1dSRodney W. Grimes 		last = cur->next;
558df8bae1dSRodney W. Grimes 		cur = map->header.next;
559df8bae1dSRodney W. Grimes 	}
560df8bae1dSRodney W. Grimes 
561df8bae1dSRodney W. Grimes 	/*
562df8bae1dSRodney W. Grimes 	 * Search linearly
563df8bae1dSRodney W. Grimes 	 */
564df8bae1dSRodney W. Grimes 
565df8bae1dSRodney W. Grimes 	while (cur != last) {
566df8bae1dSRodney W. Grimes 		if (cur->end > address) {
567df8bae1dSRodney W. Grimes 			if (address >= cur->start) {
568df8bae1dSRodney W. Grimes 				/*
5690d94caffSDavid Greenman 				 * Save this lookup for future hints, and
5700d94caffSDavid Greenman 				 * return
571df8bae1dSRodney W. Grimes 				 */
572df8bae1dSRodney W. Grimes 
573df8bae1dSRodney W. Grimes 				*entry = cur;
574df8bae1dSRodney W. Grimes 				SAVE_HINT(map, cur);
575df8bae1dSRodney W. Grimes 				return (TRUE);
576df8bae1dSRodney W. Grimes 			}
577df8bae1dSRodney W. Grimes 			break;
578df8bae1dSRodney W. Grimes 		}
579df8bae1dSRodney W. Grimes 		cur = cur->next;
580df8bae1dSRodney W. Grimes 	}
581df8bae1dSRodney W. Grimes 	*entry = cur->prev;
582df8bae1dSRodney W. Grimes 	SAVE_HINT(map, *entry);
583df8bae1dSRodney W. Grimes 	return (FALSE);
584df8bae1dSRodney W. Grimes }
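
/*
 * Typical use of the lookup above (sketch): on TRUE the OUT entry contains
 * `addr'; on FALSE it is the entry immediately preceding the gap that
 * contains `addr' (possibly &map->header).
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... addr falls in the hole after `entry' ...
 *	}
 */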
585df8bae1dSRodney W. Grimes 
586df8bae1dSRodney W. Grimes /*
58730dcfc09SJohn Dyson  *	vm_map_insert:
58830dcfc09SJohn Dyson  *
58930dcfc09SJohn Dyson  *	Inserts the given whole VM object into the target
59030dcfc09SJohn Dyson  *	map at the specified address range.  The object's
59130dcfc09SJohn Dyson  *	size should match that of the address range.
59230dcfc09SJohn Dyson  *
59330dcfc09SJohn Dyson  *	Requires that the map be locked, and leaves it so.
59430dcfc09SJohn Dyson  */
59530dcfc09SJohn Dyson int
59630dcfc09SJohn Dyson vm_map_insert(map, object, offset, start, end, prot, max, cow)
59730dcfc09SJohn Dyson 	vm_map_t map;
59830dcfc09SJohn Dyson 	vm_object_t object;
59930dcfc09SJohn Dyson 	vm_ooffset_t offset;
60030dcfc09SJohn Dyson 	vm_offset_t start;
60130dcfc09SJohn Dyson 	vm_offset_t end;
60230dcfc09SJohn Dyson 	vm_prot_t prot, max;
60330dcfc09SJohn Dyson 	int cow;
60430dcfc09SJohn Dyson {
60530dcfc09SJohn Dyson 	register vm_map_entry_t new_entry;
60630dcfc09SJohn Dyson 	register vm_map_entry_t prev_entry;
60730dcfc09SJohn Dyson 	vm_map_entry_t temp_entry;
608a5b6fd29SJohn Dyson 	vm_object_t prev_object;
609afa07f7eSJohn Dyson 	u_char protoeflags;
61030dcfc09SJohn Dyson 
611cdc2c291SJohn Dyson 	if ((object != NULL) && (cow & MAP_NOFAULT)) {
612cdc2c291SJohn Dyson 		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
613cdc2c291SJohn Dyson 	}
614cdc2c291SJohn Dyson 
61530dcfc09SJohn Dyson 	/*
61630dcfc09SJohn Dyson 	 * Check that the start and end points are not bogus.
61730dcfc09SJohn Dyson 	 */
61830dcfc09SJohn Dyson 
61930dcfc09SJohn Dyson 	if ((start < map->min_offset) || (end > map->max_offset) ||
62030dcfc09SJohn Dyson 	    (start >= end))
62130dcfc09SJohn Dyson 		return (KERN_INVALID_ADDRESS);
62230dcfc09SJohn Dyson 
62330dcfc09SJohn Dyson 	/*
62430dcfc09SJohn Dyson 	 * Find the entry prior to the proposed starting address; if it's part
62530dcfc09SJohn Dyson 	 * of an existing entry, this range is bogus.
62630dcfc09SJohn Dyson 	 */
62730dcfc09SJohn Dyson 
62830dcfc09SJohn Dyson 	if (vm_map_lookup_entry(map, start, &temp_entry))
62930dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
63030dcfc09SJohn Dyson 
63130dcfc09SJohn Dyson 	prev_entry = temp_entry;
63230dcfc09SJohn Dyson 
63330dcfc09SJohn Dyson 	/*
63430dcfc09SJohn Dyson 	 * Assert that the next entry doesn't overlap the end point.
63530dcfc09SJohn Dyson 	 */
63630dcfc09SJohn Dyson 
63730dcfc09SJohn Dyson 	if ((prev_entry->next != &map->header) &&
63830dcfc09SJohn Dyson 	    (prev_entry->next->start < end))
63930dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
64030dcfc09SJohn Dyson 
641afa07f7eSJohn Dyson 	protoeflags = 0;
642afa07f7eSJohn Dyson 	if (cow & MAP_COPY_NEEDED)
643afa07f7eSJohn Dyson 		protoeflags |= MAP_ENTRY_NEEDS_COPY;
644afa07f7eSJohn Dyson 
645afa07f7eSJohn Dyson 	if (cow & MAP_COPY_ON_WRITE)
646afa07f7eSJohn Dyson 		protoeflags |= MAP_ENTRY_COW;
647afa07f7eSJohn Dyson 
648afa07f7eSJohn Dyson 	if (cow & MAP_NOFAULT)
649afa07f7eSJohn Dyson 		protoeflags |= MAP_ENTRY_NOFAULT;
650afa07f7eSJohn Dyson 
65130dcfc09SJohn Dyson 	/*
65230dcfc09SJohn Dyson 	 * See if we can avoid creating a new entry by extending one of our
6538cc7e047SJohn Dyson 	 * neighbors.  Or at least extend the object.
65430dcfc09SJohn Dyson 	 */
6558cc7e047SJohn Dyson 
6568cc7e047SJohn Dyson 	if ((object == NULL) &&
6578cc7e047SJohn Dyson 	    (prev_entry != &map->header) &&
658afa07f7eSJohn Dyson 	    (( prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
6598cc7e047SJohn Dyson 	    (prev_entry->end == start) &&
6608cc7e047SJohn Dyson 	    (prev_entry->wired_count == 0)) {
6618cc7e047SJohn Dyson 
662cdc2c291SJohn Dyson 
663afa07f7eSJohn Dyson 		if ((protoeflags == prev_entry->eflags) &&
664afa07f7eSJohn Dyson 		    ((cow & MAP_NOFAULT) ||
6658cc7e047SJohn Dyson 		     vm_object_coalesce(prev_entry->object.vm_object,
66630dcfc09SJohn Dyson 					OFF_TO_IDX(prev_entry->offset),
6678cc7e047SJohn Dyson 					(vm_size_t) (prev_entry->end - prev_entry->start),
668cdc2c291SJohn Dyson 					(vm_size_t) (end - prev_entry->end)))) {
669a5b6fd29SJohn Dyson 
67030dcfc09SJohn Dyson 			/*
6718cc7e047SJohn Dyson 			 * Coalesced the two objects.  Can we extend the
6728cc7e047SJohn Dyson 			 * previous map entry to include the new range?
67330dcfc09SJohn Dyson 			 */
6748cc7e047SJohn Dyson 			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
6758cc7e047SJohn Dyson 			    (prev_entry->protection == prot) &&
6768cc7e047SJohn Dyson 			    (prev_entry->max_protection == max)) {
6778cc7e047SJohn Dyson 
67830dcfc09SJohn Dyson 				map->size += (end - prev_entry->end);
67930dcfc09SJohn Dyson 				prev_entry->end = end;
680afa07f7eSJohn Dyson 				if ((cow & MAP_NOFAULT) == 0) {
681a5b6fd29SJohn Dyson 					prev_object = prev_entry->object.vm_object;
682b5b40fa6SJohn Dyson 					default_pager_convert_to_swapq(prev_object);
683cdc2c291SJohn Dyson 				}
68430dcfc09SJohn Dyson 				return (KERN_SUCCESS);
68530dcfc09SJohn Dyson 			}
6868cc7e047SJohn Dyson 			else {
6878cc7e047SJohn Dyson 				object = prev_entry->object.vm_object;
6888cc7e047SJohn Dyson 				offset = prev_entry->offset + (prev_entry->end -
6898cc7e047SJohn Dyson 							       prev_entry->start);
6908cc7e047SJohn Dyson 
6918cc7e047SJohn Dyson 				vm_object_reference(object);
692b18bfc3dSJohn Dyson 			}
69367bf6868SJohn Dyson 		}
6948cc7e047SJohn Dyson 	}
6958cc7e047SJohn Dyson 
69630dcfc09SJohn Dyson 	/*
69730dcfc09SJohn Dyson 	 * Create a new entry
69830dcfc09SJohn Dyson 	 */
69930dcfc09SJohn Dyson 
70030dcfc09SJohn Dyson 	new_entry = vm_map_entry_create(map);
70130dcfc09SJohn Dyson 	new_entry->start = start;
70230dcfc09SJohn Dyson 	new_entry->end = end;
70330dcfc09SJohn Dyson 
704afa07f7eSJohn Dyson 	new_entry->eflags = protoeflags;
70530dcfc09SJohn Dyson 	new_entry->object.vm_object = object;
70630dcfc09SJohn Dyson 	new_entry->offset = offset;
70730dcfc09SJohn Dyson 
70830dcfc09SJohn Dyson 	if (map->is_main_map) {
70930dcfc09SJohn Dyson 		new_entry->inheritance = VM_INHERIT_DEFAULT;
71030dcfc09SJohn Dyson 		new_entry->protection = prot;
71130dcfc09SJohn Dyson 		new_entry->max_protection = max;
71230dcfc09SJohn Dyson 		new_entry->wired_count = 0;
71330dcfc09SJohn Dyson 	}
71430dcfc09SJohn Dyson 	/*
71530dcfc09SJohn Dyson 	 * Insert the new entry into the list
71630dcfc09SJohn Dyson 	 */
71730dcfc09SJohn Dyson 
71830dcfc09SJohn Dyson 	vm_map_entry_link(map, prev_entry, new_entry);
71930dcfc09SJohn Dyson 	map->size += new_entry->end - new_entry->start;
72030dcfc09SJohn Dyson 
72130dcfc09SJohn Dyson 	/*
72230dcfc09SJohn Dyson 	 * Update the free space hint
72330dcfc09SJohn Dyson 	 */
72467bf6868SJohn Dyson 	if ((map->first_free == prev_entry) &&
72567bf6868SJohn Dyson 		(prev_entry->end >= new_entry->start))
72630dcfc09SJohn Dyson 		map->first_free = new_entry;
72730dcfc09SJohn Dyson 
728b5b40fa6SJohn Dyson 	default_pager_convert_to_swapq(object);
72930dcfc09SJohn Dyson 	return (KERN_SUCCESS);
73030dcfc09SJohn Dyson }
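
/*
 * Representative call (sketch; the map must already be locked, as required
 * above, and the object/offset pair, bounds and protections are whatever
 * the caller has on hand):
 *
 *	rv = vm_map_insert(map, object, offset, start, start + size,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE | MAP_COPY_NEEDED);
 *	if (rv != KERN_SUCCESS)
 *		... the range was outside the map or already occupied ...
 */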
73130dcfc09SJohn Dyson 
73230dcfc09SJohn Dyson /*
733df8bae1dSRodney W. Grimes  * Find sufficient space for `length' bytes in the given map, starting at
734df8bae1dSRodney W. Grimes  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
735df8bae1dSRodney W. Grimes  */
736df8bae1dSRodney W. Grimes int
737df8bae1dSRodney W. Grimes vm_map_findspace(map, start, length, addr)
738df8bae1dSRodney W. Grimes 	register vm_map_t map;
739df8bae1dSRodney W. Grimes 	register vm_offset_t start;
740df8bae1dSRodney W. Grimes 	vm_size_t length;
741df8bae1dSRodney W. Grimes 	vm_offset_t *addr;
742df8bae1dSRodney W. Grimes {
743df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry, next;
744df8bae1dSRodney W. Grimes 	register vm_offset_t end;
745df8bae1dSRodney W. Grimes 
746df8bae1dSRodney W. Grimes 	if (start < map->min_offset)
747df8bae1dSRodney W. Grimes 		start = map->min_offset;
748df8bae1dSRodney W. Grimes 	if (start > map->max_offset)
749df8bae1dSRodney W. Grimes 		return (1);
750df8bae1dSRodney W. Grimes 
751df8bae1dSRodney W. Grimes 	/*
7520d94caffSDavid Greenman 	 * Look for the first possible address; if there's already something
7530d94caffSDavid Greenman 	 * at this address, we have to start after it.
754df8bae1dSRodney W. Grimes 	 */
755df8bae1dSRodney W. Grimes 	if (start == map->min_offset) {
75667bf6868SJohn Dyson 		if ((entry = map->first_free) != &map->header)
757df8bae1dSRodney W. Grimes 			start = entry->end;
758df8bae1dSRodney W. Grimes 	} else {
759df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp;
7600d94caffSDavid Greenman 
761df8bae1dSRodney W. Grimes 		if (vm_map_lookup_entry(map, start, &tmp))
762df8bae1dSRodney W. Grimes 			start = tmp->end;
763df8bae1dSRodney W. Grimes 		entry = tmp;
764df8bae1dSRodney W. Grimes 	}
765df8bae1dSRodney W. Grimes 
766df8bae1dSRodney W. Grimes 	/*
7670d94caffSDavid Greenman 	 * Look through the rest of the map, trying to fit a new region in the
7680d94caffSDavid Greenman 	 * gap between existing regions, or after the very last region.
769df8bae1dSRodney W. Grimes 	 */
770df8bae1dSRodney W. Grimes 	for (;; start = (entry = next)->end) {
771df8bae1dSRodney W. Grimes 		/*
772df8bae1dSRodney W. Grimes 		 * Find the end of the proposed new region.  Be sure we didn't
773df8bae1dSRodney W. Grimes 		 * go beyond the end of the map, or wrap around the address;
774df8bae1dSRodney W. Grimes 		 * if so, we lose.  Otherwise, if this is the last entry, or
775df8bae1dSRodney W. Grimes 		 * if the proposed new region fits before the next entry, we
776df8bae1dSRodney W. Grimes 		 * win.
777df8bae1dSRodney W. Grimes 		 */
778df8bae1dSRodney W. Grimes 		end = start + length;
779df8bae1dSRodney W. Grimes 		if (end > map->max_offset || end < start)
780df8bae1dSRodney W. Grimes 			return (1);
781df8bae1dSRodney W. Grimes 		next = entry->next;
782df8bae1dSRodney W. Grimes 		if (next == &map->header || next->start >= end)
783df8bae1dSRodney W. Grimes 			break;
784df8bae1dSRodney W. Grimes 	}
785df8bae1dSRodney W. Grimes 	SAVE_HINT(map, entry);
786df8bae1dSRodney W. Grimes 	*addr = start;
7870d94caffSDavid Greenman 	if (map == kernel_map && round_page(start + length) > kernel_vm_end)
7880d94caffSDavid Greenman 		pmap_growkernel(round_page(start + length));
789df8bae1dSRodney W. Grimes 	return (0);
790df8bae1dSRodney W. Grimes }
791df8bae1dSRodney W. Grimes 
792df8bae1dSRodney W. Grimes /*
793df8bae1dSRodney W. Grimes  *	vm_map_find finds an unallocated region in the target address
794df8bae1dSRodney W. Grimes  *	map with the given length.  The search is defined to be
795df8bae1dSRodney W. Grimes  *	first-fit from the specified address; the region found is
796df8bae1dSRodney W. Grimes  *	returned in the same parameter.
797df8bae1dSRodney W. Grimes  *
798df8bae1dSRodney W. Grimes  */
799df8bae1dSRodney W. Grimes int
800bd7e5f99SJohn Dyson vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
801df8bae1dSRodney W. Grimes 	vm_map_t map;
802df8bae1dSRodney W. Grimes 	vm_object_t object;
803a316d390SJohn Dyson 	vm_ooffset_t offset;
804df8bae1dSRodney W. Grimes 	vm_offset_t *addr;	/* IN/OUT */
805df8bae1dSRodney W. Grimes 	vm_size_t length;
806df8bae1dSRodney W. Grimes 	boolean_t find_space;
807bd7e5f99SJohn Dyson 	vm_prot_t prot, max;
808bd7e5f99SJohn Dyson 	int cow;
809df8bae1dSRodney W. Grimes {
810df8bae1dSRodney W. Grimes 	register vm_offset_t start;
8118d6e8edeSDavid Greenman 	int result, s = 0;
812df8bae1dSRodney W. Grimes 
813df8bae1dSRodney W. Grimes 	start = *addr;
8148d6e8edeSDavid Greenman 
8159579ee64SDavid Greenman 	if (map == kmem_map || map == mb_map)
816b18bfc3dSJohn Dyson 		s = splvm();
8178d6e8edeSDavid Greenman 
818bea41bcfSDavid Greenman 	vm_map_lock(map);
819df8bae1dSRodney W. Grimes 	if (find_space) {
820df8bae1dSRodney W. Grimes 		if (vm_map_findspace(map, start, length, addr)) {
821df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
8229579ee64SDavid Greenman 			if (map == kmem_map || map == mb_map)
8238d6e8edeSDavid Greenman 				splx(s);
824df8bae1dSRodney W. Grimes 			return (KERN_NO_SPACE);
825df8bae1dSRodney W. Grimes 		}
826df8bae1dSRodney W. Grimes 		start = *addr;
827df8bae1dSRodney W. Grimes 	}
828bd7e5f99SJohn Dyson 	result = vm_map_insert(map, object, offset,
829bd7e5f99SJohn Dyson 		start, start + length, prot, max, cow);
830df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
8318d6e8edeSDavid Greenman 
8329579ee64SDavid Greenman 	if (map == kmem_map || map == mb_map)
8338d6e8edeSDavid Greenman 		splx(s);
8348d6e8edeSDavid Greenman 
835df8bae1dSRodney W. Grimes 	return (result);
836df8bae1dSRodney W. Grimes }
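
/*
 * A sketch of the usual anonymous-memory idiom built on vm_map_find
 * (`size' is assumed to be page aligned and the protections are just
 * examples; vm_map_find takes the map lock itself):
 *
 *	vm_offset_t addr;
 *
 *	addr = vm_map_min(map);
 *	if (vm_map_find(map, NULL, (vm_ooffset_t) 0, &addr, size, TRUE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)
 *		... no space in the map ...
 */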
837df8bae1dSRodney W. Grimes 
838df8bae1dSRodney W. Grimes /*
839b7b2aac2SJohn Dyson  *	vm_map_simplify_entry:
84067bf6868SJohn Dyson  *
841b7b2aac2SJohn Dyson  *	Simplify the given map entry by merging with either neighbor.
842df8bae1dSRodney W. Grimes  */
843b7b2aac2SJohn Dyson void
8440d94caffSDavid Greenman vm_map_simplify_entry(map, entry)
845df8bae1dSRodney W. Grimes 	vm_map_t map;
846df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
847df8bae1dSRodney W. Grimes {
848308c24baSJohn Dyson 	vm_map_entry_t next, prev;
849b7b2aac2SJohn Dyson 	vm_size_t prevsize, esize;
850df8bae1dSRodney W. Grimes 
851afa07f7eSJohn Dyson 	if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
852df8bae1dSRodney W. Grimes 		return;
853308c24baSJohn Dyson 
854308c24baSJohn Dyson 	prev = entry->prev;
855308c24baSJohn Dyson 	if (prev != &map->header) {
85667bf6868SJohn Dyson 		prevsize = prev->end - prev->start;
85767bf6868SJohn Dyson 		if ( (prev->end == entry->start) &&
85867bf6868SJohn Dyson 		     (prev->object.vm_object == entry->object.vm_object) &&
85967bf6868SJohn Dyson 		     (!prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
86067bf6868SJohn Dyson 		     (!prev->object.vm_object ||
86167bf6868SJohn Dyson 			(prev->offset + prevsize == entry->offset)) &&
862afa07f7eSJohn Dyson 		     (prev->eflags == entry->eflags) &&
86367bf6868SJohn Dyson 		     (prev->protection == entry->protection) &&
86467bf6868SJohn Dyson 		     (prev->max_protection == entry->max_protection) &&
86567bf6868SJohn Dyson 		     (prev->inheritance == entry->inheritance) &&
866b7b2aac2SJohn Dyson 		     (prev->wired_count == entry->wired_count)) {
867308c24baSJohn Dyson 			if (map->first_free == prev)
868308c24baSJohn Dyson 				map->first_free = entry;
869b18bfc3dSJohn Dyson 			if (map->hint == prev)
870b18bfc3dSJohn Dyson 				map->hint = entry;
871308c24baSJohn Dyson 			vm_map_entry_unlink(map, prev);
872308c24baSJohn Dyson 			entry->start = prev->start;
873308c24baSJohn Dyson 			entry->offset = prev->offset;
874b18bfc3dSJohn Dyson 			if (prev->object.vm_object)
875308c24baSJohn Dyson 				vm_object_deallocate(prev->object.vm_object);
876308c24baSJohn Dyson 			vm_map_entry_dispose(map, prev);
877308c24baSJohn Dyson 		}
878308c24baSJohn Dyson 	}
879de5f6a77SJohn Dyson 
880de5f6a77SJohn Dyson 	next = entry->next;
881308c24baSJohn Dyson 	if (next != &map->header) {
88267bf6868SJohn Dyson 		esize = entry->end - entry->start;
88367bf6868SJohn Dyson 		if ((entry->end == next->start) &&
88467bf6868SJohn Dyson 		    (next->object.vm_object == entry->object.vm_object) &&
88567bf6868SJohn Dyson 		    (!next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
88667bf6868SJohn Dyson 		     (!entry->object.vm_object ||
88767bf6868SJohn Dyson 			(entry->offset + esize == next->offset)) &&
888afa07f7eSJohn Dyson 		    (next->eflags == entry->eflags) &&
88967bf6868SJohn Dyson 		    (next->protection == entry->protection) &&
89067bf6868SJohn Dyson 		    (next->max_protection == entry->max_protection) &&
89167bf6868SJohn Dyson 		    (next->inheritance == entry->inheritance) &&
892b7b2aac2SJohn Dyson 		    (next->wired_count == entry->wired_count)) {
893308c24baSJohn Dyson 			if (map->first_free == next)
894308c24baSJohn Dyson 				map->first_free = entry;
895b18bfc3dSJohn Dyson 			if (map->hint == next)
896b18bfc3dSJohn Dyson 				map->hint = entry;
897de5f6a77SJohn Dyson 			vm_map_entry_unlink(map, next);
898de5f6a77SJohn Dyson 			entry->end = next->end;
899b18bfc3dSJohn Dyson 			if (next->object.vm_object)
900de5f6a77SJohn Dyson 				vm_object_deallocate(next->object.vm_object);
901de5f6a77SJohn Dyson 			vm_map_entry_dispose(map, next);
902df8bae1dSRodney W. Grimes 	        }
903df8bae1dSRodney W. Grimes 	}
904de5f6a77SJohn Dyson }
905df8bae1dSRodney W. Grimes /*
906df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
907df8bae1dSRodney W. Grimes  *
908df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
909df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
910df8bae1dSRodney W. Grimes  *	it splits the entry into two.
911df8bae1dSRodney W. Grimes  */
912df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \
913df8bae1dSRodney W. Grimes { \
914df8bae1dSRodney W. Grimes 	if (startaddr > entry->start) \
915df8bae1dSRodney W. Grimes 		_vm_map_clip_start(map, entry, startaddr); \
916df8bae1dSRodney W. Grimes }
917df8bae1dSRodney W. Grimes 
918df8bae1dSRodney W. Grimes /*
919df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
920df8bae1dSRodney W. Grimes  *	the entry must be split.
921df8bae1dSRodney W. Grimes  */
9220d94caffSDavid Greenman static void
9230d94caffSDavid Greenman _vm_map_clip_start(map, entry, start)
924df8bae1dSRodney W. Grimes 	register vm_map_t map;
925df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
926df8bae1dSRodney W. Grimes 	register vm_offset_t start;
927df8bae1dSRodney W. Grimes {
928df8bae1dSRodney W. Grimes 	register vm_map_entry_t new_entry;
929df8bae1dSRodney W. Grimes 
930df8bae1dSRodney W. Grimes 	/*
9310d94caffSDavid Greenman 	 * Split off the front portion -- note that we must insert the new
9320d94caffSDavid Greenman 	 * entry BEFORE this one, so that this entry has the specified
9330d94caffSDavid Greenman 	 * starting address.
934df8bae1dSRodney W. Grimes 	 */
935df8bae1dSRodney W. Grimes 
936f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, entry);
937f32dbbeeSJohn Dyson 
938df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
939df8bae1dSRodney W. Grimes 	*new_entry = *entry;
940df8bae1dSRodney W. Grimes 
941df8bae1dSRodney W. Grimes 	new_entry->end = start;
942df8bae1dSRodney W. Grimes 	entry->offset += (start - entry->start);
943df8bae1dSRodney W. Grimes 	entry->start = start;
944df8bae1dSRodney W. Grimes 
945df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry->prev, new_entry);
946df8bae1dSRodney W. Grimes 
947afa07f7eSJohn Dyson 	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
948df8bae1dSRodney W. Grimes 		vm_map_reference(new_entry->object.share_map);
949df8bae1dSRodney W. Grimes 	else
950df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
951df8bae1dSRodney W. Grimes }
952df8bae1dSRodney W. Grimes 
953df8bae1dSRodney W. Grimes /*
954df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
955df8bae1dSRodney W. Grimes  *
956df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
957df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
958df8bae1dSRodney W. Grimes  *	it splits the entry into two.
959df8bae1dSRodney W. Grimes  */
960df8bae1dSRodney W. Grimes 
961df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \
962df8bae1dSRodney W. Grimes { \
963df8bae1dSRodney W. Grimes 	if (endaddr < entry->end) \
964df8bae1dSRodney W. Grimes 		_vm_map_clip_end(map, entry, endaddr); \
965df8bae1dSRodney W. Grimes }
966df8bae1dSRodney W. Grimes 
967df8bae1dSRodney W. Grimes /*
968df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
969df8bae1dSRodney W. Grimes  *	the entry must be split.
970df8bae1dSRodney W. Grimes  */
9710d94caffSDavid Greenman static void
9720d94caffSDavid Greenman _vm_map_clip_end(map, entry, end)
973df8bae1dSRodney W. Grimes 	register vm_map_t map;
974df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
975df8bae1dSRodney W. Grimes 	register vm_offset_t end;
976df8bae1dSRodney W. Grimes {
977df8bae1dSRodney W. Grimes 	register vm_map_entry_t new_entry;
978df8bae1dSRodney W. Grimes 
979df8bae1dSRodney W. Grimes 	/*
9800d94caffSDavid Greenman 	 * Create a new entry and insert it AFTER the specified entry
981df8bae1dSRodney W. Grimes 	 */
982df8bae1dSRodney W. Grimes 
983df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
984df8bae1dSRodney W. Grimes 	*new_entry = *entry;
985df8bae1dSRodney W. Grimes 
986df8bae1dSRodney W. Grimes 	new_entry->start = entry->end = end;
987df8bae1dSRodney W. Grimes 	new_entry->offset += (end - entry->start);
988df8bae1dSRodney W. Grimes 
989df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry, new_entry);
990df8bae1dSRodney W. Grimes 
991afa07f7eSJohn Dyson 	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
992df8bae1dSRodney W. Grimes 		vm_map_reference(new_entry->object.share_map);
993df8bae1dSRodney W. Grimes 	else
994df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
995df8bae1dSRodney W. Grimes }
996df8bae1dSRodney W. Grimes 
997df8bae1dSRodney W. Grimes /*
998df8bae1dSRodney W. Grimes  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
999df8bae1dSRodney W. Grimes  *
1000df8bae1dSRodney W. Grimes  *	Asserts that the starting and ending region
1001df8bae1dSRodney W. Grimes  *	addresses fall within the valid range of the map.
1002df8bae1dSRodney W. Grimes  */
1003df8bae1dSRodney W. Grimes #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1004df8bae1dSRodney W. Grimes 		{					\
1005df8bae1dSRodney W. Grimes 		if (start < vm_map_min(map))		\
1006df8bae1dSRodney W. Grimes 			start = vm_map_min(map);	\
1007df8bae1dSRodney W. Grimes 		if (end > vm_map_max(map))		\
1008df8bae1dSRodney W. Grimes 			end = vm_map_max(map);		\
1009df8bae1dSRodney W. Grimes 		if (start > end)			\
1010df8bae1dSRodney W. Grimes 			start = end;			\
1011df8bae1dSRodney W. Grimes 		}
1012df8bae1dSRodney W. Grimes 
1013df8bae1dSRodney W. Grimes /*
1014df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
1015df8bae1dSRodney W. Grimes  *
1016df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
1017df8bae1dSRodney W. Grimes  *
1018df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
1019df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
1020df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
1021df8bae1dSRodney W. Grimes  *
1022df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
1023df8bae1dSRodney W. Grimes  *	within this range after calling vm_map_submap:
1024df8bae1dSRodney W. Grimes  *		vm_fault
1025df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
1026df8bae1dSRodney W. Grimes  *
1027df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
1028df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
1029df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
1030df8bae1dSRodney W. Grimes  */
1031df8bae1dSRodney W. Grimes int
1032df8bae1dSRodney W. Grimes vm_map_submap(map, start, end, submap)
1033df8bae1dSRodney W. Grimes 	register vm_map_t map;
1034df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1035df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1036df8bae1dSRodney W. Grimes 	vm_map_t submap;
1037df8bae1dSRodney W. Grimes {
1038df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1039df8bae1dSRodney W. Grimes 	register int result = KERN_INVALID_ARGUMENT;
1040df8bae1dSRodney W. Grimes 
1041df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1042df8bae1dSRodney W. Grimes 
1043df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1044df8bae1dSRodney W. Grimes 
1045df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1046df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
10470d94caffSDavid Greenman 	} else
1048df8bae1dSRodney W. Grimes 		entry = entry->next;
1049df8bae1dSRodney W. Grimes 
1050df8bae1dSRodney W. Grimes 	vm_map_clip_end(map, entry, end);
1051df8bae1dSRodney W. Grimes 
1052df8bae1dSRodney W. Grimes 	if ((entry->start == start) && (entry->end == end) &&
1053afa07f7eSJohn Dyson 	    ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
1054afa07f7eSJohn Dyson 	    (entry->object.vm_object == NULL)) {
1055afa07f7eSJohn Dyson 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1056df8bae1dSRodney W. Grimes 		vm_map_reference(entry->object.sub_map = submap);
1057df8bae1dSRodney W. Grimes 		result = KERN_SUCCESS;
1058df8bae1dSRodney W. Grimes 	}
1059df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1060df8bae1dSRodney W. Grimes 
1061df8bae1dSRodney W. Grimes 	return (result);
1062df8bae1dSRodney W. Grimes }
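
/*
 * Intended calling sequence (sketch), per the comment above: the range is
 * first reserved in the parent map with vm_map_find and only then marked
 * as being handled by the submap:
 *
 *	(void) vm_map_find(map, NULL, (vm_ooffset_t) 0, &start, size, FALSE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	(void) vm_map_submap(map, start, start + size, submap);
 */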
1063df8bae1dSRodney W. Grimes 
1064df8bae1dSRodney W. Grimes /*
1065df8bae1dSRodney W. Grimes  *	vm_map_protect:
1066df8bae1dSRodney W. Grimes  *
1067df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
1068df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
1069df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
1070df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
1071df8bae1dSRodney W. Grimes  */
1072df8bae1dSRodney W. Grimes int
1073df8bae1dSRodney W. Grimes vm_map_protect(map, start, end, new_prot, set_max)
1074df8bae1dSRodney W. Grimes 	register vm_map_t map;
1075df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1076df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1077df8bae1dSRodney W. Grimes 	register vm_prot_t new_prot;
1078df8bae1dSRodney W. Grimes 	register boolean_t set_max;
1079df8bae1dSRodney W. Grimes {
1080df8bae1dSRodney W. Grimes 	register vm_map_entry_t current;
1081df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1082df8bae1dSRodney W. Grimes 
1083df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1084df8bae1dSRodney W. Grimes 
1085df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1086df8bae1dSRodney W. Grimes 
1087df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1088df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1089b7b2aac2SJohn Dyson 	} else {
1090df8bae1dSRodney W. Grimes 		entry = entry->next;
1091b7b2aac2SJohn Dyson 	}
1092df8bae1dSRodney W. Grimes 
1093df8bae1dSRodney W. Grimes 	/*
10940d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
1095df8bae1dSRodney W. Grimes 	 */
1096df8bae1dSRodney W. Grimes 
1097df8bae1dSRodney W. Grimes 	current = entry;
1098df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1099afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1100a1f6d91cSDavid Greenman 			vm_map_unlock(map);
1101df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1102a1f6d91cSDavid Greenman 		}
1103df8bae1dSRodney W. Grimes 		if ((new_prot & current->max_protection) != new_prot) {
1104df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1105df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
1106df8bae1dSRodney W. Grimes 		}
1107df8bae1dSRodney W. Grimes 		current = current->next;
1108df8bae1dSRodney W. Grimes 	}
1109df8bae1dSRodney W. Grimes 
1110df8bae1dSRodney W. Grimes 	/*
11110d94caffSDavid Greenman 	 * Go back and fix up protections. [Note that clipping is not
11120d94caffSDavid Greenman 	 * necessary the second time.]
1113df8bae1dSRodney W. Grimes 	 */
1114df8bae1dSRodney W. Grimes 
1115df8bae1dSRodney W. Grimes 	current = entry;
1116df8bae1dSRodney W. Grimes 
1117df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1118df8bae1dSRodney W. Grimes 		vm_prot_t old_prot;
1119df8bae1dSRodney W. Grimes 
1120df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, current, end);
1121df8bae1dSRodney W. Grimes 
1122df8bae1dSRodney W. Grimes 		old_prot = current->protection;
1123df8bae1dSRodney W. Grimes 		if (set_max)
1124df8bae1dSRodney W. Grimes 			current->protection =
1125df8bae1dSRodney W. Grimes 			    (current->max_protection = new_prot) &
1126df8bae1dSRodney W. Grimes 			    old_prot;
1127df8bae1dSRodney W. Grimes 		else
1128df8bae1dSRodney W. Grimes 			current->protection = new_prot;
1129df8bae1dSRodney W. Grimes 
1130df8bae1dSRodney W. Grimes 		/*
11310d94caffSDavid Greenman 		 * Update physical map if necessary. Worry about copy-on-write
11320d94caffSDavid Greenman 		 * here -- CHECK THIS XXX
1133df8bae1dSRodney W. Grimes 		 */
1134df8bae1dSRodney W. Grimes 
1135df8bae1dSRodney W. Grimes 		if (current->protection != old_prot) {
1136afa07f7eSJohn Dyson #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1137df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
1138df8bae1dSRodney W. Grimes #define	max(a,b)	((a) > (b) ? (a) : (b))
1139df8bae1dSRodney W. Grimes 
1140afa07f7eSJohn Dyson 			if (current->eflags & MAP_ENTRY_IS_A_MAP) {
1141df8bae1dSRodney W. Grimes 				vm_map_entry_t share_entry;
1142df8bae1dSRodney W. Grimes 				vm_offset_t share_end;
1143df8bae1dSRodney W. Grimes 
1144df8bae1dSRodney W. Grimes 				vm_map_lock(current->object.share_map);
1145df8bae1dSRodney W. Grimes 				(void) vm_map_lookup_entry(
1146df8bae1dSRodney W. Grimes 				    current->object.share_map,
1147df8bae1dSRodney W. Grimes 				    current->offset,
1148df8bae1dSRodney W. Grimes 				    &share_entry);
1149df8bae1dSRodney W. Grimes 				share_end = current->offset +
1150df8bae1dSRodney W. Grimes 				    (current->end - current->start);
1151df8bae1dSRodney W. Grimes 				while ((share_entry !=
1152df8bae1dSRodney W. Grimes 					&current->object.share_map->header) &&
1153df8bae1dSRodney W. Grimes 				    (share_entry->start < share_end)) {
1154df8bae1dSRodney W. Grimes 
1155df8bae1dSRodney W. Grimes 					pmap_protect(map->pmap,
1156df8bae1dSRodney W. Grimes 					    (max(share_entry->start,
1157df8bae1dSRodney W. Grimes 						    current->offset) -
1158df8bae1dSRodney W. Grimes 						current->offset +
1159df8bae1dSRodney W. Grimes 						current->start),
1160df8bae1dSRodney W. Grimes 					    min(share_entry->end,
1161df8bae1dSRodney W. Grimes 						share_end) -
1162df8bae1dSRodney W. Grimes 					    current->offset +
1163df8bae1dSRodney W. Grimes 					    current->start,
1164df8bae1dSRodney W. Grimes 					    current->protection &
1165df8bae1dSRodney W. Grimes 					    MASK(share_entry));
1166df8bae1dSRodney W. Grimes 
1167df8bae1dSRodney W. Grimes 					share_entry = share_entry->next;
1168df8bae1dSRodney W. Grimes 				}
1169df8bae1dSRodney W. Grimes 				vm_map_unlock(current->object.share_map);
11700d94caffSDavid Greenman 			} else
1171df8bae1dSRodney W. Grimes 				pmap_protect(map->pmap, current->start,
1172df8bae1dSRodney W. Grimes 				    current->end,
1173df8bae1dSRodney W. Grimes 				    current->protection & MASK(entry));
1174df8bae1dSRodney W. Grimes #undef	max
1175df8bae1dSRodney W. Grimes #undef	MASK
1176df8bae1dSRodney W. Grimes 		}
1177df8bae1dSRodney W. Grimes 		current = current->next;
1178df8bae1dSRodney W. Grimes 	}
1179df8bae1dSRodney W. Grimes 
1180f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, entry);
1181df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1182df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1183df8bae1dSRodney W. Grimes }
1184df8bae1dSRodney W. Grimes 
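/*
 * Editorial sketch (not part of the original source): this is roughly
 * how an mprotect(2)-style caller would use vm_map_protect() on the
 * current process, after page-aligning the user-supplied range.
 * "addr" and "len" are hypothetical user arguments.
 *
 *	vm_offset_t start = trunc_page((vm_offset_t) addr);
 *	vm_offset_t end = round_page((vm_offset_t) addr + len);
 *
 *	switch (vm_map_protect(&p->p_vmspace->vm_map, start, end,
 *	    VM_PROT_READ, FALSE)) {
 *	case KERN_SUCCESS:
 *		return (0);
 *	case KERN_PROTECTION_FAILURE:
 *		return (EACCES);
 *	default:
 *		return (EINVAL);
 *	}
 */
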
1185df8bae1dSRodney W. Grimes /*
1186867a482dSJohn Dyson  *	vm_map_madvise:
1187867a482dSJohn Dyson  *
1188867a482dSJohn Dyson  *	This routine traverses a process's map, handling the madvise
1189867a482dSJohn Dyson  *	system call.
1190867a482dSJohn Dyson  */
1191867a482dSJohn Dyson void
1192867a482dSJohn Dyson vm_map_madvise(map, pmap, start, end, advise)
1193867a482dSJohn Dyson 	vm_map_t map;
1194867a482dSJohn Dyson 	pmap_t pmap;
1195867a482dSJohn Dyson 	vm_offset_t start, end;
1196867a482dSJohn Dyson 	int advise;
1197867a482dSJohn Dyson {
1198867a482dSJohn Dyson 	register vm_map_entry_t current;
1199867a482dSJohn Dyson 	vm_map_entry_t entry;
1200867a482dSJohn Dyson 
1201867a482dSJohn Dyson 	vm_map_lock(map);
1202867a482dSJohn Dyson 
1203867a482dSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
1204867a482dSJohn Dyson 
1205867a482dSJohn Dyson 	if (vm_map_lookup_entry(map, start, &entry)) {
1206867a482dSJohn Dyson 		vm_map_clip_start(map, entry, start);
1207867a482dSJohn Dyson 	} else
1208867a482dSJohn Dyson 		entry = entry->next;
1209867a482dSJohn Dyson 
1210867a482dSJohn Dyson 	for (current = entry;
1211867a482dSJohn Dyson 	    (current != &map->header) && (current->start < end);
1212867a482dSJohn Dyson 	    current = current->next) {
1213fed9a903SJohn Dyson 		vm_size_t size = current->end - current->start;
1214fed9a903SJohn Dyson 
1215afa07f7eSJohn Dyson 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1216867a482dSJohn Dyson 			continue;
1217867a482dSJohn Dyson 		}
1218fed9a903SJohn Dyson 
1219fed9a903SJohn Dyson 		/*
1220fed9a903SJohn Dyson 		 * Create an object if needed
1221fed9a903SJohn Dyson 		 */
1222fed9a903SJohn Dyson 		if (current->object.vm_object == NULL) {
1223fed9a903SJohn Dyson 			vm_object_t object;
1224fed9a903SJohn Dyson 			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
1225fed9a903SJohn Dyson 			current->object.vm_object = object;
1226fed9a903SJohn Dyson 			current->offset = 0;
1227fed9a903SJohn Dyson 		}
1228fed9a903SJohn Dyson 
1229867a482dSJohn Dyson 		vm_map_clip_end(map, current, end);
1230867a482dSJohn Dyson 		switch (advise) {
1231867a482dSJohn Dyson 		case MADV_NORMAL:
1232867a482dSJohn Dyson 			current->object.vm_object->behavior = OBJ_NORMAL;
1233867a482dSJohn Dyson 			break;
1234867a482dSJohn Dyson 		case MADV_SEQUENTIAL:
1235867a482dSJohn Dyson 			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
1236867a482dSJohn Dyson 			break;
1237867a482dSJohn Dyson 		case MADV_RANDOM:
1238867a482dSJohn Dyson 			current->object.vm_object->behavior = OBJ_RANDOM;
1239867a482dSJohn Dyson 			break;
1240867a482dSJohn Dyson 		/*
1241867a482dSJohn Dyson 		 * Right now, we could handle DONTNEED and WILLNEED with common code.
1242867a482dSJohn Dyson 		 * They are mostly the same, except for the potential async reads (NYI).
1243867a482dSJohn Dyson 		 */
12440a47b48bSJohn Dyson 		case MADV_FREE:
1245867a482dSJohn Dyson 		case MADV_DONTNEED:
1246867a482dSJohn Dyson 			{
1247867a482dSJohn Dyson 				vm_pindex_t pindex;
1248867a482dSJohn Dyson 				int count;
1249fed9a903SJohn Dyson 				size = current->end - current->start;
1250867a482dSJohn Dyson 				pindex = OFF_TO_IDX(current->offset);
1251867a482dSJohn Dyson 				count = OFF_TO_IDX(size);
1252867a482dSJohn Dyson 				/*
1253867a482dSJohn Dyson 				 * MADV_DONTNEED removes the page from all
1254867a482dSJohn Dyson 				 * pmaps, so pmap_remove is not necessary.
1255867a482dSJohn Dyson 				 */
1256867a482dSJohn Dyson 				vm_object_madvise(current->object.vm_object,
1257867a482dSJohn Dyson 					pindex, count, advise);
1258867a482dSJohn Dyson 			}
1259867a482dSJohn Dyson 			break;
1260867a482dSJohn Dyson 
1261867a482dSJohn Dyson 		case MADV_WILLNEED:
1262867a482dSJohn Dyson 			{
1263867a482dSJohn Dyson 				vm_pindex_t pindex;
1264867a482dSJohn Dyson 				int count;
1265fed9a903SJohn Dyson 				size = current->end - current->start;
12664334b0d8SJohn Dyson 				pindex = OFF_TO_IDX(current->offset);
1267867a482dSJohn Dyson 				count = OFF_TO_IDX(size);
1268867a482dSJohn Dyson 				vm_object_madvise(current->object.vm_object,
1269867a482dSJohn Dyson 					pindex, count, advise);
1270867a482dSJohn Dyson 				pmap_object_init_pt(pmap, current->start,
1271867a482dSJohn Dyson 					current->object.vm_object, pindex,
1272867a482dSJohn Dyson 					(count << PAGE_SHIFT), 0);
1273867a482dSJohn Dyson 			}
1274867a482dSJohn Dyson 			break;
1275867a482dSJohn Dyson 
1276867a482dSJohn Dyson 		default:
1277867a482dSJohn Dyson 			break;
1278867a482dSJohn Dyson 		}
1279867a482dSJohn Dyson 	}
1280867a482dSJohn Dyson 
1281867a482dSJohn Dyson 	vm_map_simplify_entry(map, entry);
1282867a482dSJohn Dyson 	vm_map_unlock(map);
1283867a482dSJohn Dyson 	return;
1284867a482dSJohn Dyson }
1285867a482dSJohn Dyson 
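/*
 * Editorial sketch (not part of the original source): the madvise(2)
 * system call would typically hand its page-aligned range straight to
 * vm_map_madvise(), passing the calling process's map and pmap.
 * "addr", "len" and "behav" are hypothetical user arguments.
 *
 *	vm_map_madvise(&p->p_vmspace->vm_map, &p->p_vmspace->vm_pmap,
 *	    trunc_page((vm_offset_t) addr),
 *	    round_page((vm_offset_t) addr + len), behav);
 */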
1286867a482dSJohn Dyson 
1287867a482dSJohn Dyson /*
1288df8bae1dSRodney W. Grimes  *	vm_map_inherit:
1289df8bae1dSRodney W. Grimes  *
1290df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
1291df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
1292df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
1293df8bae1dSRodney W. Grimes  *	child maps at the time of vm_map_fork.
1294df8bae1dSRodney W. Grimes  */
1295df8bae1dSRodney W. Grimes int
1296df8bae1dSRodney W. Grimes vm_map_inherit(map, start, end, new_inheritance)
1297df8bae1dSRodney W. Grimes 	register vm_map_t map;
1298df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1299df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1300df8bae1dSRodney W. Grimes 	register vm_inherit_t new_inheritance;
1301df8bae1dSRodney W. Grimes {
1302df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1303df8bae1dSRodney W. Grimes 	vm_map_entry_t temp_entry;
1304df8bae1dSRodney W. Grimes 
1305df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
1306df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
1307df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
1308df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
1309df8bae1dSRodney W. Grimes 		break;
1310df8bae1dSRodney W. Grimes 	default:
1311df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
1312df8bae1dSRodney W. Grimes 	}
1313df8bae1dSRodney W. Grimes 
1314df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1315df8bae1dSRodney W. Grimes 
1316df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1317df8bae1dSRodney W. Grimes 
1318df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1319df8bae1dSRodney W. Grimes 		entry = temp_entry;
1320df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
13210d94caffSDavid Greenman 	} else
1322df8bae1dSRodney W. Grimes 		entry = temp_entry->next;
1323df8bae1dSRodney W. Grimes 
1324df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1325df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1326df8bae1dSRodney W. Grimes 
1327df8bae1dSRodney W. Grimes 		entry->inheritance = new_inheritance;
1328df8bae1dSRodney W. Grimes 
1329df8bae1dSRodney W. Grimes 		entry = entry->next;
1330df8bae1dSRodney W. Grimes 	}
1331df8bae1dSRodney W. Grimes 
1332f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, temp_entry);
1333df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1334df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1335df8bae1dSRodney W. Grimes }
1336df8bae1dSRodney W. Grimes 
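/*
 * Editorial sketch (not part of the original source): a minherit(2)-
 * style caller maps its argument onto one of the VM_INHERIT_* values
 * and applies it to the current process; at vmspace_fork() time the
 * child then shares, copies, or omits the range accordingly.
 *
 *	if (vm_map_inherit(&p->p_vmspace->vm_map,
 *	    trunc_page((vm_offset_t) addr),
 *	    round_page((vm_offset_t) addr + len),
 *	    VM_INHERIT_SHARE) != KERN_SUCCESS)
 *		return (EINVAL);
 */
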
1337df8bae1dSRodney W. Grimes /*
13387aaaa4fdSJohn Dyson  * Implement the semantics of mlock(2)/munlock(2) (wire/unwire user pages)
13397aaaa4fdSJohn Dyson  */
13407aaaa4fdSJohn Dyson int
13417aaaa4fdSJohn Dyson vm_map_user_pageable(map, start, end, new_pageable)
13427aaaa4fdSJohn Dyson 	register vm_map_t map;
13437aaaa4fdSJohn Dyson 	register vm_offset_t start;
13447aaaa4fdSJohn Dyson 	register vm_offset_t end;
13457aaaa4fdSJohn Dyson 	register boolean_t new_pageable;
13467aaaa4fdSJohn Dyson {
13477aaaa4fdSJohn Dyson 	register vm_map_entry_t entry;
13487aaaa4fdSJohn Dyson 	vm_map_entry_t start_entry;
13497aaaa4fdSJohn Dyson 	register vm_offset_t failed = 0;
13507aaaa4fdSJohn Dyson 	int rv;
13517aaaa4fdSJohn Dyson 
13527aaaa4fdSJohn Dyson 	vm_map_lock(map);
13537aaaa4fdSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
13547aaaa4fdSJohn Dyson 
13557aaaa4fdSJohn Dyson 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
13567aaaa4fdSJohn Dyson 		vm_map_unlock(map);
13577aaaa4fdSJohn Dyson 		return (KERN_INVALID_ADDRESS);
13587aaaa4fdSJohn Dyson 	}
13597aaaa4fdSJohn Dyson 
13607aaaa4fdSJohn Dyson 	if (new_pageable) {
13617aaaa4fdSJohn Dyson 
13627aaaa4fdSJohn Dyson 		entry = start_entry;
13637aaaa4fdSJohn Dyson 		vm_map_clip_start(map, entry, start);
13647aaaa4fdSJohn Dyson 
13657aaaa4fdSJohn Dyson 		/*
13667aaaa4fdSJohn Dyson 		 * Now decrement the wiring count for each region. If a region
13677aaaa4fdSJohn Dyson 		 * becomes completely unwired, unwire its physical pages and
13687aaaa4fdSJohn Dyson 		 * mappings.
13697aaaa4fdSJohn Dyson 		 */
13707aaaa4fdSJohn Dyson 		lock_set_recursive(&map->lock);
13717aaaa4fdSJohn Dyson 
13727aaaa4fdSJohn Dyson 		entry = start_entry;
13737aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
1374afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
13757aaaa4fdSJohn Dyson 				vm_map_clip_end(map, entry, end);
1376afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
13777aaaa4fdSJohn Dyson 				entry->wired_count--;
13787aaaa4fdSJohn Dyson 				if (entry->wired_count == 0)
13797aaaa4fdSJohn Dyson 					vm_fault_unwire(map, entry->start, entry->end);
13807aaaa4fdSJohn Dyson 			}
13817aaaa4fdSJohn Dyson 			entry = entry->next;
13827aaaa4fdSJohn Dyson 		}
13837aaaa4fdSJohn Dyson 		vm_map_simplify_entry(map, start_entry);
13847aaaa4fdSJohn Dyson 		lock_clear_recursive(&map->lock);
13857aaaa4fdSJohn Dyson 	} else {
13867aaaa4fdSJohn Dyson 
13877aaaa4fdSJohn Dyson 		/*
13887aaaa4fdSJohn Dyson 		 * Because of the possibility of blocking, etc., we restart
13897aaaa4fdSJohn Dyson 		 * through the process's map entries from the beginning so that
13907aaaa4fdSJohn Dyson 		 * we don't end up depending on a map entry that could have
13917aaaa4fdSJohn Dyson 		 * changed.
13927aaaa4fdSJohn Dyson 		 */
13937aaaa4fdSJohn Dyson 	rescan:
13947aaaa4fdSJohn Dyson 
13957aaaa4fdSJohn Dyson 		entry = start_entry;
13967aaaa4fdSJohn Dyson 
13977aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
13987aaaa4fdSJohn Dyson 
1399afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
14007aaaa4fdSJohn Dyson 				entry = entry->next;
14017aaaa4fdSJohn Dyson 				continue;
14027aaaa4fdSJohn Dyson 			}
14037aaaa4fdSJohn Dyson 
14047aaaa4fdSJohn Dyson 			if (entry->wired_count != 0) {
14057aaaa4fdSJohn Dyson 				entry->wired_count++;
1406afa07f7eSJohn Dyson 				entry->eflags |= MAP_ENTRY_USER_WIRED;
14077aaaa4fdSJohn Dyson 				entry = entry->next;
14087aaaa4fdSJohn Dyson 				continue;
14097aaaa4fdSJohn Dyson 			}
14107aaaa4fdSJohn Dyson 
14117aaaa4fdSJohn Dyson 			/* At this point the entry is being newly wired. */
14127aaaa4fdSJohn Dyson 
1413afa07f7eSJohn Dyson 			if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1414afa07f7eSJohn Dyson 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
14157aaaa4fdSJohn Dyson 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
14167aaaa4fdSJohn Dyson 
14177aaaa4fdSJohn Dyson 					vm_object_shadow(&entry->object.vm_object,
14187aaaa4fdSJohn Dyson 					    &entry->offset,
14197aaaa4fdSJohn Dyson 					    OFF_TO_IDX(entry->end
14207aaaa4fdSJohn Dyson 						- entry->start));
1421afa07f7eSJohn Dyson 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
14227aaaa4fdSJohn Dyson 
14237aaaa4fdSJohn Dyson 				} else if (entry->object.vm_object == NULL) {
14247aaaa4fdSJohn Dyson 
14257aaaa4fdSJohn Dyson 					entry->object.vm_object =
14267aaaa4fdSJohn Dyson 					    vm_object_allocate(OBJT_DEFAULT,
14277aaaa4fdSJohn Dyson 						OFF_TO_IDX(entry->end - entry->start));
14287aaaa4fdSJohn Dyson 					entry->offset = (vm_offset_t) 0;
14297aaaa4fdSJohn Dyson 
14307aaaa4fdSJohn Dyson 				}
14317aaaa4fdSJohn Dyson 				default_pager_convert_to_swapq(entry->object.vm_object);
14327aaaa4fdSJohn Dyson 			}
14337aaaa4fdSJohn Dyson 
14347aaaa4fdSJohn Dyson 			vm_map_clip_start(map, entry, start);
14357aaaa4fdSJohn Dyson 			vm_map_clip_end(map, entry, end);
14367aaaa4fdSJohn Dyson 
14377aaaa4fdSJohn Dyson 			entry->wired_count++;
1438afa07f7eSJohn Dyson 			entry->eflags |= MAP_ENTRY_USER_WIRED;
14397aaaa4fdSJohn Dyson 
14407aaaa4fdSJohn Dyson 			/* First we need to allow map modifications */
14417aaaa4fdSJohn Dyson 			lock_set_recursive(&map->lock);
14427aaaa4fdSJohn Dyson 			lock_write_to_read(&map->lock);
14437aaaa4fdSJohn Dyson 
14447aaaa4fdSJohn Dyson 			rv = vm_fault_user_wire(map, entry->start, entry->end);
14457aaaa4fdSJohn Dyson 			if (rv) {
14467aaaa4fdSJohn Dyson 
14477aaaa4fdSJohn Dyson 				entry->wired_count--;
1448afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
14497aaaa4fdSJohn Dyson 
14507aaaa4fdSJohn Dyson 				lock_clear_recursive(&map->lock);
14517aaaa4fdSJohn Dyson 				vm_map_unlock(map);
14527aaaa4fdSJohn Dyson 
14537aaaa4fdSJohn Dyson 				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
14547aaaa4fdSJohn Dyson 				return rv;
14557aaaa4fdSJohn Dyson 			}
14567aaaa4fdSJohn Dyson 
14577aaaa4fdSJohn Dyson 			lock_clear_recursive(&map->lock);
14587aaaa4fdSJohn Dyson 			vm_map_unlock(map);
14597aaaa4fdSJohn Dyson 			vm_map_lock(map);
14607aaaa4fdSJohn Dyson 
14617aaaa4fdSJohn Dyson 			goto rescan;
14627aaaa4fdSJohn Dyson 		}
14637aaaa4fdSJohn Dyson 	}
14647aaaa4fdSJohn Dyson 	vm_map_unlock(map);
14657aaaa4fdSJohn Dyson 	return KERN_SUCCESS;
14667aaaa4fdSJohn Dyson }
14677aaaa4fdSJohn Dyson 
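/*
 * Editorial sketch (not part of the original source): mlock(2) and
 * munlock(2) reduce, roughly, to a single call here, with new_pageable
 * FALSE to wire the range and TRUE to unwire it again.  "start" and
 * "end" are assumed to be page-aligned user addresses.
 *
 *	rv = vm_map_user_pageable(&p->p_vmspace->vm_map, start, end,
 *	    FALSE);
 *	return (rv == KERN_SUCCESS ? 0 : ENOMEM);
 */
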
14687aaaa4fdSJohn Dyson /*
1469df8bae1dSRodney W. Grimes  *	vm_map_pageable:
1470df8bae1dSRodney W. Grimes  *
1471df8bae1dSRodney W. Grimes  *	Sets the pageability of the specified address
1472df8bae1dSRodney W. Grimes  *	range in the target map.  Regions specified
1473df8bae1dSRodney W. Grimes  *	as not pageable require locked-down physical
1474df8bae1dSRodney W. Grimes  *	memory and physical page maps.
1475df8bae1dSRodney W. Grimes  *
1476df8bae1dSRodney W. Grimes  *	The map must not be locked, but a reference
1477df8bae1dSRodney W. Grimes  *	must remain to the map throughout the call.
1478df8bae1dSRodney W. Grimes  */
1479df8bae1dSRodney W. Grimes int
1480df8bae1dSRodney W. Grimes vm_map_pageable(map, start, end, new_pageable)
1481df8bae1dSRodney W. Grimes 	register vm_map_t map;
1482df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1483df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1484df8bae1dSRodney W. Grimes 	register boolean_t new_pageable;
1485df8bae1dSRodney W. Grimes {
1486df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1487df8bae1dSRodney W. Grimes 	vm_map_entry_t start_entry;
148826f9a767SRodney W. Grimes 	register vm_offset_t failed = 0;
1489df8bae1dSRodney W. Grimes 	int rv;
1490df8bae1dSRodney W. Grimes 
1491df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1492df8bae1dSRodney W. Grimes 
1493df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1494df8bae1dSRodney W. Grimes 
1495df8bae1dSRodney W. Grimes 	/*
14960d94caffSDavid Greenman 	 * Only one pageability change may take place at one time, since
14970d94caffSDavid Greenman 	 * vm_fault assumes it will be called only once for each
14980d94caffSDavid Greenman 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
14990d94caffSDavid Greenman 	 * changing the pageability for the entire region.  We do so before
15000d94caffSDavid Greenman 	 * making any changes.
1501df8bae1dSRodney W. Grimes 	 */
1502df8bae1dSRodney W. Grimes 
1503df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1504df8bae1dSRodney W. Grimes 		vm_map_unlock(map);
1505df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1506df8bae1dSRodney W. Grimes 	}
1507df8bae1dSRodney W. Grimes 	entry = start_entry;
1508df8bae1dSRodney W. Grimes 
1509df8bae1dSRodney W. Grimes 	/*
15100d94caffSDavid Greenman 	 * Actions are rather different for wiring and unwiring, so we have
15110d94caffSDavid Greenman 	 * two separate cases.
1512df8bae1dSRodney W. Grimes 	 */
1513df8bae1dSRodney W. Grimes 
1514df8bae1dSRodney W. Grimes 	if (new_pageable) {
1515df8bae1dSRodney W. Grimes 
1516df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1517df8bae1dSRodney W. Grimes 
1518df8bae1dSRodney W. Grimes 		/*
15190d94caffSDavid Greenman 		 * Unwiring.  First ensure that the range to be unwired is
15200d94caffSDavid Greenman 		 * really wired down and that there are no holes.
1521df8bae1dSRodney W. Grimes 		 */
1522df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1523df8bae1dSRodney W. Grimes 
1524df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0 ||
1525df8bae1dSRodney W. Grimes 			    (entry->end < end &&
1526df8bae1dSRodney W. Grimes 				(entry->next == &map->header ||
1527df8bae1dSRodney W. Grimes 				    entry->next->start > entry->end))) {
1528df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1529df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1530df8bae1dSRodney W. Grimes 			}
1531df8bae1dSRodney W. Grimes 			entry = entry->next;
1532df8bae1dSRodney W. Grimes 		}
1533df8bae1dSRodney W. Grimes 
1534df8bae1dSRodney W. Grimes 		/*
15350d94caffSDavid Greenman 		 * Now decrement the wiring count for each region. If a region
15360d94caffSDavid Greenman 		 * becomes completely unwired, unwire its physical pages and
15370d94caffSDavid Greenman 		 * mappings.
1538df8bae1dSRodney W. Grimes 		 */
1539df8bae1dSRodney W. Grimes 		lock_set_recursive(&map->lock);
1540df8bae1dSRodney W. Grimes 
1541df8bae1dSRodney W. Grimes 		entry = start_entry;
1542df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1543df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1544df8bae1dSRodney W. Grimes 
1545df8bae1dSRodney W. Grimes 			entry->wired_count--;
1546df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0)
1547df8bae1dSRodney W. Grimes 				vm_fault_unwire(map, entry->start, entry->end);
1548df8bae1dSRodney W. Grimes 
1549df8bae1dSRodney W. Grimes 			entry = entry->next;
1550df8bae1dSRodney W. Grimes 		}
1551f32dbbeeSJohn Dyson 		vm_map_simplify_entry(map, start_entry);
1552df8bae1dSRodney W. Grimes 		lock_clear_recursive(&map->lock);
15530d94caffSDavid Greenman 	} else {
1554df8bae1dSRodney W. Grimes 		/*
1555df8bae1dSRodney W. Grimes 		 * Wiring.  We must do this in two passes:
1556df8bae1dSRodney W. Grimes 		 *
15570d94caffSDavid Greenman 		 * 1.  Holding the write lock, we create any shadow or zero-fill
15580d94caffSDavid Greenman 		 * objects that need to be created. Then we clip each map
15590d94caffSDavid Greenman 		 * entry to the region to be wired and increment its wiring
15600d94caffSDavid Greenman 		 * count.  We create objects before clipping the map entries
1561df8bae1dSRodney W. Grimes 		 * to avoid object proliferation.
1562df8bae1dSRodney W. Grimes 		 *
15630d94caffSDavid Greenman 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
15640d94caffSDavid Greenman 		 * fault in the pages for any newly wired area (wired_count is
15650d94caffSDavid Greenman 		 * 1).
1566df8bae1dSRodney W. Grimes 		 *
15670d94caffSDavid Greenman 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
156824a1cce3SDavid Greenman 		 * deadlock with another process that may have faulted on one
15690d94caffSDavid Greenman 		 * of the pages to be wired (it would mark the page busy,
15700d94caffSDavid Greenman 		 * blocking us, then in turn block on the map lock that we
15710d94caffSDavid Greenman 		 * hold).  Because of problems in the recursive lock package,
15720d94caffSDavid Greenman 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
15730d94caffSDavid Greenman 		 * any actions that require the write lock must be done
15740d94caffSDavid Greenman 		 * beforehand.  Because we keep the read lock on the map, the
15750d94caffSDavid Greenman 		 * copy-on-write status of the entries we modify here cannot
15760d94caffSDavid Greenman 		 * change.
1577df8bae1dSRodney W. Grimes 		 */
1578df8bae1dSRodney W. Grimes 
1579df8bae1dSRodney W. Grimes 		/*
1580df8bae1dSRodney W. Grimes 		 * Pass 1.
1581df8bae1dSRodney W. Grimes 		 */
1582df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1583df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0) {
1584df8bae1dSRodney W. Grimes 
1585df8bae1dSRodney W. Grimes 				/*
1586df8bae1dSRodney W. Grimes 				 * Perform actions of vm_map_lookup that need
1587df8bae1dSRodney W. Grimes 				 * the write lock on the map: create a shadow
1588df8bae1dSRodney W. Grimes 				 * object for a copy-on-write region, or an
1589df8bae1dSRodney W. Grimes 				 * object for a zero-fill region.
1590df8bae1dSRodney W. Grimes 				 *
1591df8bae1dSRodney W. Grimes 				 * We don't have to do this for entries that
15920d94caffSDavid Greenman 				 * point to sharing maps, because we won't
15930d94caffSDavid Greenman 				 * hold the lock on the sharing map.
1594df8bae1dSRodney W. Grimes 				 */
1595afa07f7eSJohn Dyson 				if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1596afa07f7eSJohn Dyson 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1597b5b40fa6SJohn Dyson 					if (copyflag &&
1598df8bae1dSRodney W. Grimes 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1599df8bae1dSRodney W. Grimes 
1600df8bae1dSRodney W. Grimes 						vm_object_shadow(&entry->object.vm_object,
1601df8bae1dSRodney W. Grimes 						    &entry->offset,
1602a316d390SJohn Dyson 						    OFF_TO_IDX(entry->end
1603df8bae1dSRodney W. Grimes 							- entry->start));
1604afa07f7eSJohn Dyson 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
16050d94caffSDavid Greenman 					} else if (entry->object.vm_object == NULL) {
1606df8bae1dSRodney W. Grimes 						entry->object.vm_object =
1607a316d390SJohn Dyson 						    vm_object_allocate(OBJT_DEFAULT,
1608a316d390SJohn Dyson 							OFF_TO_IDX(entry->end - entry->start));
1609df8bae1dSRodney W. Grimes 						entry->offset = (vm_offset_t) 0;
1610df8bae1dSRodney W. Grimes 					}
1611b5b40fa6SJohn Dyson 					default_pager_convert_to_swapq(entry->object.vm_object);
1612df8bae1dSRodney W. Grimes 				}
1613df8bae1dSRodney W. Grimes 			}
1614df8bae1dSRodney W. Grimes 			vm_map_clip_start(map, entry, start);
1615df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1616df8bae1dSRodney W. Grimes 			entry->wired_count++;
1617df8bae1dSRodney W. Grimes 
1618df8bae1dSRodney W. Grimes 			/*
1619df8bae1dSRodney W. Grimes 			 * Check for holes
1620df8bae1dSRodney W. Grimes 			 */
1621df8bae1dSRodney W. Grimes 			if (entry->end < end &&
1622df8bae1dSRodney W. Grimes 			    (entry->next == &map->header ||
1623df8bae1dSRodney W. Grimes 				entry->next->start > entry->end)) {
1624df8bae1dSRodney W. Grimes 				/*
16250d94caffSDavid Greenman 				 * Found one.  Object creation actions do not
16260d94caffSDavid Greenman 				 * need to be undone, but the wired counts
16270d94caffSDavid Greenman 				 * need to be restored.
1628df8bae1dSRodney W. Grimes 				 */
1629df8bae1dSRodney W. Grimes 				while (entry != &map->header && entry->end > start) {
1630df8bae1dSRodney W. Grimes 					entry->wired_count--;
1631df8bae1dSRodney W. Grimes 					entry = entry->prev;
1632df8bae1dSRodney W. Grimes 				}
1633df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1634df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1635df8bae1dSRodney W. Grimes 			}
1636df8bae1dSRodney W. Grimes 			entry = entry->next;
1637df8bae1dSRodney W. Grimes 		}
1638df8bae1dSRodney W. Grimes 
1639df8bae1dSRodney W. Grimes 		/*
1640df8bae1dSRodney W. Grimes 		 * Pass 2.
1641df8bae1dSRodney W. Grimes 		 */
1642df8bae1dSRodney W. Grimes 
1643df8bae1dSRodney W. Grimes 		/*
1644df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1645df8bae1dSRodney W. Grimes 		 *
164624a1cce3SDavid Greenman 		 * If we are wiring in the kernel map or a submap of it,
164724a1cce3SDavid Greenman 		 * unlock the map to avoid deadlocks.  We trust that the
164824a1cce3SDavid Greenman 		 * kernel is well-behaved, and therefore will not do
164924a1cce3SDavid Greenman 		 * anything destructive to this region of the map while
165024a1cce3SDavid Greenman 		 * we have it unlocked.  We cannot trust user processes
165124a1cce3SDavid Greenman 		 * to do the same.
1652df8bae1dSRodney W. Grimes 		 *
1653df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1654df8bae1dSRodney W. Grimes 		 */
1655df8bae1dSRodney W. Grimes 		if (vm_map_pmap(map) == kernel_pmap) {
1656df8bae1dSRodney W. Grimes 			vm_map_unlock(map);	/* trust me ... */
16570d94caffSDavid Greenman 		} else {
1658df8bae1dSRodney W. Grimes 			lock_set_recursive(&map->lock);
1659df8bae1dSRodney W. Grimes 			lock_write_to_read(&map->lock);
1660df8bae1dSRodney W. Grimes 		}
1661df8bae1dSRodney W. Grimes 
1662df8bae1dSRodney W. Grimes 		rv = 0;
1663df8bae1dSRodney W. Grimes 		entry = start_entry;
1664df8bae1dSRodney W. Grimes 		while (entry != &map->header && entry->start < end) {
1665df8bae1dSRodney W. Grimes 			/*
16660d94caffSDavid Greenman 			 * If vm_fault_wire fails for any page we need to undo
16670d94caffSDavid Greenman 			 * what has been done.  We decrement the wiring count
16680d94caffSDavid Greenman 			 * for those pages which have not yet been wired (now)
16690d94caffSDavid Greenman 			 * and unwire those that have (later).
1670df8bae1dSRodney W. Grimes 			 *
1671df8bae1dSRodney W. Grimes 			 * XXX this violates the locking protocol on the map,
1672df8bae1dSRodney W. Grimes 			 * needs to be fixed.
1673df8bae1dSRodney W. Grimes 			 */
1674df8bae1dSRodney W. Grimes 			if (rv)
1675df8bae1dSRodney W. Grimes 				entry->wired_count--;
1676df8bae1dSRodney W. Grimes 			else if (entry->wired_count == 1) {
1677df8bae1dSRodney W. Grimes 				rv = vm_fault_wire(map, entry->start, entry->end);
1678df8bae1dSRodney W. Grimes 				if (rv) {
1679df8bae1dSRodney W. Grimes 					failed = entry->start;
1680df8bae1dSRodney W. Grimes 					entry->wired_count--;
1681df8bae1dSRodney W. Grimes 				}
1682df8bae1dSRodney W. Grimes 			}
1683df8bae1dSRodney W. Grimes 			entry = entry->next;
1684df8bae1dSRodney W. Grimes 		}
1685df8bae1dSRodney W. Grimes 
1686df8bae1dSRodney W. Grimes 		if (vm_map_pmap(map) == kernel_pmap) {
1687df8bae1dSRodney W. Grimes 			vm_map_lock(map);
16880d94caffSDavid Greenman 		} else {
1689df8bae1dSRodney W. Grimes 			lock_clear_recursive(&map->lock);
1690df8bae1dSRodney W. Grimes 		}
1691df8bae1dSRodney W. Grimes 		if (rv) {
1692df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1693df8bae1dSRodney W. Grimes 			(void) vm_map_pageable(map, start, failed, TRUE);
1694df8bae1dSRodney W. Grimes 			return (rv);
1695df8bae1dSRodney W. Grimes 		}
1696b7b2aac2SJohn Dyson 		vm_map_simplify_entry(map, start_entry);
1697df8bae1dSRodney W. Grimes 	}
1698df8bae1dSRodney W. Grimes 
1699df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1700df8bae1dSRodney W. Grimes 
1701df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1702df8bae1dSRodney W. Grimes }
1703df8bae1dSRodney W. Grimes 
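/*
 * Editorial sketch (not part of the original source): kernel code that
 * must keep one of its own mappings resident would use this routine
 * rather than the user variant above; "va" and "size" are hypothetical.
 *
 *	rv = vm_map_pageable(kernel_map, trunc_page(va),
 *	    round_page(va + size), FALSE);	new_pageable FALSE: wire
 *	if (rv != KERN_SUCCESS)
 *		return (ENOMEM);
 *	...
 *	(void) vm_map_pageable(kernel_map, trunc_page(va),
 *	    round_page(va + size), TRUE);	new_pageable TRUE: unwire
 */
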
1704df8bae1dSRodney W. Grimes /*
1705df8bae1dSRodney W. Grimes  * vm_map_clean
1706df8bae1dSRodney W. Grimes  *
1707df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
1708df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
1709df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
1710df8bae1dSRodney W. Grimes  *
1711df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
1712df8bae1dSRodney W. Grimes  */
1713df8bae1dSRodney W. Grimes int
1714df8bae1dSRodney W. Grimes vm_map_clean(map, start, end, syncio, invalidate)
1715df8bae1dSRodney W. Grimes 	vm_map_t map;
1716df8bae1dSRodney W. Grimes 	vm_offset_t start;
1717df8bae1dSRodney W. Grimes 	vm_offset_t end;
1718df8bae1dSRodney W. Grimes 	boolean_t syncio;
1719df8bae1dSRodney W. Grimes 	boolean_t invalidate;
1720df8bae1dSRodney W. Grimes {
1721df8bae1dSRodney W. Grimes 	register vm_map_entry_t current;
1722df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1723df8bae1dSRodney W. Grimes 	vm_size_t size;
1724df8bae1dSRodney W. Grimes 	vm_object_t object;
1725a316d390SJohn Dyson 	vm_ooffset_t offset;
1726df8bae1dSRodney W. Grimes 
1727df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
1728df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1729df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &entry)) {
1730df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
1731df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1732df8bae1dSRodney W. Grimes 	}
1733df8bae1dSRodney W. Grimes 	/*
1734df8bae1dSRodney W. Grimes 	 * Make a first pass to check for holes.
1735df8bae1dSRodney W. Grimes 	 */
1736df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1737afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1738df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1739df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1740df8bae1dSRodney W. Grimes 		}
1741df8bae1dSRodney W. Grimes 		if (end > current->end &&
1742df8bae1dSRodney W. Grimes 		    (current->next == &map->header ||
1743df8bae1dSRodney W. Grimes 			current->end != current->next->start)) {
1744df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1745df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ADDRESS);
1746df8bae1dSRodney W. Grimes 		}
1747df8bae1dSRodney W. Grimes 	}
1748df8bae1dSRodney W. Grimes 
1749df8bae1dSRodney W. Grimes 	/*
1750df8bae1dSRodney W. Grimes 	 * Make a second pass, cleaning/uncaching pages from the indicated
1751df8bae1dSRodney W. Grimes 	 * objects as we go.
1752df8bae1dSRodney W. Grimes 	 */
1753df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1754df8bae1dSRodney W. Grimes 		offset = current->offset + (start - current->start);
1755df8bae1dSRodney W. Grimes 		size = (end <= current->end ? end : current->end) - start;
1756afa07f7eSJohn Dyson 		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1757df8bae1dSRodney W. Grimes 			register vm_map_t smap;
1758df8bae1dSRodney W. Grimes 			vm_map_entry_t tentry;
1759df8bae1dSRodney W. Grimes 			vm_size_t tsize;
1760df8bae1dSRodney W. Grimes 
1761df8bae1dSRodney W. Grimes 			smap = current->object.share_map;
1762df8bae1dSRodney W. Grimes 			vm_map_lock_read(smap);
1763df8bae1dSRodney W. Grimes 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1764df8bae1dSRodney W. Grimes 			tsize = tentry->end - offset;
1765df8bae1dSRodney W. Grimes 			if (tsize < size)
1766df8bae1dSRodney W. Grimes 				size = tsize;
1767df8bae1dSRodney W. Grimes 			object = tentry->object.vm_object;
1768df8bae1dSRodney W. Grimes 			offset = tentry->offset + (offset - tentry->start);
1769df8bae1dSRodney W. Grimes 			vm_map_unlock_read(smap);
1770df8bae1dSRodney W. Grimes 		} else {
1771df8bae1dSRodney W. Grimes 			object = current->object.vm_object;
1772df8bae1dSRodney W. Grimes 		}
17738a02c104SJohn Dyson 		/*
17748a02c104SJohn Dyson 		 * Note that there is absolutely no sense in writing out
17758a02c104SJohn Dyson 		 * anonymous objects, so we track down the vnode object
17768a02c104SJohn Dyson 		 * to write out.
17778a02c104SJohn Dyson 		 * We invalidate (remove) all pages from the address space
17788a02c104SJohn Dyson 		 * anyway, for semantic correctness.
17798a02c104SJohn Dyson 		 */
17808a02c104SJohn Dyson 		while (object->backing_object) {
17818a02c104SJohn Dyson 			object = object->backing_object;
17828a02c104SJohn Dyson 			offset += object->backing_object_offset;
17838a02c104SJohn Dyson 			if (object->size < OFF_TO_IDX( offset + size))
17848a02c104SJohn Dyson 				size = IDX_TO_OFF(object->size) - offset;
17858a02c104SJohn Dyson 		}
17868a02c104SJohn Dyson 		if (invalidate)
17878a02c104SJohn Dyson 			pmap_remove(vm_map_pmap(map), current->start,
178867cc64f4SJohn Dyson 				current->start + size);
178924a1cce3SDavid Greenman 		if (object && (object->type == OBJT_VNODE)) {
1790df8bae1dSRodney W. Grimes 			/*
17910d94caffSDavid Greenman 			 * Flush pages if writing is allowed. XXX should we continue
17920d94caffSDavid Greenman 			 * on an error?
1793f5cf85d4SDavid Greenman 			 *
1794f5cf85d4SDavid Greenman 			 * XXX Doing async I/O and then removing all the pages from
1795f5cf85d4SDavid Greenman 			 *     the object before it completes is probably a very bad
1796f5cf85d4SDavid Greenman 			 *     idea.
1797df8bae1dSRodney W. Grimes 			 */
1798a02051c3SJohn Dyson 			if (current->protection & VM_PROT_WRITE) {
1799a316d390SJohn Dyson 		   	    	vm_object_page_clean(object,
1800a316d390SJohn Dyson 					OFF_TO_IDX(offset),
1801a316d390SJohn Dyson 					OFF_TO_IDX(offset + size),
1802a02051c3SJohn Dyson 					(syncio||invalidate)?1:0, TRUE);
1803df8bae1dSRodney W. Grimes 				if (invalidate)
1804a316d390SJohn Dyson 					vm_object_page_remove(object,
1805a316d390SJohn Dyson 						OFF_TO_IDX(offset),
1806a316d390SJohn Dyson 						OFF_TO_IDX(offset + size),
1807a316d390SJohn Dyson 						FALSE);
1808bf4bd9bdSDavid Greenman 			}
1809a02051c3SJohn Dyson 		}
1810df8bae1dSRodney W. Grimes 		start += size;
1811df8bae1dSRodney W. Grimes 	}
1812df8bae1dSRodney W. Grimes 
1813df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
1814df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1815df8bae1dSRodney W. Grimes }
1816df8bae1dSRodney W. Grimes 
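/*
 * Editorial sketch (not part of the original source): msync(2) reduces,
 * roughly, to one vm_map_clean() call on a page-aligned range, with
 * "syncio" driven by the absence of MS_ASYNC and "invalidate" by
 * MS_INVALIDATE.
 *
 *	rv = vm_map_clean(&p->p_vmspace->vm_map, start, end,
 *	    (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0);
 */
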
1817df8bae1dSRodney W. Grimes /*
1818df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
1819df8bae1dSRodney W. Grimes  *
1820df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
1821df8bae1dSRodney W. Grimes  *
1822df8bae1dSRodney W. Grimes  *	The map in question should be locked.
1823df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
1824df8bae1dSRodney W. Grimes  */
18250362d7d7SJohn Dyson static void
18260d94caffSDavid Greenman vm_map_entry_unwire(map, entry)
1827df8bae1dSRodney W. Grimes 	vm_map_t map;
1828df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1829df8bae1dSRodney W. Grimes {
1830df8bae1dSRodney W. Grimes 	vm_fault_unwire(map, entry->start, entry->end);
1831df8bae1dSRodney W. Grimes 	entry->wired_count = 0;
1832df8bae1dSRodney W. Grimes }
1833df8bae1dSRodney W. Grimes 
1834df8bae1dSRodney W. Grimes /*
1835df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
1836df8bae1dSRodney W. Grimes  *
1837df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
1838df8bae1dSRodney W. Grimes  */
18390362d7d7SJohn Dyson static void
18400d94caffSDavid Greenman vm_map_entry_delete(map, entry)
1841df8bae1dSRodney W. Grimes 	register vm_map_t map;
1842df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1843df8bae1dSRodney W. Grimes {
1844df8bae1dSRodney W. Grimes 	vm_map_entry_unlink(map, entry);
1845df8bae1dSRodney W. Grimes 	map->size -= entry->end - entry->start;
1846df8bae1dSRodney W. Grimes 
1847afa07f7eSJohn Dyson 	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1848df8bae1dSRodney W. Grimes 		vm_map_deallocate(entry->object.share_map);
1849b5b40fa6SJohn Dyson 	} else {
1850df8bae1dSRodney W. Grimes 		vm_object_deallocate(entry->object.vm_object);
1851b5b40fa6SJohn Dyson 	}
1852df8bae1dSRodney W. Grimes 
1853df8bae1dSRodney W. Grimes 	vm_map_entry_dispose(map, entry);
1854df8bae1dSRodney W. Grimes }
1855df8bae1dSRodney W. Grimes 
1856df8bae1dSRodney W. Grimes /*
1857df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
1858df8bae1dSRodney W. Grimes  *
1859df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
1860df8bae1dSRodney W. Grimes  *	map.
1861df8bae1dSRodney W. Grimes  *
1862df8bae1dSRodney W. Grimes  *	When called with a sharing map, removes pages from
1863df8bae1dSRodney W. Grimes  *	that region from all physical maps.
1864df8bae1dSRodney W. Grimes  */
1865df8bae1dSRodney W. Grimes int
1866df8bae1dSRodney W. Grimes vm_map_delete(map, start, end)
1867df8bae1dSRodney W. Grimes 	register vm_map_t map;
1868df8bae1dSRodney W. Grimes 	vm_offset_t start;
1869df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1870df8bae1dSRodney W. Grimes {
1871df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1872df8bae1dSRodney W. Grimes 	vm_map_entry_t first_entry;
1873df8bae1dSRodney W. Grimes 
1874df8bae1dSRodney W. Grimes 	/*
1875df8bae1dSRodney W. Grimes 	 * Find the start of the region, and clip it
1876df8bae1dSRodney W. Grimes 	 */
1877df8bae1dSRodney W. Grimes 
1878df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &first_entry))
1879df8bae1dSRodney W. Grimes 		entry = first_entry->next;
1880df8bae1dSRodney W. Grimes 	else {
1881df8bae1dSRodney W. Grimes 		entry = first_entry;
1882df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1883df8bae1dSRodney W. Grimes 
1884df8bae1dSRodney W. Grimes 		/*
18850d94caffSDavid Greenman 		 * Fix the lookup hint now, rather than each time through the
18860d94caffSDavid Greenman 		 * loop.
1887df8bae1dSRodney W. Grimes 		 */
1888df8bae1dSRodney W. Grimes 
1889df8bae1dSRodney W. Grimes 		SAVE_HINT(map, entry->prev);
1890df8bae1dSRodney W. Grimes 	}
1891df8bae1dSRodney W. Grimes 
1892df8bae1dSRodney W. Grimes 	/*
1893df8bae1dSRodney W. Grimes 	 * Save the free space hint
1894df8bae1dSRodney W. Grimes 	 */
1895df8bae1dSRodney W. Grimes 
1896b18bfc3dSJohn Dyson 	if (entry == &map->header) {
1897b18bfc3dSJohn Dyson 		map->first_free = &map->header;
1898b18bfc3dSJohn Dyson 	} else if (map->first_free->start >= start)
1899df8bae1dSRodney W. Grimes 		map->first_free = entry->prev;
1900df8bae1dSRodney W. Grimes 
1901df8bae1dSRodney W. Grimes 	/*
1902df8bae1dSRodney W. Grimes 	 * Step through all entries in this region
1903df8bae1dSRodney W. Grimes 	 */
1904df8bae1dSRodney W. Grimes 
1905df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1906df8bae1dSRodney W. Grimes 		vm_map_entry_t next;
1907b18bfc3dSJohn Dyson 		vm_offset_t s, e;
1908b18bfc3dSJohn Dyson 		vm_object_t object;
1909b18bfc3dSJohn Dyson 		vm_ooffset_t offset;
1910df8bae1dSRodney W. Grimes 
1911df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1912df8bae1dSRodney W. Grimes 
1913df8bae1dSRodney W. Grimes 		next = entry->next;
1914df8bae1dSRodney W. Grimes 		s = entry->start;
1915df8bae1dSRodney W. Grimes 		e = entry->end;
1916b18bfc3dSJohn Dyson 		offset = entry->offset;
1917df8bae1dSRodney W. Grimes 
1918df8bae1dSRodney W. Grimes 		/*
19190d94caffSDavid Greenman 		 * Unwire before removing addresses from the pmap; otherwise,
19200d94caffSDavid Greenman 		 * unwiring will put the entries back in the pmap.
1921df8bae1dSRodney W. Grimes 		 */
1922df8bae1dSRodney W. Grimes 
1923df8bae1dSRodney W. Grimes 		object = entry->object.vm_object;
1924df8bae1dSRodney W. Grimes 		if (entry->wired_count != 0)
1925df8bae1dSRodney W. Grimes 			vm_map_entry_unwire(map, entry);
1926df8bae1dSRodney W. Grimes 
1927df8bae1dSRodney W. Grimes 		/*
19280d94caffSDavid Greenman 		 * If this is a sharing map, we must remove *all* references
19290d94caffSDavid Greenman 		 * to this data, since we can't find all of the physical maps
19300d94caffSDavid Greenman 		 * which are sharing it.
1931df8bae1dSRodney W. Grimes 		 */
1932df8bae1dSRodney W. Grimes 
1933b18bfc3dSJohn Dyson 		if (object == kernel_object || object == kmem_object) {
1934b18bfc3dSJohn Dyson 			vm_object_page_remove(object, OFF_TO_IDX(offset),
1935b18bfc3dSJohn Dyson 			    OFF_TO_IDX(offset + (e - s)), FALSE);
1936b18bfc3dSJohn Dyson 		} else if (!map->is_main_map) {
1937df8bae1dSRodney W. Grimes 			vm_object_pmap_remove(object,
1938b18bfc3dSJohn Dyson 			    OFF_TO_IDX(offset),
1939b18bfc3dSJohn Dyson 			    OFF_TO_IDX(offset + (e - s)));
1940b18bfc3dSJohn Dyson 		} else {
1941df8bae1dSRodney W. Grimes 			pmap_remove(map->pmap, s, e);
1942b18bfc3dSJohn Dyson 		}
1943df8bae1dSRodney W. Grimes 
1944df8bae1dSRodney W. Grimes 		/*
19450d94caffSDavid Greenman 		 * Delete the entry (which may delete the object) only after
19460d94caffSDavid Greenman 		 * removing all pmap entries pointing to its pages.
19470d94caffSDavid Greenman 		 * (Otherwise, its page frames may be reallocated, and any
19480d94caffSDavid Greenman 		 * modify bits will be set in the wrong object!)
1949df8bae1dSRodney W. Grimes 		 */
1950df8bae1dSRodney W. Grimes 
1951df8bae1dSRodney W. Grimes 		vm_map_entry_delete(map, entry);
1952df8bae1dSRodney W. Grimes 		entry = next;
1953df8bae1dSRodney W. Grimes 	}
1954df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1955df8bae1dSRodney W. Grimes }
1956df8bae1dSRodney W. Grimes 
1957df8bae1dSRodney W. Grimes /*
1958df8bae1dSRodney W. Grimes  *	vm_map_remove:
1959df8bae1dSRodney W. Grimes  *
1960df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
1961df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
1962df8bae1dSRodney W. Grimes  */
1963df8bae1dSRodney W. Grimes int
1964df8bae1dSRodney W. Grimes vm_map_remove(map, start, end)
1965df8bae1dSRodney W. Grimes 	register vm_map_t map;
1966df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1967df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1968df8bae1dSRodney W. Grimes {
19698d6e8edeSDavid Greenman 	register int result, s = 0;
19708d6e8edeSDavid Greenman 
19719579ee64SDavid Greenman 	if (map == kmem_map || map == mb_map)
1972b18bfc3dSJohn Dyson 		s = splvm();
1973df8bae1dSRodney W. Grimes 
1974df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1975df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1976df8bae1dSRodney W. Grimes 	result = vm_map_delete(map, start, end);
1977df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1978df8bae1dSRodney W. Grimes 
19799579ee64SDavid Greenman 	if (map == kmem_map || map == mb_map)
19808d6e8edeSDavid Greenman 		splx(s);
19818d6e8edeSDavid Greenman 
1982df8bae1dSRodney W. Grimes 	return (result);
1983df8bae1dSRodney W. Grimes }
1984df8bae1dSRodney W. Grimes 
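/*
 * Editorial sketch (not part of the original source): munmap(2) and the
 * kernel memory allocators typically end up here; a page-aligned range
 * is simply handed to vm_map_remove(), which takes the map lock and
 * calls vm_map_delete() above.
 *
 *	(void) vm_map_remove(&p->p_vmspace->vm_map,
 *	    trunc_page((vm_offset_t) addr),
 *	    round_page((vm_offset_t) addr + len));
 */
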
1985df8bae1dSRodney W. Grimes /*
1986df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
1987df8bae1dSRodney W. Grimes  *
1988df8bae1dSRodney W. Grimes  *	Assert that the target map allows the specified
1989df8bae1dSRodney W. Grimes  *	privilege on the entire address region given.
1990df8bae1dSRodney W. Grimes  *	The entire region must be allocated.
1991df8bae1dSRodney W. Grimes  */
19920d94caffSDavid Greenman boolean_t
19930d94caffSDavid Greenman vm_map_check_protection(map, start, end, protection)
1994df8bae1dSRodney W. Grimes 	register vm_map_t map;
1995df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1996df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1997df8bae1dSRodney W. Grimes 	register vm_prot_t protection;
1998df8bae1dSRodney W. Grimes {
1999df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
2000df8bae1dSRodney W. Grimes 	vm_map_entry_t tmp_entry;
2001df8bae1dSRodney W. Grimes 
2002df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2003df8bae1dSRodney W. Grimes 		return (FALSE);
2004df8bae1dSRodney W. Grimes 	}
2005df8bae1dSRodney W. Grimes 	entry = tmp_entry;
2006df8bae1dSRodney W. Grimes 
2007df8bae1dSRodney W. Grimes 	while (start < end) {
2008df8bae1dSRodney W. Grimes 		if (entry == &map->header) {
2009df8bae1dSRodney W. Grimes 			return (FALSE);
2010df8bae1dSRodney W. Grimes 		}
2011df8bae1dSRodney W. Grimes 		/*
2012df8bae1dSRodney W. Grimes 		 * No holes allowed!
2013df8bae1dSRodney W. Grimes 		 */
2014df8bae1dSRodney W. Grimes 
2015df8bae1dSRodney W. Grimes 		if (start < entry->start) {
2016df8bae1dSRodney W. Grimes 			return (FALSE);
2017df8bae1dSRodney W. Grimes 		}
2018df8bae1dSRodney W. Grimes 		/*
2019df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
2020df8bae1dSRodney W. Grimes 		 */
2021df8bae1dSRodney W. Grimes 
2022df8bae1dSRodney W. Grimes 		if ((entry->protection & protection) != protection) {
2023df8bae1dSRodney W. Grimes 			return (FALSE);
2024df8bae1dSRodney W. Grimes 		}
2025df8bae1dSRodney W. Grimes 		/* go to next entry */
2026df8bae1dSRodney W. Grimes 
2027df8bae1dSRodney W. Grimes 		start = entry->end;
2028df8bae1dSRodney W. Grimes 		entry = entry->next;
2029df8bae1dSRodney W. Grimes 	}
2030df8bae1dSRodney W. Grimes 	return (TRUE);
2031df8bae1dSRodney W. Grimes }
2032df8bae1dSRodney W. Grimes 
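/*
 * Editorial sketch (not part of the original source): a caller about to
 * write into a user-supplied buffer might verify the whole destination
 * range up front.
 *
 *	if (!vm_map_check_protection(&p->p_vmspace->vm_map, start, end,
 *	    VM_PROT_WRITE))
 *		return (EFAULT);
 */
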
2033df8bae1dSRodney W. Grimes /*
2034df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
2035df8bae1dSRodney W. Grimes  *
2036df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
2037df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
2038df8bae1dSRodney W. Grimes  */
2039f708ef1bSPoul-Henning Kamp static void
20400d94caffSDavid Greenman vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2041df8bae1dSRodney W. Grimes 	vm_map_t src_map, dst_map;
2042df8bae1dSRodney W. Grimes 	register vm_map_entry_t src_entry, dst_entry;
2043df8bae1dSRodney W. Grimes {
2044afa07f7eSJohn Dyson 	if ((dst_entry->eflags|src_entry->eflags) &
2045afa07f7eSJohn Dyson 		(MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
2046df8bae1dSRodney W. Grimes 		return;
2047df8bae1dSRodney W. Grimes 
2048df8bae1dSRodney W. Grimes 	if (src_entry->wired_count == 0) {
2049df8bae1dSRodney W. Grimes 
2050df8bae1dSRodney W. Grimes 		/*
20510d94caffSDavid Greenman 		 * If the source entry is marked needs_copy, it is already
20520d94caffSDavid Greenman 		 * write-protected.
2053df8bae1dSRodney W. Grimes 		 */
2054afa07f7eSJohn Dyson 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2055df8bae1dSRodney W. Grimes 
2056df8bae1dSRodney W. Grimes 			boolean_t su;
2057df8bae1dSRodney W. Grimes 
2058df8bae1dSRodney W. Grimes 			/*
20590d94caffSDavid Greenman 			 * If the source entry has only one mapping, we can
20600d94caffSDavid Greenman 			 * just protect the virtual address range.
2061df8bae1dSRodney W. Grimes 			 */
2062df8bae1dSRodney W. Grimes 			if (!(su = src_map->is_main_map)) {
2063df8bae1dSRodney W. Grimes 				su = (src_map->ref_count == 1);
2064df8bae1dSRodney W. Grimes 			}
2065df8bae1dSRodney W. Grimes 			if (su) {
2066df8bae1dSRodney W. Grimes 				pmap_protect(src_map->pmap,
2067df8bae1dSRodney W. Grimes 				    src_entry->start,
2068df8bae1dSRodney W. Grimes 				    src_entry->end,
2069df8bae1dSRodney W. Grimes 				    src_entry->protection & ~VM_PROT_WRITE);
20700d94caffSDavid Greenman 			} else {
2071df8bae1dSRodney W. Grimes 				vm_object_pmap_copy(src_entry->object.vm_object,
2072a316d390SJohn Dyson 				    OFF_TO_IDX(src_entry->offset),
2073a316d390SJohn Dyson 				    OFF_TO_IDX(src_entry->offset + (src_entry->end
2074a316d390SJohn Dyson 					- src_entry->start)));
2075df8bae1dSRodney W. Grimes 			}
2076df8bae1dSRodney W. Grimes 		}
2077b18bfc3dSJohn Dyson 
2078df8bae1dSRodney W. Grimes 		/*
2079df8bae1dSRodney W. Grimes 		 * Make a copy of the object.
2080df8bae1dSRodney W. Grimes 		 */
2081b18bfc3dSJohn Dyson 		if (src_entry->object.vm_object) {
2082b18bfc3dSJohn Dyson 			if ((src_entry->object.vm_object->handle == NULL) &&
2083b18bfc3dSJohn Dyson 				(src_entry->object.vm_object->type == OBJT_DEFAULT ||
2084b18bfc3dSJohn Dyson 				 src_entry->object.vm_object->type == OBJT_SWAP))
2085b18bfc3dSJohn Dyson 				vm_object_collapse(src_entry->object.vm_object);
2086b18bfc3dSJohn Dyson 			++src_entry->object.vm_object->ref_count;
2087afa07f7eSJohn Dyson 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2088afa07f7eSJohn Dyson 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2089b18bfc3dSJohn Dyson 			dst_entry->object.vm_object =
2090b18bfc3dSJohn Dyson 				src_entry->object.vm_object;
2091b18bfc3dSJohn Dyson 			dst_entry->offset = src_entry->offset;
2092b18bfc3dSJohn Dyson 		} else {
2093b18bfc3dSJohn Dyson 			dst_entry->object.vm_object = NULL;
2094b18bfc3dSJohn Dyson 			dst_entry->offset = 0;
2095b18bfc3dSJohn Dyson 		}
2096df8bae1dSRodney W. Grimes 
2097df8bae1dSRodney W. Grimes 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2098df8bae1dSRodney W. Grimes 		    dst_entry->end - dst_entry->start, src_entry->start);
20990d94caffSDavid Greenman 	} else {
2100df8bae1dSRodney W. Grimes 		/*
2101df8bae1dSRodney W. Grimes 		 * Of course, wired down pages can't be set copy-on-write.
21020d94caffSDavid Greenman 		 * Cause wired pages to be copied into the new map by
21030d94caffSDavid Greenman 		 * simulating faults (the new pages are pageable)
2104df8bae1dSRodney W. Grimes 		 */
2105df8bae1dSRodney W. Grimes 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2106df8bae1dSRodney W. Grimes 	}
2107df8bae1dSRodney W. Grimes }
2108df8bae1dSRodney W. Grimes 
2109df8bae1dSRodney W. Grimes /*
2110df8bae1dSRodney W. Grimes  * vmspace_fork:
2111df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
2112df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
2113df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
2114df8bae1dSRodney W. Grimes  * values on the regions in that map.
2115df8bae1dSRodney W. Grimes  *
2116df8bae1dSRodney W. Grimes  * The source map must not be locked.
2117df8bae1dSRodney W. Grimes  */
2118df8bae1dSRodney W. Grimes struct vmspace *
2119df8bae1dSRodney W. Grimes vmspace_fork(vm1)
2120df8bae1dSRodney W. Grimes 	register struct vmspace *vm1;
2121df8bae1dSRodney W. Grimes {
2122df8bae1dSRodney W. Grimes 	register struct vmspace *vm2;
2123df8bae1dSRodney W. Grimes 	vm_map_t old_map = &vm1->vm_map;
2124df8bae1dSRodney W. Grimes 	vm_map_t new_map;
2125df8bae1dSRodney W. Grimes 	vm_map_entry_t old_entry;
2126df8bae1dSRodney W. Grimes 	vm_map_entry_t new_entry;
2127df8bae1dSRodney W. Grimes 	pmap_t new_pmap;
2128de5f6a77SJohn Dyson 	vm_object_t object;
2129df8bae1dSRodney W. Grimes 
2130df8bae1dSRodney W. Grimes 	vm_map_lock(old_map);
2131df8bae1dSRodney W. Grimes 
2132df8bae1dSRodney W. Grimes 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
2133df8bae1dSRodney W. Grimes 	    old_map->entries_pageable);
2134df8bae1dSRodney W. Grimes 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2135df8bae1dSRodney W. Grimes 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2136df8bae1dSRodney W. Grimes 	new_pmap = &vm2->vm_pmap;	/* XXX */
2137df8bae1dSRodney W. Grimes 	new_map = &vm2->vm_map;	/* XXX */
2138df8bae1dSRodney W. Grimes 
2139df8bae1dSRodney W. Grimes 	old_entry = old_map->header.next;
2140df8bae1dSRodney W. Grimes 
2141df8bae1dSRodney W. Grimes 	while (old_entry != &old_map->header) {
2142afa07f7eSJohn Dyson 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2143df8bae1dSRodney W. Grimes 			panic("vmspace_fork: encountered a submap");
2144df8bae1dSRodney W. Grimes 
2145df8bae1dSRodney W. Grimes 		switch (old_entry->inheritance) {
2146df8bae1dSRodney W. Grimes 		case VM_INHERIT_NONE:
2147df8bae1dSRodney W. Grimes 			break;
2148df8bae1dSRodney W. Grimes 
2149df8bae1dSRodney W. Grimes 		case VM_INHERIT_SHARE:
2150df8bae1dSRodney W. Grimes 			/*
2151fed9a903SJohn Dyson 			 * Clone the entry, creating the shared object if necessary.
2152fed9a903SJohn Dyson 			 */
2153fed9a903SJohn Dyson 			object = old_entry->object.vm_object;
2154fed9a903SJohn Dyson 			if (object == NULL) {
2155fed9a903SJohn Dyson 				object = vm_object_allocate(OBJT_DEFAULT,
2156fed9a903SJohn Dyson 				    OFF_TO_IDX(old_entry->end -
2157fed9a903SJohn Dyson 				    old_entry->start));
2158fed9a903SJohn Dyson 				old_entry->object.vm_object = object;
2159fed9a903SJohn Dyson 				old_entry->offset = (vm_offset_t) 0;
21605069bf57SJohn Dyson 			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
21615069bf57SJohn Dyson 				vm_object_shadow(&old_entry->object.vm_object,
21625069bf57SJohn Dyson 						 &old_entry->offset,
21635069bf57SJohn Dyson 						 OFF_TO_IDX(old_entry->end -
21645069bf57SJohn Dyson 							old_entry->start));
21655069bf57SJohn Dyson 
21665069bf57SJohn Dyson 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
21675069bf57SJohn Dyson 				object = old_entry->object.vm_object;
2168fed9a903SJohn Dyson 			}
2169fed9a903SJohn Dyson 
2170fed9a903SJohn Dyson 			/*
2171df8bae1dSRodney W. Grimes 			 * Clone the entry, referencing the shared object.
2172df8bae1dSRodney W. Grimes 			 */
2173df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2174df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
2175df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
2176de5f6a77SJohn Dyson 			++object->ref_count;
2177df8bae1dSRodney W. Grimes 
2178df8bae1dSRodney W. Grimes 			/*
21790d94caffSDavid Greenman 			 * Insert the entry into the new map -- we know we're
21800d94caffSDavid Greenman 			 * inserting at the end of the new map.
2181df8bae1dSRodney W. Grimes 			 */
2182df8bae1dSRodney W. Grimes 
2183df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2184df8bae1dSRodney W. Grimes 			    new_entry);
2185df8bae1dSRodney W. Grimes 
2186df8bae1dSRodney W. Grimes 			/*
2187df8bae1dSRodney W. Grimes 			 * Update the physical map
2188df8bae1dSRodney W. Grimes 			 */
2189df8bae1dSRodney W. Grimes 
2190df8bae1dSRodney W. Grimes 			pmap_copy(new_map->pmap, old_map->pmap,
2191df8bae1dSRodney W. Grimes 			    new_entry->start,
2192df8bae1dSRodney W. Grimes 			    (old_entry->end - old_entry->start),
2193df8bae1dSRodney W. Grimes 			    old_entry->start);
2194df8bae1dSRodney W. Grimes 			break;
2195df8bae1dSRodney W. Grimes 
2196df8bae1dSRodney W. Grimes 		case VM_INHERIT_COPY:
2197df8bae1dSRodney W. Grimes 			/*
2198df8bae1dSRodney W. Grimes 			 * Clone the entry and link into the map.
2199df8bae1dSRodney W. Grimes 			 */
2200df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2201df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
2202df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
2203df8bae1dSRodney W. Grimes 			new_entry->object.vm_object = NULL;
2204afa07f7eSJohn Dyson 			new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
2205df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2206df8bae1dSRodney W. Grimes 			    new_entry);
2207bd7e5f99SJohn Dyson 			vm_map_copy_entry(old_map, new_map, old_entry,
2208bd7e5f99SJohn Dyson 			    new_entry);
2209df8bae1dSRodney W. Grimes 			break;
2210df8bae1dSRodney W. Grimes 		}
2211df8bae1dSRodney W. Grimes 		old_entry = old_entry->next;
2212df8bae1dSRodney W. Grimes 	}
2213df8bae1dSRodney W. Grimes 
2214df8bae1dSRodney W. Grimes 	new_map->size = old_map->size;
2215df8bae1dSRodney W. Grimes 	vm_map_unlock(old_map);
2216df8bae1dSRodney W. Grimes 
2217df8bae1dSRodney W. Grimes 	return (vm2);
2218df8bae1dSRodney W. Grimes }
2219df8bae1dSRodney W. Grimes 
2220df8bae1dSRodney W. Grimes /*
2221df8bae1dSRodney W. Grimes  *	vm_map_lookup:
2222df8bae1dSRodney W. Grimes  *
2223df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
2224df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
2225df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
2226df8bae1dSRodney W. Grimes  *	type specified.
2227df8bae1dSRodney W. Grimes  *
2228df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
2229df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
2230df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
2231df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
2232df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
2233df8bae1dSRodney W. Grimes  *
2234df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
2235df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
2236df8bae1dSRodney W. Grimes  *
2237df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
2238df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
2239df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
2240df8bae1dSRodney W. Grimes  *	remain the same.
2241df8bae1dSRodney W. Grimes  */
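/*
 * Caller sketch (illustrative): a fault handler resolves a faulting
 * address roughly as follows; the surrounding code is hypothetical.
 *
 *	result = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired, &su);
 *	if (result != KERN_SUCCESS)
 *		return (result);
 *	... page in from (object, pindex), honoring prot ...
 *	vm_map_lookup_done(map, entry);
 *
 * Note that the lookup may change "map" (e.g. when descending into a
 * submap), which is why the updated value must be the one passed to
 * vm_map_lookup_done().
 */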
2242df8bae1dSRodney W. Grimes int
2243df8bae1dSRodney W. Grimes vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2244a316d390SJohn Dyson     object, pindex, out_prot, wired, single_use)
2245df8bae1dSRodney W. Grimes 	vm_map_t *var_map;	/* IN/OUT */
2246df8bae1dSRodney W. Grimes 	register vm_offset_t vaddr;
2247df8bae1dSRodney W. Grimes 	register vm_prot_t fault_type;
2248df8bae1dSRodney W. Grimes 
2249df8bae1dSRodney W. Grimes 	vm_map_entry_t *out_entry;	/* OUT */
2250df8bae1dSRodney W. Grimes 	vm_object_t *object;	/* OUT */
2251a316d390SJohn Dyson 	vm_pindex_t *pindex;	/* OUT */
2252df8bae1dSRodney W. Grimes 	vm_prot_t *out_prot;	/* OUT */
2253df8bae1dSRodney W. Grimes 	boolean_t *wired;	/* OUT */
2254df8bae1dSRodney W. Grimes 	boolean_t *single_use;	/* OUT */
2255df8bae1dSRodney W. Grimes {
2256df8bae1dSRodney W. Grimes 	vm_map_t share_map;
2257df8bae1dSRodney W. Grimes 	vm_offset_t share_offset;
2258df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
2259df8bae1dSRodney W. Grimes 	register vm_map_t map = *var_map;
2260df8bae1dSRodney W. Grimes 	register vm_prot_t prot;
2261df8bae1dSRodney W. Grimes 	register boolean_t su;
2262df8bae1dSRodney W. Grimes 
2263df8bae1dSRodney W. Grimes RetryLookup:;
2264df8bae1dSRodney W. Grimes 
2265df8bae1dSRodney W. Grimes 	/*
2266df8bae1dSRodney W. Grimes 	 * Lookup the faulting address.
2267df8bae1dSRodney W. Grimes 	 */
2268df8bae1dSRodney W. Grimes 
2269df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
2270df8bae1dSRodney W. Grimes 
2271df8bae1dSRodney W. Grimes #define	RETURN(why) \
2272df8bae1dSRodney W. Grimes 		{ \
2273df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map); \
2274df8bae1dSRodney W. Grimes 		return(why); \
2275df8bae1dSRodney W. Grimes 		}
2276df8bae1dSRodney W. Grimes 
2277df8bae1dSRodney W. Grimes 	/*
22780d94caffSDavid Greenman 	 * If the map has an interesting hint, try it before calling the
22790d94caffSDavid Greenman 	 * full-blown lookup routine.
2280df8bae1dSRodney W. Grimes 	 */
2281df8bae1dSRodney W. Grimes 
2282df8bae1dSRodney W. Grimes 	entry = map->hint;
2283df8bae1dSRodney W. Grimes 
2284df8bae1dSRodney W. Grimes 	*out_entry = entry;
2285df8bae1dSRodney W. Grimes 
2286df8bae1dSRodney W. Grimes 	if ((entry == &map->header) ||
2287df8bae1dSRodney W. Grimes 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2288df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp_entry;
2289df8bae1dSRodney W. Grimes 
2290df8bae1dSRodney W. Grimes 		/*
22910d94caffSDavid Greenman 		 * Entry was either not a valid hint, or the vaddr was not
22920d94caffSDavid Greenman 		 * contained in the entry, so do a full lookup.
2293df8bae1dSRodney W. Grimes 		 */
2294df8bae1dSRodney W. Grimes 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2295df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2296df8bae1dSRodney W. Grimes 
2297df8bae1dSRodney W. Grimes 		entry = tmp_entry;
2298df8bae1dSRodney W. Grimes 		*out_entry = entry;
2299df8bae1dSRodney W. Grimes 	}
2300b7b2aac2SJohn Dyson 
2301df8bae1dSRodney W. Grimes 	/*
2302df8bae1dSRodney W. Grimes 	 * Handle submaps.
2303df8bae1dSRodney W. Grimes 	 */
2304df8bae1dSRodney W. Grimes 
2305afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2306df8bae1dSRodney W. Grimes 		vm_map_t old_map = map;
2307df8bae1dSRodney W. Grimes 
2308df8bae1dSRodney W. Grimes 		*var_map = map = entry->object.sub_map;
2309df8bae1dSRodney W. Grimes 		vm_map_unlock_read(old_map);
2310df8bae1dSRodney W. Grimes 		goto RetryLookup;
2311df8bae1dSRodney W. Grimes 	}
2312df8bae1dSRodney W. Grimes 	/*
23130d94caffSDavid Greenman 	 * Check whether this task is allowed to have this page.
2314df8bae1dSRodney W. Grimes 	 */
2315df8bae1dSRodney W. Grimes 
2316df8bae1dSRodney W. Grimes 	prot = entry->protection;
2317df8bae1dSRodney W. Grimes 	if ((fault_type & (prot)) != fault_type)
2318df8bae1dSRodney W. Grimes 		RETURN(KERN_PROTECTION_FAILURE);
2319df8bae1dSRodney W. Grimes 
2320df8bae1dSRodney W. Grimes 	/*
23210d94caffSDavid Greenman 	 * If this page is not pageable, we have to get it for all possible
23220d94caffSDavid Greenman 	 * accesses.
2323df8bae1dSRodney W. Grimes 	 */
2324df8bae1dSRodney W. Grimes 
232505f0fdd2SPoul-Henning Kamp 	*wired = (entry->wired_count != 0);
232605f0fdd2SPoul-Henning Kamp 	if (*wired)
2327df8bae1dSRodney W. Grimes 		prot = fault_type = entry->protection;
2328df8bae1dSRodney W. Grimes 
2329df8bae1dSRodney W. Grimes 	/*
23300d94caffSDavid Greenman 	 * If we don't already have a VM object, track it down.
2331df8bae1dSRodney W. Grimes 	 */
2332df8bae1dSRodney W. Grimes 
2333afa07f7eSJohn Dyson 	su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
233405f0fdd2SPoul-Henning Kamp 	if (su) {
2335df8bae1dSRodney W. Grimes 		share_map = map;
2336df8bae1dSRodney W. Grimes 		share_offset = vaddr;
23370d94caffSDavid Greenman 	} else {
2338df8bae1dSRodney W. Grimes 		vm_map_entry_t share_entry;
2339df8bae1dSRodney W. Grimes 
2340df8bae1dSRodney W. Grimes 		/*
2341df8bae1dSRodney W. Grimes 		 * Compute the sharing map, and offset into it.
2342df8bae1dSRodney W. Grimes 		 */
2343df8bae1dSRodney W. Grimes 
2344df8bae1dSRodney W. Grimes 		share_map = entry->object.share_map;
2345df8bae1dSRodney W. Grimes 		share_offset = (vaddr - entry->start) + entry->offset;
2346df8bae1dSRodney W. Grimes 
2347df8bae1dSRodney W. Grimes 		/*
2348df8bae1dSRodney W. Grimes 		 * Look for the backing store object and offset
2349df8bae1dSRodney W. Grimes 		 */
2350df8bae1dSRodney W. Grimes 
2351df8bae1dSRodney W. Grimes 		vm_map_lock_read(share_map);
2352df8bae1dSRodney W. Grimes 
2353df8bae1dSRodney W. Grimes 		if (!vm_map_lookup_entry(share_map, share_offset,
2354df8bae1dSRodney W. Grimes 			&share_entry)) {
2355df8bae1dSRodney W. Grimes 			vm_map_unlock_read(share_map);
2356df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2357df8bae1dSRodney W. Grimes 		}
2358df8bae1dSRodney W. Grimes 		entry = share_entry;
2359df8bae1dSRodney W. Grimes 	}
2360df8bae1dSRodney W. Grimes 
2361df8bae1dSRodney W. Grimes 	/*
2362df8bae1dSRodney W. Grimes 	 * If the entry was copy-on-write, we either shadow it or demote access.
2363df8bae1dSRodney W. Grimes 	 */
2364df8bae1dSRodney W. Grimes 
2365afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2366df8bae1dSRodney W. Grimes 		/*
23670d94caffSDavid Greenman 		 * If we want to write the page, we may as well handle that
23680d94caffSDavid Greenman 		 * now since we've got the sharing map locked.
2369df8bae1dSRodney W. Grimes 		 *
23700d94caffSDavid Greenman 		 * If we don't need to write the page, we just demote the
23710d94caffSDavid Greenman 		 * permissions allowed.
2372df8bae1dSRodney W. Grimes 		 */
2373df8bae1dSRodney W. Grimes 
2374df8bae1dSRodney W. Grimes 		if (fault_type & VM_PROT_WRITE) {
2375df8bae1dSRodney W. Grimes 			/*
23760d94caffSDavid Greenman 			 * Make a new object, and place it in the object
23770d94caffSDavid Greenman 			 * chain.  Note that no new references have appeared
23780d94caffSDavid Greenman 			 * -- one just moved from the share map to the new
23790d94caffSDavid Greenman 			 * object.
2380df8bae1dSRodney W. Grimes 			 */
2381df8bae1dSRodney W. Grimes 
2382df8bae1dSRodney W. Grimes 			if (lock_read_to_write(&share_map->lock)) {
2383df8bae1dSRodney W. Grimes 				if (share_map != map)
2384df8bae1dSRodney W. Grimes 					vm_map_unlock_read(map);
2385df8bae1dSRodney W. Grimes 				goto RetryLookup;
2386df8bae1dSRodney W. Grimes 			}
2387df8bae1dSRodney W. Grimes 			vm_object_shadow(
2388df8bae1dSRodney W. Grimes 			    &entry->object.vm_object,
2389df8bae1dSRodney W. Grimes 			    &entry->offset,
2390a316d390SJohn Dyson 			    OFF_TO_IDX(entry->end - entry->start));
2391df8bae1dSRodney W. Grimes 
2392afa07f7eSJohn Dyson 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2393df8bae1dSRodney W. Grimes 
2394df8bae1dSRodney W. Grimes 			lock_write_to_read(&share_map->lock);
23950d94caffSDavid Greenman 		} else {
2396df8bae1dSRodney W. Grimes 			/*
23970d94caffSDavid Greenman 			 * We're attempting to read a copy-on-write page --
23980d94caffSDavid Greenman 			 * don't allow writes.
2399df8bae1dSRodney W. Grimes 			 */
2400df8bae1dSRodney W. Grimes 
2401df8bae1dSRodney W. Grimes 			prot &= (~VM_PROT_WRITE);
2402df8bae1dSRodney W. Grimes 		}
2403df8bae1dSRodney W. Grimes 	}
2404df8bae1dSRodney W. Grimes 	/*
2405df8bae1dSRodney W. Grimes 	 * Create an object if necessary.
2406df8bae1dSRodney W. Grimes 	 */
2407df8bae1dSRodney W. Grimes 	if (entry->object.vm_object == NULL) {
2408df8bae1dSRodney W. Grimes 
2409df8bae1dSRodney W. Grimes 		if (lock_read_to_write(&share_map->lock)) {
2410df8bae1dSRodney W. Grimes 			if (share_map != map)
2411df8bae1dSRodney W. Grimes 				vm_map_unlock_read(map);
2412df8bae1dSRodney W. Grimes 			goto RetryLookup;
2413df8bae1dSRodney W. Grimes 		}
241424a1cce3SDavid Greenman 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2415a316d390SJohn Dyson 		    OFF_TO_IDX(entry->end - entry->start));
2416df8bae1dSRodney W. Grimes 		entry->offset = 0;
2417df8bae1dSRodney W. Grimes 		lock_write_to_read(&share_map->lock);
2418df8bae1dSRodney W. Grimes 	}
2419b5b40fa6SJohn Dyson 
242067bf6868SJohn Dyson 	if (entry->object.vm_object != NULL)
2421b5b40fa6SJohn Dyson 		default_pager_convert_to_swapq(entry->object.vm_object);
2422df8bae1dSRodney W. Grimes 	/*
24230d94caffSDavid Greenman 	 * Return the object/offset from this entry.  If the entry was
24240d94caffSDavid Greenman 	 * copy-on-write or empty, it has been fixed up.
2425df8bae1dSRodney W. Grimes 	 */
2426df8bae1dSRodney W. Grimes 
2427a316d390SJohn Dyson 	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
2428df8bae1dSRodney W. Grimes 	*object = entry->object.vm_object;
2429df8bae1dSRodney W. Grimes 
2430df8bae1dSRodney W. Grimes 	/*
2431df8bae1dSRodney W. Grimes 	 * Return whether this is the only map sharing this data.
2432df8bae1dSRodney W. Grimes 	 */
2433df8bae1dSRodney W. Grimes 
2434df8bae1dSRodney W. Grimes 	if (!su) {
2435df8bae1dSRodney W. Grimes 		su = (share_map->ref_count == 1);
2436df8bae1dSRodney W. Grimes 	}
2437df8bae1dSRodney W. Grimes 	*out_prot = prot;
2438df8bae1dSRodney W. Grimes 	*single_use = su;
2439df8bae1dSRodney W. Grimes 
2440df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2441df8bae1dSRodney W. Grimes 
2442df8bae1dSRodney W. Grimes #undef	RETURN
2443df8bae1dSRodney W. Grimes }
2444df8bae1dSRodney W. Grimes 
2445df8bae1dSRodney W. Grimes /*
2446df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
2447df8bae1dSRodney W. Grimes  *
2448df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
2449df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
2450df8bae1dSRodney W. Grimes  */
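/*
 * Note (illustrative): on failure vm_map_lookup() has already dropped
 * its read lock via its RETURN macro, so a caller pairs this routine
 * only with a successful lookup, roughly:
 *
 *	rv = vm_map_lookup(&map, va, ftype, &entry, &obj, &pidx, &prot,
 *	    &wired, &su);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);	(no vm_map_lookup_done() on this path)
 *	...
 *	vm_map_lookup_done(map, entry);
 */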
2451df8bae1dSRodney W. Grimes 
24520d94caffSDavid Greenman void
24530d94caffSDavid Greenman vm_map_lookup_done(map, entry)
2454df8bae1dSRodney W. Grimes 	register vm_map_t map;
2455df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
2456df8bae1dSRodney W. Grimes {
2457df8bae1dSRodney W. Grimes 	/*
2458df8bae1dSRodney W. Grimes 	 * If this entry references a map, unlock it first.
2459df8bae1dSRodney W. Grimes 	 */
2460df8bae1dSRodney W. Grimes 
2461afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_A_MAP)
2462df8bae1dSRodney W. Grimes 		vm_map_unlock_read(entry->object.share_map);
2463df8bae1dSRodney W. Grimes 
2464df8bae1dSRodney W. Grimes 	/*
2465df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
2466df8bae1dSRodney W. Grimes 	 */
2467df8bae1dSRodney W. Grimes 
2468df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
2469df8bae1dSRodney W. Grimes }
2470df8bae1dSRodney W. Grimes 
2471c7c34a24SBruce Evans #include "opt_ddb.h"
2472c3cb3e12SDavid Greenman #ifdef DDB
2473c7c34a24SBruce Evans #include <sys/kernel.h>
2474c7c34a24SBruce Evans 
2475c7c34a24SBruce Evans #include <ddb/ddb.h>
2476c7c34a24SBruce Evans 
2477df8bae1dSRodney W. Grimes /*
2478df8bae1dSRodney W. Grimes  *	vm_map_print:	[ debug ]
2479df8bae1dSRodney W. Grimes  */
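/*
 * Reached from the in-kernel debugger as "show map <address>", where
 * <address> is a vm_map pointer (the value below is hypothetical):
 *
 *	db> show map 0xf0153280
 *
 * Supplying an address sets have_addr, used as the "full" flag that
 * lets the nested share-map and object printouts below recurse rather
 * than stop at their summary lines.
 */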
2480c7c34a24SBruce Evans DB_SHOW_COMMAND(map, vm_map_print)
2481df8bae1dSRodney W. Grimes {
2482c7c34a24SBruce Evans 	/* XXX convert args. */
2483c7c34a24SBruce Evans 	register vm_map_t map = (vm_map_t)addr;
2484c7c34a24SBruce Evans 	boolean_t full = have_addr;
2485df8bae1dSRodney W. Grimes 
2486c7c34a24SBruce Evans 	register vm_map_entry_t entry;
2487c7c34a24SBruce Evans 
2488c7c34a24SBruce Evans 	db_iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2489df8bae1dSRodney W. Grimes 	    (map->is_main_map ? "Task" : "Share"),
2490df8bae1dSRodney W. Grimes 	    (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2491df8bae1dSRodney W. Grimes 	    map->timestamp);
2492df8bae1dSRodney W. Grimes 
2493c7c34a24SBruce Evans 	if (!full && db_indent)
2494df8bae1dSRodney W. Grimes 		return;
2495df8bae1dSRodney W. Grimes 
2496c7c34a24SBruce Evans 	db_indent += 2;
2497df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
2498df8bae1dSRodney W. Grimes 	    entry = entry->next) {
2499c7c34a24SBruce Evans 		db_iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2500df8bae1dSRodney W. Grimes 		    (int) entry, (int) entry->start, (int) entry->end);
2501df8bae1dSRodney W. Grimes 		if (map->is_main_map) {
2502df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
2503df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
25040d94caffSDavid Greenman 
2505c7c34a24SBruce Evans 			db_printf("prot=%x/%x/%s, ",
2506df8bae1dSRodney W. Grimes 			    entry->protection,
2507df8bae1dSRodney W. Grimes 			    entry->max_protection,
2508df8bae1dSRodney W. Grimes 			    inheritance_name[entry->inheritance]);
2509df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
2510c7c34a24SBruce Evans 				db_printf("wired, ");
2511df8bae1dSRodney W. Grimes 		}
2512afa07f7eSJohn Dyson 		if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
2513c7c34a24SBruce Evans 			db_printf("share=0x%x, offset=0x%x\n",
2514df8bae1dSRodney W. Grimes 			    (int) entry->object.share_map,
2515df8bae1dSRodney W. Grimes 			    (int) entry->offset);
2516df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
2517afa07f7eSJohn Dyson 			    ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
2518df8bae1dSRodney W. Grimes 			    (entry->prev->object.share_map !=
2519df8bae1dSRodney W. Grimes 				entry->object.share_map)) {
2520c7c34a24SBruce Evans 				db_indent += 2;
2521914181e7SBruce Evans 				vm_map_print((int)entry->object.share_map,
2522914181e7SBruce Evans 					     full, 0, (char *)0);
2523c7c34a24SBruce Evans 				db_indent -= 2;
2524df8bae1dSRodney W. Grimes 			}
25250d94caffSDavid Greenman 		} else {
2526c7c34a24SBruce Evans 			db_printf("object=0x%x, offset=0x%x",
2527df8bae1dSRodney W. Grimes 			    (int) entry->object.vm_object,
2528df8bae1dSRodney W. Grimes 			    (int) entry->offset);
2529afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
2530c7c34a24SBruce Evans 				db_printf(", copy (%s)",
2531afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
2532c7c34a24SBruce Evans 			db_printf("\n");
2533df8bae1dSRodney W. Grimes 
2534df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
2535afa07f7eSJohn Dyson 			    (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
2536df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
2537df8bae1dSRodney W. Grimes 				entry->object.vm_object)) {
2538c7c34a24SBruce Evans 				db_indent += 2;
2539914181e7SBruce Evans 				vm_object_print((int)entry->object.vm_object,
2540914181e7SBruce Evans 						full, 0, (char *)0);
2541c7c34a24SBruce Evans 				db_indent -= 2;
2542df8bae1dSRodney W. Grimes 			}
2543df8bae1dSRodney W. Grimes 		}
2544df8bae1dSRodney W. Grimes 	}
2545c7c34a24SBruce Evans 	db_indent -= 2;
2546df8bae1dSRodney W. Grimes }
2547c7c34a24SBruce Evans #endif /* DDB */
2548