xref: /freebsd/sys/vm/vm_map.c (revision a316d390bda3e185e04632e807a012a345492935)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
17df8bae1dSRodney W. Grimes  *    must display the following acknowledgement:
18df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
19df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
20df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
21df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
22df8bae1dSRodney W. Grimes  *    without specific prior written permission.
23df8bae1dSRodney W. Grimes  *
24df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
35df8bae1dSRodney W. Grimes  *
363c4dd356SDavid Greenman  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  *
39df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40df8bae1dSRodney W. Grimes  * All rights reserved.
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43df8bae1dSRodney W. Grimes  *
44df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
45df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
46df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
47df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
48df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
55df8bae1dSRodney W. Grimes  *
56df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57df8bae1dSRodney W. Grimes  *  School of Computer Science
58df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
59df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
60df8bae1dSRodney W. Grimes  *
61df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
62df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
633c4dd356SDavid Greenman  *
64a316d390SJohn Dyson  * $Id: vm_map.c,v 1.28 1995/12/07 12:48:15 davidg Exp $
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes /*
68df8bae1dSRodney W. Grimes  *	Virtual memory mapping module.
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes #include <sys/param.h>
72df8bae1dSRodney W. Grimes #include <sys/systm.h>
73df8bae1dSRodney W. Grimes #include <sys/malloc.h>
74b5e8ce9fSBruce Evans #include <sys/proc.h>
75efeaf95aSDavid Greenman #include <sys/queue.h>
76efeaf95aSDavid Greenman #include <sys/vmmeter.h>
77df8bae1dSRodney W. Grimes 
78df8bae1dSRodney W. Grimes #include <vm/vm.h>
79efeaf95aSDavid Greenman #include <vm/vm_param.h>
80efeaf95aSDavid Greenman #include <vm/vm_prot.h>
81efeaf95aSDavid Greenman #include <vm/vm_inherit.h>
82efeaf95aSDavid Greenman #include <vm/lock.h>
83efeaf95aSDavid Greenman #include <vm/pmap.h>
84efeaf95aSDavid Greenman #include <vm/vm_map.h>
85df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
86df8bae1dSRodney W. Grimes #include <vm/vm_object.h>
8726f9a767SRodney W. Grimes #include <vm/vm_kern.h>
8824a1cce3SDavid Greenman #include <vm/vm_pager.h>
89efeaf95aSDavid Greenman #include <vm/vm_extern.h>
90df8bae1dSRodney W. Grimes 
91df8bae1dSRodney W. Grimes /*
92df8bae1dSRodney W. Grimes  *	Virtual memory maps provide for the mapping, protection,
93df8bae1dSRodney W. Grimes  *	and sharing of virtual memory objects.  In addition,
94df8bae1dSRodney W. Grimes  *	this module provides for an efficient virtual copy of
95df8bae1dSRodney W. Grimes  *	memory from one map to another.
96df8bae1dSRodney W. Grimes  *
97df8bae1dSRodney W. Grimes  *	Synchronization is required prior to most operations.
98df8bae1dSRodney W. Grimes  *
99df8bae1dSRodney W. Grimes  *	Maps consist of an ordered doubly-linked list of simple
100df8bae1dSRodney W. Grimes  *	entries; a single hint is used to speed up lookups.
101df8bae1dSRodney W. Grimes  *
102df8bae1dSRodney W. Grimes  *	In order to properly represent the sharing of virtual
103df8bae1dSRodney W. Grimes  *	memory regions among maps, the map structure is bi-level.
104df8bae1dSRodney W. Grimes  *	Top-level ("address") maps refer to regions of sharable
105df8bae1dSRodney W. Grimes  *	virtual memory.  These regions are implemented as
106df8bae1dSRodney W. Grimes  *	("sharing") maps, which then refer to the actual virtual
107df8bae1dSRodney W. Grimes  *	memory objects.  When two address maps "share" memory,
108df8bae1dSRodney W. Grimes  *	their top-level maps both have references to the same
109df8bae1dSRodney W. Grimes  *	sharing map.  When memory is virtual-copied from one
110df8bae1dSRodney W. Grimes  *	address map to another, the references in the sharing
111df8bae1dSRodney W. Grimes  *	maps are actually copied -- no copying occurs at the
112df8bae1dSRodney W. Grimes  *	virtual memory object level.
113df8bae1dSRodney W. Grimes  *
 *	Since portions of maps are specified by start/end addresses,
115df8bae1dSRodney W. Grimes  *	which may not align with existing map entries, all
116df8bae1dSRodney W. Grimes  *	routines merely "clip" entries to these start/end values.
117df8bae1dSRodney W. Grimes  *	[That is, an entry is split into two, bordering at a
118df8bae1dSRodney W. Grimes  *	start or end value.]  Note that these clippings may not
119df8bae1dSRodney W. Grimes  *	always be necessary (as the two resulting entries are then
120df8bae1dSRodney W. Grimes  *	not changed); however, the clipping is done for convenience.
121df8bae1dSRodney W. Grimes  *	No attempt is currently made to "glue back together" two
122df8bae1dSRodney W. Grimes  *	abutting entries.
123df8bae1dSRodney W. Grimes  *
124df8bae1dSRodney W. Grimes  *	As mentioned above, virtual copy operations are performed
125df8bae1dSRodney W. Grimes  *	by copying VM object references from one sharing map to
126df8bae1dSRodney W. Grimes  *	another, and then marking both regions as copy-on-write.
127df8bae1dSRodney W. Grimes  *	It is important to note that only one writeable reference
128df8bae1dSRodney W. Grimes  *	to a VM object region exists in any map -- this means that
129df8bae1dSRodney W. Grimes  *	shadow object creation can be delayed until a write operation
130df8bae1dSRodney W. Grimes  *	occurs.
131df8bae1dSRodney W. Grimes  */
132df8bae1dSRodney W. Grimes 
133df8bae1dSRodney W. Grimes /*
134df8bae1dSRodney W. Grimes  *	vm_map_startup:
135df8bae1dSRodney W. Grimes  *
136df8bae1dSRodney W. Grimes  *	Initialize the vm_map module.  Must be called before
137df8bae1dSRodney W. Grimes  *	any other vm_map routines.
138df8bae1dSRodney W. Grimes  *
139df8bae1dSRodney W. Grimes  *	Map and entry structures are allocated from the general
140df8bae1dSRodney W. Grimes  *	purpose memory pool with some exceptions:
141df8bae1dSRodney W. Grimes  *
142df8bae1dSRodney W. Grimes  *	- The kernel map and kmem submap are allocated statically.
143df8bae1dSRodney W. Grimes  *	- Kernel map entries are allocated out of a static pool.
144df8bae1dSRodney W. Grimes  *
145df8bae1dSRodney W. Grimes  *	These restrictions are necessary since malloc() uses the
146df8bae1dSRodney W. Grimes  *	maps and requires map entries.
147df8bae1dSRodney W. Grimes  */
148df8bae1dSRodney W. Grimes 
/*
 * Static bootstrap pool: vm_map_startup() carves kentry_data into a free
 * list of map structures (kmap_free) and kernel map entries (kentry_free)
 * for use before malloc()/kmem_map are available.
 */
vm_offset_t kentry_data;	/* base of the static bootstrap area */
vm_size_t kentry_data_size;	/* size of the static bootstrap area */
vm_map_entry_t kentry_free;	/* free list of kernel map entries */
vm_map_t kmap_free;		/* free list of static map structures */

int kentry_count;		/* number of entries on kentry_free */
/* Pageable kernel VA window used to replenish the kernel entry pool. */
static vm_offset_t mapvm_start, mapvm, mapvmmax;
static int mapvmpgcnt;		/* pages remaining in the mapvm window */

static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
void
vm_map_startup()
{
	register int i;
	register vm_map_entry_t mep;
	vm_map_t mp;

	/*
	 * Static map structures for allocation before initialization of
	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
	 */
	kmap_free = mp = (vm_map_t) kentry_data;
	i = MAX_KMAP;
	/* Link the first MAX_KMAP-1 maps; mp ends on the last one. */
	while (--i > 0) {
		mp->header.next = (vm_map_entry_t) (mp + 1);
		mp++;
	}
	/* Terminate the map free list; mp now points past the last map. */
	mp++->header.next = NULL;

	/*
	 * Form a free list of statically allocated kernel map entries with
	 * the rest.
	 */
	kentry_free = mep = (vm_map_entry_t) mp;
	/* Remaining bytes after the maps, divided into entries. */
	kentry_count = i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
	while (--i > 0) {
		mep->next = mep + 1;
		mep++;
	}
	mep->next = NULL;
}
192df8bae1dSRodney W. Grimes 
193df8bae1dSRodney W. Grimes /*
194df8bae1dSRodney W. Grimes  * Allocate a vmspace structure, including a vm_map and pmap,
195df8bae1dSRodney W. Grimes  * and initialize those structures.  The refcnt is set to 1.
196df8bae1dSRodney W. Grimes  * The remaining fields must be initialized by the caller.
197df8bae1dSRodney W. Grimes  */
struct vmspace *
vmspace_alloc(min, max, pageable)
	vm_offset_t min, max;
	int pageable;
{
	register struct vmspace *vm;

	/*
	 * One-time setup: reserve a pageable kernel VA window (mapvm),
	 * sized by v_page_count, that vm_map_entry_create() later backs
	 * with pages to replenish the kernel map entry pool.
	 */
	if (mapvmpgcnt == 0 && mapvm == 0) {
		int s;

		mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
		s = splhigh();	/* block interrupts while carving the window */
		mapvm_start = mapvm = kmem_alloc_pageable(kmem_map, mapvmpgcnt * PAGE_SIZE);
		mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
		splx(s);
		if (!mapvm)
			mapvmpgcnt = 0;	/* allocation failed; disable window */
	}
	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
	/* Zero only the fields before vm_startcopy; caller fills the rest. */
	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
	vm_map_init(&vm->vm_map, min, max, pageable);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;	/* XXX */
	vm->vm_refcnt = 1;
	return (vm);
}
224df8bae1dSRodney W. Grimes 
/*
 * Drop one reference on a vmspace; on the last reference, tear down all
 * mappings, wait out remaining map references, and free the structure.
 */
void
vmspace_free(vm)
	register struct vmspace *vm;
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {
		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);
		/* Sleep until we hold the sole map reference. */
		while( vm->vm_map.ref_count != 1)
			tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
		--vm->vm_map.ref_count;
		pmap_release(&vm->vm_pmap);
		FREE(vm, M_VMMAP);
	}
}
250df8bae1dSRodney W. Grimes 
251df8bae1dSRodney W. Grimes /*
252df8bae1dSRodney W. Grimes  *	vm_map_create:
253df8bae1dSRodney W. Grimes  *
254df8bae1dSRodney W. Grimes  *	Creates and returns a new empty VM map with
255df8bae1dSRodney W. Grimes  *	the given physical map structure, and having
256df8bae1dSRodney W. Grimes  *	the given lower and upper address bounds.
257df8bae1dSRodney W. Grimes  */
2580d94caffSDavid Greenman vm_map_t
2590d94caffSDavid Greenman vm_map_create(pmap, min, max, pageable)
260df8bae1dSRodney W. Grimes 	pmap_t pmap;
261df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
262df8bae1dSRodney W. Grimes 	boolean_t pageable;
263df8bae1dSRodney W. Grimes {
264df8bae1dSRodney W. Grimes 	register vm_map_t result;
265df8bae1dSRodney W. Grimes 
266df8bae1dSRodney W. Grimes 	if (kmem_map == NULL) {
267df8bae1dSRodney W. Grimes 		result = kmap_free;
268df8bae1dSRodney W. Grimes 		kmap_free = (vm_map_t) result->header.next;
269df8bae1dSRodney W. Grimes 		if (result == NULL)
270df8bae1dSRodney W. Grimes 			panic("vm_map_create: out of maps");
271df8bae1dSRodney W. Grimes 	} else
272df8bae1dSRodney W. Grimes 		MALLOC(result, vm_map_t, sizeof(struct vm_map),
273df8bae1dSRodney W. Grimes 		    M_VMMAP, M_WAITOK);
274df8bae1dSRodney W. Grimes 
275df8bae1dSRodney W. Grimes 	vm_map_init(result, min, max, pageable);
276df8bae1dSRodney W. Grimes 	result->pmap = pmap;
277df8bae1dSRodney W. Grimes 	return (result);
278df8bae1dSRodney W. Grimes }
279df8bae1dSRodney W. Grimes 
280df8bae1dSRodney W. Grimes /*
281df8bae1dSRodney W. Grimes  * Initialize an existing vm_map structure
282df8bae1dSRodney W. Grimes  * such as that in the vmspace structure.
283df8bae1dSRodney W. Grimes  * The pmap is set elsewhere.
284df8bae1dSRodney W. Grimes  */
285df8bae1dSRodney W. Grimes void
286df8bae1dSRodney W. Grimes vm_map_init(map, min, max, pageable)
287df8bae1dSRodney W. Grimes 	register struct vm_map *map;
288df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
289df8bae1dSRodney W. Grimes 	boolean_t pageable;
290df8bae1dSRodney W. Grimes {
291df8bae1dSRodney W. Grimes 	map->header.next = map->header.prev = &map->header;
292df8bae1dSRodney W. Grimes 	map->nentries = 0;
293df8bae1dSRodney W. Grimes 	map->size = 0;
294df8bae1dSRodney W. Grimes 	map->ref_count = 1;
295df8bae1dSRodney W. Grimes 	map->is_main_map = TRUE;
296df8bae1dSRodney W. Grimes 	map->min_offset = min;
297df8bae1dSRodney W. Grimes 	map->max_offset = max;
298df8bae1dSRodney W. Grimes 	map->entries_pageable = pageable;
299df8bae1dSRodney W. Grimes 	map->first_free = &map->header;
300df8bae1dSRodney W. Grimes 	map->hint = &map->header;
301df8bae1dSRodney W. Grimes 	map->timestamp = 0;
302df8bae1dSRodney W. Grimes 	lock_init(&map->lock, TRUE);
303df8bae1dSRodney W. Grimes }
304df8bae1dSRodney W. Grimes 
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static struct vm_map_entry *mappool;	/* cache of malloc'ed entries */
static int mappoolcnt;			/* number of entries on mappool */

vm_map_entry_t
vm_map_entry_create(map)
	vm_map_t map;
{
	vm_map_entry_t entry;
	int i;

#define KENTRY_LOW_WATER 64
#define MAPENTRY_LOW_WATER 128

	/*
	 * This is a *very* nasty (and sort of incomplete) hack!!!!
	 *
	 * When the kernel entry pool runs low, back one more page of the
	 * mapvm window with a wired page and carve it into fresh entries.
	 */
	if (kentry_count < KENTRY_LOW_WATER) {
		if (mapvmpgcnt && mapvm) {
			vm_page_t m;

			m = vm_page_alloc(kmem_object,
			        OFF_TO_IDX(mapvm - vm_map_min(kmem_map)),
				    (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);
			if (m) {
				int newentries;

				newentries = (PAGE_SIZE / sizeof(struct vm_map_entry));
				/* Wire and map the page into the window. */
				vm_page_wire(m);
				m->flags &= ~PG_BUSY;
				m->valid = VM_PAGE_BITS_ALL;
				pmap_enter(vm_map_pmap(kmem_map), mapvm,
				    VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1);
				m->flags |= PG_WRITEABLE|PG_MAPPED;

				entry = (vm_map_entry_t) mapvm;
				mapvm += PAGE_SIZE;
				--mapvmpgcnt;

				/*
				 * Dispose each carved entry; being inside
				 * the mapvm window, they land on kentry_free.
				 */
				for (i = 0; i < newentries; i++) {
					vm_map_entry_dispose(kernel_map, entry);
					entry++;
				}
			}
		}
	}
	/* System maps must not recurse into malloc(); use the pools. */
	if (map == kernel_map || map == kmem_map || map == pager_map) {

		entry = kentry_free;
		if (entry) {
			kentry_free = entry->next;
			--kentry_count;
			return entry;
		}
		/* Kernel pool empty: fall back to the malloc'ed cache. */
		entry = mappool;
		if (entry) {
			mappool = entry->next;
			--mappoolcnt;
			return entry;
		}
	} else {
		entry = mappool;
		if (entry) {
			mappool = entry->next;
			--mappoolcnt;
			return entry;
		}
		MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
		    M_VMMAPENT, M_WAITOK);
	}
	if (entry == NULL)
		panic("vm_map_entry_create: out of map entries");

	return (entry);
}
385df8bae1dSRodney W. Grimes 
386df8bae1dSRodney W. Grimes /*
387df8bae1dSRodney W. Grimes  *	vm_map_entry_dispose:	[ internal use only ]
388df8bae1dSRodney W. Grimes  *
389df8bae1dSRodney W. Grimes  *	Inverse of vm_map_entry_create.
390df8bae1dSRodney W. Grimes  */
void
vm_map_entry_dispose(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	/*
	 * Entries from the static bootstrap area or the mapvm window can
	 * never be malloc-freed; they (and any entry, while the kernel
	 * entry pool is below its low-water mark) go back on kentry_free.
	 */
	if ((kentry_count < KENTRY_LOW_WATER) ||
	    ((vm_offset_t) entry >= kentry_data && (vm_offset_t) entry < (kentry_data + kentry_data_size)) ||
	    ((vm_offset_t) entry >= mapvm_start && (vm_offset_t) entry < mapvmmax)) {
		entry->next = kentry_free;
		kentry_free = entry;
		++kentry_count;
		return;
	} else {
		/* Cache a bounded number of malloc'ed entries in mappool. */
		if (mappoolcnt < MAPENTRY_LOW_WATER) {
			entry->next = mappool;
			mappool = entry;
			++mappoolcnt;
			return;
		}
		FREE(entry, M_VMMAPENT);
	}
}
413df8bae1dSRodney W. Grimes 
414df8bae1dSRodney W. Grimes /*
415df8bae1dSRodney W. Grimes  *	vm_map_entry_{un,}link:
416df8bae1dSRodney W. Grimes  *
417df8bae1dSRodney W. Grimes  *	Insert/remove entries from maps.
418df8bae1dSRodney W. Grimes  */
/* Insert "entry" into "map"'s entry list directly after "after_where". */
#define	vm_map_entry_link(map, after_where, entry) \
		{ \
		(map)->nentries++; \
		(entry)->prev = (after_where); \
		(entry)->next = (after_where)->next; \
		(entry)->prev->next = (entry); \
		(entry)->next->prev = (entry); \
		}
/* Remove "entry" from "map"'s doubly-linked entry list. */
#define	vm_map_entry_unlink(map, entry) \
		{ \
		(map)->nentries--; \
		(entry)->next->prev = (entry)->prev; \
		(entry)->prev->next = (entry)->next; \
		}
433df8bae1dSRodney W. Grimes 
434df8bae1dSRodney W. Grimes /*
435df8bae1dSRodney W. Grimes  *	vm_map_reference:
436df8bae1dSRodney W. Grimes  *
437df8bae1dSRodney W. Grimes  *	Creates another valid reference to the given map.
438df8bae1dSRodney W. Grimes  *
439df8bae1dSRodney W. Grimes  */
4400d94caffSDavid Greenman void
4410d94caffSDavid Greenman vm_map_reference(map)
442df8bae1dSRodney W. Grimes 	register vm_map_t map;
443df8bae1dSRodney W. Grimes {
444df8bae1dSRodney W. Grimes 	if (map == NULL)
445df8bae1dSRodney W. Grimes 		return;
446df8bae1dSRodney W. Grimes 
447df8bae1dSRodney W. Grimes 	map->ref_count++;
448df8bae1dSRodney W. Grimes }
449df8bae1dSRodney W. Grimes 
450df8bae1dSRodney W. Grimes /*
451df8bae1dSRodney W. Grimes  *	vm_map_deallocate:
452df8bae1dSRodney W. Grimes  *
453df8bae1dSRodney W. Grimes  *	Removes a reference from the specified map,
454df8bae1dSRodney W. Grimes  *	destroying it if no references remain.
455df8bae1dSRodney W. Grimes  *	The map should not be locked.
456df8bae1dSRodney W. Grimes  */
void
vm_map_deallocate(map)
	register vm_map_t map;
{
	register int c;

	if (map == NULL)
		return;

	c = map->ref_count;

	if (c == 0)
		panic("vm_map_deallocate: deallocating already freed map");

	if (c != 1) {
		/* Not the last reference: drop it and wake any sleepers
		 * waiting on the reference count (see vmspace_free). */
		--map->ref_count;
		wakeup(&map->ref_count);
		return;
	}
	/*
	 * Lock the map, to wait out all other references to it.
	 */

	vm_map_lock(map);
	(void) vm_map_delete(map, map->min_offset, map->max_offset);
	--map->ref_count;
	/* A new reference may have arrived while we slept on the lock. */
	if( map->ref_count != 0) {
		vm_map_unlock(map);
		return;
	}

	pmap_destroy(map->pmap);
	FREE(map, M_VMMAP);
}
491df8bae1dSRodney W. Grimes 
492df8bae1dSRodney W. Grimes /*
493df8bae1dSRodney W. Grimes  *	vm_map_insert:
494df8bae1dSRodney W. Grimes  *
495df8bae1dSRodney W. Grimes  *	Inserts the given whole VM object into the target
496df8bae1dSRodney W. Grimes  *	map at the specified address range.  The object's
497df8bae1dSRodney W. Grimes  *	size should match that of the address range.
498df8bae1dSRodney W. Grimes  *
499df8bae1dSRodney W. Grimes  *	Requires that the map be locked, and leaves it so.
500df8bae1dSRodney W. Grimes  */
int
vm_map_insert(map, object, offset, start, end)
	vm_map_t map;
	vm_object_t object;
	vm_ooffset_t offset;
	vm_offset_t start;
	vm_offset_t end;
{
	register vm_map_entry_t new_entry;
	register vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;

	/*
	 * Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	/*
	 * See if we can avoid creating a new entry by extending one of our
	 * neighbors.  Only an anonymous (object == NULL) range abutting a
	 * previous entry with default attributes and no wiring qualifies.
	 */

	if (object == NULL) {
		if ((prev_entry != &map->header) &&
		    (prev_entry->end == start) &&
		    (map->is_main_map) &&
		    (prev_entry->is_a_map == FALSE) &&
		    (prev_entry->is_sub_map == FALSE) &&
		    (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == VM_PROT_DEFAULT) &&
		    (prev_entry->max_protection == VM_PROT_DEFAULT) &&
		    (prev_entry->wired_count == 0)) {

			if (vm_object_coalesce(prev_entry->object.vm_object,
				OFF_TO_IDX(prev_entry->offset),
				(vm_size_t) (prev_entry->end
				    - prev_entry->start),
				(vm_size_t) (end - prev_entry->end))) {
				/*
				 * Coalesced the two objects - can extend the
				 * previous map entry to include the new
				 * range.
				 */
				map->size += (end - prev_entry->end);
				prev_entry->end = end;
				return (KERN_SUCCESS);
			}
		}
	}
	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->is_a_map = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->copy_on_write = FALSE;
	new_entry->needs_copy = FALSE;

	/* Only top-level maps carry inheritance/protection/wiring state. */
	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = VM_PROT_DEFAULT;
		new_entry->max_protection = VM_PROT_DEFAULT;
		new_entry->wired_count = 0;
	}
	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */

	if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	return (KERN_SUCCESS);
}
609df8bae1dSRodney W. Grimes 
610df8bae1dSRodney W. Grimes /*
611df8bae1dSRodney W. Grimes  *	SAVE_HINT:
612df8bae1dSRodney W. Grimes  *
613df8bae1dSRodney W. Grimes  *	Saves the specified entry as the hint for
61424a1cce3SDavid Greenman  *	future lookups.
615df8bae1dSRodney W. Grimes  */
616df8bae1dSRodney W. Grimes #define	SAVE_HINT(map,value) \
61724a1cce3SDavid Greenman 		(map)->hint = (value);
618df8bae1dSRodney W. Grimes 
619df8bae1dSRodney W. Grimes /*
620df8bae1dSRodney W. Grimes  *	vm_map_lookup_entry:	[ internal use only ]
621df8bae1dSRodney W. Grimes  *
622df8bae1dSRodney W. Grimes  *	Finds the map entry containing (or
623df8bae1dSRodney W. Grimes  *	immediately preceding) the specified address
624df8bae1dSRodney W. Grimes  *	in the given map; the entry is returned
625df8bae1dSRodney W. Grimes  *	in the "entry" parameter.  The boolean
626df8bae1dSRodney W. Grimes  *	result indicates whether the address is
627df8bae1dSRodney W. Grimes  *	actually contained in the map.
628df8bae1dSRodney W. Grimes  */
6290d94caffSDavid Greenman boolean_t
6300d94caffSDavid Greenman vm_map_lookup_entry(map, address, entry)
631df8bae1dSRodney W. Grimes 	register vm_map_t map;
632df8bae1dSRodney W. Grimes 	register vm_offset_t address;
633df8bae1dSRodney W. Grimes 	vm_map_entry_t *entry;	/* OUT */
634df8bae1dSRodney W. Grimes {
635df8bae1dSRodney W. Grimes 	register vm_map_entry_t cur;
636df8bae1dSRodney W. Grimes 	register vm_map_entry_t last;
637df8bae1dSRodney W. Grimes 
638df8bae1dSRodney W. Grimes 	/*
6390d94caffSDavid Greenman 	 * Start looking either from the head of the list, or from the hint.
640df8bae1dSRodney W. Grimes 	 */
641df8bae1dSRodney W. Grimes 
642df8bae1dSRodney W. Grimes 	cur = map->hint;
643df8bae1dSRodney W. Grimes 
644df8bae1dSRodney W. Grimes 	if (cur == &map->header)
645df8bae1dSRodney W. Grimes 		cur = cur->next;
646df8bae1dSRodney W. Grimes 
647df8bae1dSRodney W. Grimes 	if (address >= cur->start) {
648df8bae1dSRodney W. Grimes 		/*
649df8bae1dSRodney W. Grimes 		 * Go from hint to end of list.
650df8bae1dSRodney W. Grimes 		 *
6510d94caffSDavid Greenman 		 * But first, make a quick check to see if we are already looking
6520d94caffSDavid Greenman 		 * at the entry we want (which is usually the case). Note also
6530d94caffSDavid Greenman 		 * that we don't need to save the hint here... it is the same
6540d94caffSDavid Greenman 		 * hint (unless we are at the header, in which case the hint
6550d94caffSDavid Greenman 		 * didn't buy us anything anyway).
656df8bae1dSRodney W. Grimes 		 */
657df8bae1dSRodney W. Grimes 		last = &map->header;
658df8bae1dSRodney W. Grimes 		if ((cur != last) && (cur->end > address)) {
659df8bae1dSRodney W. Grimes 			*entry = cur;
660df8bae1dSRodney W. Grimes 			return (TRUE);
661df8bae1dSRodney W. Grimes 		}
6620d94caffSDavid Greenman 	} else {
663df8bae1dSRodney W. Grimes 		/*
664df8bae1dSRodney W. Grimes 		 * Go from start to hint, *inclusively*
665df8bae1dSRodney W. Grimes 		 */
666df8bae1dSRodney W. Grimes 		last = cur->next;
667df8bae1dSRodney W. Grimes 		cur = map->header.next;
668df8bae1dSRodney W. Grimes 	}
669df8bae1dSRodney W. Grimes 
670df8bae1dSRodney W. Grimes 	/*
671df8bae1dSRodney W. Grimes 	 * Search linearly
672df8bae1dSRodney W. Grimes 	 */
673df8bae1dSRodney W. Grimes 
674df8bae1dSRodney W. Grimes 	while (cur != last) {
675df8bae1dSRodney W. Grimes 		if (cur->end > address) {
676df8bae1dSRodney W. Grimes 			if (address >= cur->start) {
677df8bae1dSRodney W. Grimes 				/*
6780d94caffSDavid Greenman 				 * Save this lookup for future hints, and
6790d94caffSDavid Greenman 				 * return
680df8bae1dSRodney W. Grimes 				 */
681df8bae1dSRodney W. Grimes 
682df8bae1dSRodney W. Grimes 				*entry = cur;
683df8bae1dSRodney W. Grimes 				SAVE_HINT(map, cur);
684df8bae1dSRodney W. Grimes 				return (TRUE);
685df8bae1dSRodney W. Grimes 			}
686df8bae1dSRodney W. Grimes 			break;
687df8bae1dSRodney W. Grimes 		}
688df8bae1dSRodney W. Grimes 		cur = cur->next;
689df8bae1dSRodney W. Grimes 	}
690df8bae1dSRodney W. Grimes 	*entry = cur->prev;
691df8bae1dSRodney W. Grimes 	SAVE_HINT(map, *entry);
692df8bae1dSRodney W. Grimes 	return (FALSE);
693df8bae1dSRodney W. Grimes }
694df8bae1dSRodney W. Grimes 
695df8bae1dSRodney W. Grimes /*
696df8bae1dSRodney W. Grimes  * Find sufficient space for `length' bytes in the given map, starting at
697df8bae1dSRodney W. Grimes  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
698df8bae1dSRodney W. Grimes  */
699df8bae1dSRodney W. Grimes int
700df8bae1dSRodney W. Grimes vm_map_findspace(map, start, length, addr)
701df8bae1dSRodney W. Grimes 	register vm_map_t map;
702df8bae1dSRodney W. Grimes 	register vm_offset_t start;
703df8bae1dSRodney W. Grimes 	vm_size_t length;
704df8bae1dSRodney W. Grimes 	vm_offset_t *addr;
705df8bae1dSRodney W. Grimes {
706df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry, next;
707df8bae1dSRodney W. Grimes 	register vm_offset_t end;
708df8bae1dSRodney W. Grimes 
709df8bae1dSRodney W. Grimes 	if (start < map->min_offset)
710df8bae1dSRodney W. Grimes 		start = map->min_offset;
711df8bae1dSRodney W. Grimes 	if (start > map->max_offset)
712df8bae1dSRodney W. Grimes 		return (1);
713df8bae1dSRodney W. Grimes 
714df8bae1dSRodney W. Grimes 	/*
7150d94caffSDavid Greenman 	 * Look for the first possible address; if there's already something
7160d94caffSDavid Greenman 	 * at this address, we have to start after it.
717df8bae1dSRodney W. Grimes 	 */
718df8bae1dSRodney W. Grimes 	if (start == map->min_offset) {
719df8bae1dSRodney W. Grimes 		if ((entry = map->first_free) != &map->header)
720df8bae1dSRodney W. Grimes 			start = entry->end;
721df8bae1dSRodney W. Grimes 	} else {
722df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp;
7230d94caffSDavid Greenman 
724df8bae1dSRodney W. Grimes 		if (vm_map_lookup_entry(map, start, &tmp))
725df8bae1dSRodney W. Grimes 			start = tmp->end;
726df8bae1dSRodney W. Grimes 		entry = tmp;
727df8bae1dSRodney W. Grimes 	}
728df8bae1dSRodney W. Grimes 
729df8bae1dSRodney W. Grimes 	/*
7300d94caffSDavid Greenman 	 * Look through the rest of the map, trying to fit a new region in the
7310d94caffSDavid Greenman 	 * gap between existing regions, or after the very last region.
732df8bae1dSRodney W. Grimes 	 */
733df8bae1dSRodney W. Grimes 	for (;; start = (entry = next)->end) {
734df8bae1dSRodney W. Grimes 		/*
735df8bae1dSRodney W. Grimes 		 * Find the end of the proposed new region.  Be sure we didn't
736df8bae1dSRodney W. Grimes 		 * go beyond the end of the map, or wrap around the address;
737df8bae1dSRodney W. Grimes 		 * if so, we lose.  Otherwise, if this is the last entry, or
738df8bae1dSRodney W. Grimes 		 * if the proposed new region fits before the next entry, we
739df8bae1dSRodney W. Grimes 		 * win.
740df8bae1dSRodney W. Grimes 		 */
741df8bae1dSRodney W. Grimes 		end = start + length;
742df8bae1dSRodney W. Grimes 		if (end > map->max_offset || end < start)
743df8bae1dSRodney W. Grimes 			return (1);
744df8bae1dSRodney W. Grimes 		next = entry->next;
745df8bae1dSRodney W. Grimes 		if (next == &map->header || next->start >= end)
746df8bae1dSRodney W. Grimes 			break;
747df8bae1dSRodney W. Grimes 	}
748df8bae1dSRodney W. Grimes 	SAVE_HINT(map, entry);
749df8bae1dSRodney W. Grimes 	*addr = start;
7500d94caffSDavid Greenman 	if (map == kernel_map && round_page(start + length) > kernel_vm_end)
7510d94caffSDavid Greenman 		pmap_growkernel(round_page(start + length));
752df8bae1dSRodney W. Grimes 	return (0);
753df8bae1dSRodney W. Grimes }
754df8bae1dSRodney W. Grimes 
755df8bae1dSRodney W. Grimes /*
756df8bae1dSRodney W. Grimes  *	vm_map_find finds an unallocated region in the target address
757df8bae1dSRodney W. Grimes  *	map with the given length.  The search is defined to be
758df8bae1dSRodney W. Grimes  *	first-fit from the specified address; the region found is
759df8bae1dSRodney W. Grimes  *	returned in the same parameter.
760df8bae1dSRodney W. Grimes  *
761df8bae1dSRodney W. Grimes  */
762df8bae1dSRodney W. Grimes int
763df8bae1dSRodney W. Grimes vm_map_find(map, object, offset, addr, length, find_space)
764df8bae1dSRodney W. Grimes 	vm_map_t map;
765df8bae1dSRodney W. Grimes 	vm_object_t object;
766a316d390SJohn Dyson 	vm_ooffset_t offset;
767df8bae1dSRodney W. Grimes 	vm_offset_t *addr;	/* IN/OUT */
768df8bae1dSRodney W. Grimes 	vm_size_t length;
769df8bae1dSRodney W. Grimes 	boolean_t find_space;
770df8bae1dSRodney W. Grimes {
771df8bae1dSRodney W. Grimes 	register vm_offset_t start;
7728d6e8edeSDavid Greenman 	int result, s = 0;
773df8bae1dSRodney W. Grimes 
774df8bae1dSRodney W. Grimes 	start = *addr;
7758d6e8edeSDavid Greenman 
7768d6e8edeSDavid Greenman 	if (map == kmem_map)
7778d6e8edeSDavid Greenman 		s = splhigh();
7788d6e8edeSDavid Greenman 
779bea41bcfSDavid Greenman 	vm_map_lock(map);
780df8bae1dSRodney W. Grimes 	if (find_space) {
781df8bae1dSRodney W. Grimes 		if (vm_map_findspace(map, start, length, addr)) {
782df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
7838d6e8edeSDavid Greenman 			if (map == kmem_map)
7848d6e8edeSDavid Greenman 				splx(s);
785df8bae1dSRodney W. Grimes 			return (KERN_NO_SPACE);
786df8bae1dSRodney W. Grimes 		}
787df8bae1dSRodney W. Grimes 		start = *addr;
788df8bae1dSRodney W. Grimes 	}
789df8bae1dSRodney W. Grimes 	result = vm_map_insert(map, object, offset, start, start + length);
790df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
7918d6e8edeSDavid Greenman 
7928d6e8edeSDavid Greenman 	if (map == kmem_map)
7938d6e8edeSDavid Greenman 		splx(s);
7948d6e8edeSDavid Greenman 
795df8bae1dSRodney W. Grimes 	return (result);
796df8bae1dSRodney W. Grimes }
797df8bae1dSRodney W. Grimes 
798df8bae1dSRodney W. Grimes /*
799df8bae1dSRodney W. Grimes  *	vm_map_simplify_entry:	[ internal use only ]
800df8bae1dSRodney W. Grimes  *
801df8bae1dSRodney W. Grimes  *	Simplify the given map entry by:
802df8bae1dSRodney W. Grimes  *		removing extra sharing maps
803df8bae1dSRodney W. Grimes  *		[XXX maybe later] merging with a neighbor
804df8bae1dSRodney W. Grimes  */
8050d94caffSDavid Greenman void
8060d94caffSDavid Greenman vm_map_simplify_entry(map, entry)
807df8bae1dSRodney W. Grimes 	vm_map_t map;
808df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
809df8bae1dSRodney W. Grimes {
810df8bae1dSRodney W. Grimes #ifdef	lint
811df8bae1dSRodney W. Grimes 	map++;
812df8bae1dSRodney W. Grimes #endif
813df8bae1dSRodney W. Grimes 
814df8bae1dSRodney W. Grimes 	/*
8150d94caffSDavid Greenman 	 * If this entry corresponds to a sharing map, then see if we can
8160d94caffSDavid Greenman 	 * remove the level of indirection. If it's not a sharing map, then it
8170d94caffSDavid Greenman 	 * points to a VM object, so see if we can merge with either of our
8180d94caffSDavid Greenman 	 * neighbors.
819df8bae1dSRodney W. Grimes 	 */
820df8bae1dSRodney W. Grimes 
821df8bae1dSRodney W. Grimes 	if (entry->is_sub_map)
822df8bae1dSRodney W. Grimes 		return;
823df8bae1dSRodney W. Grimes 	if (entry->is_a_map) {
824df8bae1dSRodney W. Grimes #if	0
825df8bae1dSRodney W. Grimes 		vm_map_t my_share_map;
826df8bae1dSRodney W. Grimes 		int count;
827df8bae1dSRodney W. Grimes 
828df8bae1dSRodney W. Grimes 		my_share_map = entry->object.share_map;
829df8bae1dSRodney W. Grimes 		count = my_share_map->ref_count;
830df8bae1dSRodney W. Grimes 
831df8bae1dSRodney W. Grimes 		if (count == 1) {
8320d94caffSDavid Greenman 			/*
8330d94caffSDavid Greenman 			 * Can move the region from entry->start to entry->end
8340d94caffSDavid Greenman 			 * (+ entry->offset) in my_share_map into place of
8350d94caffSDavid Greenman 			 * entry. Later.
836df8bae1dSRodney W. Grimes 			 */
837df8bae1dSRodney W. Grimes 		}
838df8bae1dSRodney W. Grimes #endif
8390d94caffSDavid Greenman 	} else {
840df8bae1dSRodney W. Grimes 		/*
841df8bae1dSRodney W. Grimes 		 * Try to merge with our neighbors.
842df8bae1dSRodney W. Grimes 		 *
843df8bae1dSRodney W. Grimes 		 * Conditions for merge are:
844df8bae1dSRodney W. Grimes 		 *
8450d94caffSDavid Greenman 		 * 1.  entries are adjacent. 2.  both entries point to objects
846df8bae1dSRodney W. Grimes 		 * with null pagers.
847df8bae1dSRodney W. Grimes 		 *
8480d94caffSDavid Greenman 		 * If a merge is possible, we replace the two entries with a
8490d94caffSDavid Greenman 		 * single entry, then merge the two objects into a single
8500d94caffSDavid Greenman 		 * object.
851df8bae1dSRodney W. Grimes 		 *
8520d94caffSDavid Greenman 		 * Now, all that is left to do is write the code!
853df8bae1dSRodney W. Grimes 		 */
854df8bae1dSRodney W. Grimes 	}
855df8bae1dSRodney W. Grimes }
856df8bae1dSRodney W. Grimes 
857df8bae1dSRodney W. Grimes /*
858df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
859df8bae1dSRodney W. Grimes  *
860df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
861df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
862df8bae1dSRodney W. Grimes  *	it splits the entry into two.
863df8bae1dSRodney W. Grimes  */
864df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \
865df8bae1dSRodney W. Grimes { \
866df8bae1dSRodney W. Grimes 	if (startaddr > entry->start) \
867df8bae1dSRodney W. Grimes 		_vm_map_clip_start(map, entry, startaddr); \
868df8bae1dSRodney W. Grimes }
869df8bae1dSRodney W. Grimes 
870df8bae1dSRodney W. Grimes /*
871df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
872df8bae1dSRodney W. Grimes  *	the entry must be split.
873df8bae1dSRodney W. Grimes  */
8740d94caffSDavid Greenman static void
8750d94caffSDavid Greenman _vm_map_clip_start(map, entry, start)
876df8bae1dSRodney W. Grimes 	register vm_map_t map;
877df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
878df8bae1dSRodney W. Grimes 	register vm_offset_t start;
879df8bae1dSRodney W. Grimes {
880df8bae1dSRodney W. Grimes 	register vm_map_entry_t new_entry;
881df8bae1dSRodney W. Grimes 
882df8bae1dSRodney W. Grimes 	/*
883df8bae1dSRodney W. Grimes 	 * See if we can simplify this entry first
884df8bae1dSRodney W. Grimes 	 */
885df8bae1dSRodney W. Grimes 
88626f9a767SRodney W. Grimes 	/* vm_map_simplify_entry(map, entry); */
887df8bae1dSRodney W. Grimes 
888df8bae1dSRodney W. Grimes 	/*
8890d94caffSDavid Greenman 	 * Split off the front portion -- note that we must insert the new
8900d94caffSDavid Greenman 	 * entry BEFORE this one, so that this entry has the specified
8910d94caffSDavid Greenman 	 * starting address.
892df8bae1dSRodney W. Grimes 	 */
893df8bae1dSRodney W. Grimes 
894df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
895df8bae1dSRodney W. Grimes 	*new_entry = *entry;
896df8bae1dSRodney W. Grimes 
897df8bae1dSRodney W. Grimes 	new_entry->end = start;
898df8bae1dSRodney W. Grimes 	entry->offset += (start - entry->start);
899df8bae1dSRodney W. Grimes 	entry->start = start;
900df8bae1dSRodney W. Grimes 
901df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry->prev, new_entry);
902df8bae1dSRodney W. Grimes 
903df8bae1dSRodney W. Grimes 	if (entry->is_a_map || entry->is_sub_map)
904df8bae1dSRodney W. Grimes 		vm_map_reference(new_entry->object.share_map);
905df8bae1dSRodney W. Grimes 	else
906df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
907df8bae1dSRodney W. Grimes }
908df8bae1dSRodney W. Grimes 
909df8bae1dSRodney W. Grimes /*
910df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
911df8bae1dSRodney W. Grimes  *
912df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
913df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
914df8bae1dSRodney W. Grimes  *	it splits the entry into two.
915df8bae1dSRodney W. Grimes  */
916df8bae1dSRodney W. Grimes 
917df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \
918df8bae1dSRodney W. Grimes { \
919df8bae1dSRodney W. Grimes 	if (endaddr < entry->end) \
920df8bae1dSRodney W. Grimes 		_vm_map_clip_end(map, entry, endaddr); \
921df8bae1dSRodney W. Grimes }
922df8bae1dSRodney W. Grimes 
923df8bae1dSRodney W. Grimes /*
924df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
925df8bae1dSRodney W. Grimes  *	the entry must be split.
926df8bae1dSRodney W. Grimes  */
9270d94caffSDavid Greenman static void
9280d94caffSDavid Greenman _vm_map_clip_end(map, entry, end)
929df8bae1dSRodney W. Grimes 	register vm_map_t map;
930df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
931df8bae1dSRodney W. Grimes 	register vm_offset_t end;
932df8bae1dSRodney W. Grimes {
933df8bae1dSRodney W. Grimes 	register vm_map_entry_t new_entry;
934df8bae1dSRodney W. Grimes 
935df8bae1dSRodney W. Grimes 	/*
9360d94caffSDavid Greenman 	 * Create a new entry and insert it AFTER the specified entry
937df8bae1dSRodney W. Grimes 	 */
938df8bae1dSRodney W. Grimes 
939df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
940df8bae1dSRodney W. Grimes 	*new_entry = *entry;
941df8bae1dSRodney W. Grimes 
942df8bae1dSRodney W. Grimes 	new_entry->start = entry->end = end;
943df8bae1dSRodney W. Grimes 	new_entry->offset += (end - entry->start);
944df8bae1dSRodney W. Grimes 
945df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry, new_entry);
946df8bae1dSRodney W. Grimes 
947df8bae1dSRodney W. Grimes 	if (entry->is_a_map || entry->is_sub_map)
948df8bae1dSRodney W. Grimes 		vm_map_reference(new_entry->object.share_map);
949df8bae1dSRodney W. Grimes 	else
950df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
951df8bae1dSRodney W. Grimes }
952df8bae1dSRodney W. Grimes 
953df8bae1dSRodney W. Grimes /*
954df8bae1dSRodney W. Grimes  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
955df8bae1dSRodney W. Grimes  *
956df8bae1dSRodney W. Grimes  *	Asserts that the starting and ending region
957df8bae1dSRodney W. Grimes  *	addresses fall within the valid range of the map.
958df8bae1dSRodney W. Grimes  */
959df8bae1dSRodney W. Grimes #define	VM_MAP_RANGE_CHECK(map, start, end)		\
960df8bae1dSRodney W. Grimes 		{					\
961df8bae1dSRodney W. Grimes 		if (start < vm_map_min(map))		\
962df8bae1dSRodney W. Grimes 			start = vm_map_min(map);	\
963df8bae1dSRodney W. Grimes 		if (end > vm_map_max(map))		\
964df8bae1dSRodney W. Grimes 			end = vm_map_max(map);		\
965df8bae1dSRodney W. Grimes 		if (start > end)			\
966df8bae1dSRodney W. Grimes 			start = end;			\
967df8bae1dSRodney W. Grimes 		}
968df8bae1dSRodney W. Grimes 
969df8bae1dSRodney W. Grimes /*
970df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
971df8bae1dSRodney W. Grimes  *
972df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
973df8bae1dSRodney W. Grimes  *
974df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
975df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
976df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
977df8bae1dSRodney W. Grimes  *
978df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
979df8bae1dSRodney W. Grimes  *	within this rage after calling vm_map_submap:
980df8bae1dSRodney W. Grimes  *		vm_fault
981df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
982df8bae1dSRodney W. Grimes  *
983df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
984df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
985df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
986df8bae1dSRodney W. Grimes  */
987df8bae1dSRodney W. Grimes int
988df8bae1dSRodney W. Grimes vm_map_submap(map, start, end, submap)
989df8bae1dSRodney W. Grimes 	register vm_map_t map;
990df8bae1dSRodney W. Grimes 	register vm_offset_t start;
991df8bae1dSRodney W. Grimes 	register vm_offset_t end;
992df8bae1dSRodney W. Grimes 	vm_map_t submap;
993df8bae1dSRodney W. Grimes {
994df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
995df8bae1dSRodney W. Grimes 	register int result = KERN_INVALID_ARGUMENT;
996df8bae1dSRodney W. Grimes 
997df8bae1dSRodney W. Grimes 	vm_map_lock(map);
998df8bae1dSRodney W. Grimes 
999df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1000df8bae1dSRodney W. Grimes 
1001df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1002df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
10030d94caffSDavid Greenman 	} else
1004df8bae1dSRodney W. Grimes 		entry = entry->next;
1005df8bae1dSRodney W. Grimes 
1006df8bae1dSRodney W. Grimes 	vm_map_clip_end(map, entry, end);
1007df8bae1dSRodney W. Grimes 
1008df8bae1dSRodney W. Grimes 	if ((entry->start == start) && (entry->end == end) &&
1009df8bae1dSRodney W. Grimes 	    (!entry->is_a_map) &&
1010df8bae1dSRodney W. Grimes 	    (entry->object.vm_object == NULL) &&
1011df8bae1dSRodney W. Grimes 	    (!entry->copy_on_write)) {
1012df8bae1dSRodney W. Grimes 		entry->is_a_map = FALSE;
1013df8bae1dSRodney W. Grimes 		entry->is_sub_map = TRUE;
1014df8bae1dSRodney W. Grimes 		vm_map_reference(entry->object.sub_map = submap);
1015df8bae1dSRodney W. Grimes 		result = KERN_SUCCESS;
1016df8bae1dSRodney W. Grimes 	}
1017df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1018df8bae1dSRodney W. Grimes 
1019df8bae1dSRodney W. Grimes 	return (result);
1020df8bae1dSRodney W. Grimes }
1021df8bae1dSRodney W. Grimes 
1022df8bae1dSRodney W. Grimes /*
1023df8bae1dSRodney W. Grimes  *	vm_map_protect:
1024df8bae1dSRodney W. Grimes  *
1025df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
1026df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
1027df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
1028df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
1029df8bae1dSRodney W. Grimes  */
1030df8bae1dSRodney W. Grimes int
1031df8bae1dSRodney W. Grimes vm_map_protect(map, start, end, new_prot, set_max)
1032df8bae1dSRodney W. Grimes 	register vm_map_t map;
1033df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1034df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1035df8bae1dSRodney W. Grimes 	register vm_prot_t new_prot;
1036df8bae1dSRodney W. Grimes 	register boolean_t set_max;
1037df8bae1dSRodney W. Grimes {
1038df8bae1dSRodney W. Grimes 	register vm_map_entry_t current;
1039df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1040df8bae1dSRodney W. Grimes 
1041df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1042df8bae1dSRodney W. Grimes 
1043df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1044df8bae1dSRodney W. Grimes 
1045df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1046df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
10470d94caffSDavid Greenman 	} else
1048df8bae1dSRodney W. Grimes 		entry = entry->next;
1049df8bae1dSRodney W. Grimes 
1050df8bae1dSRodney W. Grimes 	/*
10510d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
1052df8bae1dSRodney W. Grimes 	 */
1053df8bae1dSRodney W. Grimes 
1054df8bae1dSRodney W. Grimes 	current = entry;
1055df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1056a1f6d91cSDavid Greenman 		if (current->is_sub_map) {
1057a1f6d91cSDavid Greenman 			vm_map_unlock(map);
1058df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1059a1f6d91cSDavid Greenman 		}
1060df8bae1dSRodney W. Grimes 		if ((new_prot & current->max_protection) != new_prot) {
1061df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1062df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
1063df8bae1dSRodney W. Grimes 		}
1064df8bae1dSRodney W. Grimes 		current = current->next;
1065df8bae1dSRodney W. Grimes 	}
1066df8bae1dSRodney W. Grimes 
1067df8bae1dSRodney W. Grimes 	/*
10680d94caffSDavid Greenman 	 * Go back and fix up protections. [Note that clipping is not
10690d94caffSDavid Greenman 	 * necessary the second time.]
1070df8bae1dSRodney W. Grimes 	 */
1071df8bae1dSRodney W. Grimes 
1072df8bae1dSRodney W. Grimes 	current = entry;
1073df8bae1dSRodney W. Grimes 
1074df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1075df8bae1dSRodney W. Grimes 		vm_prot_t old_prot;
1076df8bae1dSRodney W. Grimes 
1077df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, current, end);
1078df8bae1dSRodney W. Grimes 
1079df8bae1dSRodney W. Grimes 		old_prot = current->protection;
1080df8bae1dSRodney W. Grimes 		if (set_max)
1081df8bae1dSRodney W. Grimes 			current->protection =
1082df8bae1dSRodney W. Grimes 			    (current->max_protection = new_prot) &
1083df8bae1dSRodney W. Grimes 			    old_prot;
1084df8bae1dSRodney W. Grimes 		else
1085df8bae1dSRodney W. Grimes 			current->protection = new_prot;
1086df8bae1dSRodney W. Grimes 
1087df8bae1dSRodney W. Grimes 		/*
10880d94caffSDavid Greenman 		 * Update physical map if necessary. Worry about copy-on-write
10890d94caffSDavid Greenman 		 * here -- CHECK THIS XXX
1090df8bae1dSRodney W. Grimes 		 */
1091df8bae1dSRodney W. Grimes 
1092df8bae1dSRodney W. Grimes 		if (current->protection != old_prot) {
1093df8bae1dSRodney W. Grimes 
1094df8bae1dSRodney W. Grimes #define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1095df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
1096df8bae1dSRodney W. Grimes #define	max(a,b)	((a) > (b) ? (a) : (b))
1097df8bae1dSRodney W. Grimes 
1098df8bae1dSRodney W. Grimes 			if (current->is_a_map) {
1099df8bae1dSRodney W. Grimes 				vm_map_entry_t share_entry;
1100df8bae1dSRodney W. Grimes 				vm_offset_t share_end;
1101df8bae1dSRodney W. Grimes 
1102df8bae1dSRodney W. Grimes 				vm_map_lock(current->object.share_map);
1103df8bae1dSRodney W. Grimes 				(void) vm_map_lookup_entry(
1104df8bae1dSRodney W. Grimes 				    current->object.share_map,
1105df8bae1dSRodney W. Grimes 				    current->offset,
1106df8bae1dSRodney W. Grimes 				    &share_entry);
1107df8bae1dSRodney W. Grimes 				share_end = current->offset +
1108df8bae1dSRodney W. Grimes 				    (current->end - current->start);
1109df8bae1dSRodney W. Grimes 				while ((share_entry !=
1110df8bae1dSRodney W. Grimes 					&current->object.share_map->header) &&
1111df8bae1dSRodney W. Grimes 				    (share_entry->start < share_end)) {
1112df8bae1dSRodney W. Grimes 
1113df8bae1dSRodney W. Grimes 					pmap_protect(map->pmap,
1114df8bae1dSRodney W. Grimes 					    (max(share_entry->start,
1115df8bae1dSRodney W. Grimes 						    current->offset) -
1116df8bae1dSRodney W. Grimes 						current->offset +
1117df8bae1dSRodney W. Grimes 						current->start),
1118df8bae1dSRodney W. Grimes 					    min(share_entry->end,
1119df8bae1dSRodney W. Grimes 						share_end) -
1120df8bae1dSRodney W. Grimes 					    current->offset +
1121df8bae1dSRodney W. Grimes 					    current->start,
1122df8bae1dSRodney W. Grimes 					    current->protection &
1123df8bae1dSRodney W. Grimes 					    MASK(share_entry));
1124df8bae1dSRodney W. Grimes 
1125df8bae1dSRodney W. Grimes 					share_entry = share_entry->next;
1126df8bae1dSRodney W. Grimes 				}
1127df8bae1dSRodney W. Grimes 				vm_map_unlock(current->object.share_map);
11280d94caffSDavid Greenman 			} else
1129df8bae1dSRodney W. Grimes 				pmap_protect(map->pmap, current->start,
1130df8bae1dSRodney W. Grimes 				    current->end,
1131df8bae1dSRodney W. Grimes 				    current->protection & MASK(entry));
1132df8bae1dSRodney W. Grimes #undef	max
1133df8bae1dSRodney W. Grimes #undef	MASK
1134df8bae1dSRodney W. Grimes 		}
1135df8bae1dSRodney W. Grimes 		current = current->next;
1136df8bae1dSRodney W. Grimes 	}
1137df8bae1dSRodney W. Grimes 
1138df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1139df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1140df8bae1dSRodney W. Grimes }
1141df8bae1dSRodney W. Grimes 
1142df8bae1dSRodney W. Grimes /*
1143df8bae1dSRodney W. Grimes  *	vm_map_inherit:
1144df8bae1dSRodney W. Grimes  *
1145df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
1146df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
1147df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
1148df8bae1dSRodney W. Grimes  *	child maps at the time of vm_map_fork.
1149df8bae1dSRodney W. Grimes  */
1150df8bae1dSRodney W. Grimes int
1151df8bae1dSRodney W. Grimes vm_map_inherit(map, start, end, new_inheritance)
1152df8bae1dSRodney W. Grimes 	register vm_map_t map;
1153df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1154df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1155df8bae1dSRodney W. Grimes 	register vm_inherit_t new_inheritance;
1156df8bae1dSRodney W. Grimes {
1157df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1158df8bae1dSRodney W. Grimes 	vm_map_entry_t temp_entry;
1159df8bae1dSRodney W. Grimes 
1160df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
1161df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
1162df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
1163df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
1164df8bae1dSRodney W. Grimes 		break;
1165df8bae1dSRodney W. Grimes 	default:
1166df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
1167df8bae1dSRodney W. Grimes 	}
1168df8bae1dSRodney W. Grimes 
1169df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1170df8bae1dSRodney W. Grimes 
1171df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1172df8bae1dSRodney W. Grimes 
1173df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1174df8bae1dSRodney W. Grimes 		entry = temp_entry;
1175df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
11760d94caffSDavid Greenman 	} else
1177df8bae1dSRodney W. Grimes 		entry = temp_entry->next;
1178df8bae1dSRodney W. Grimes 
1179df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1180df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1181df8bae1dSRodney W. Grimes 
1182df8bae1dSRodney W. Grimes 		entry->inheritance = new_inheritance;
1183df8bae1dSRodney W. Grimes 
1184df8bae1dSRodney W. Grimes 		entry = entry->next;
1185df8bae1dSRodney W. Grimes 	}
1186df8bae1dSRodney W. Grimes 
1187df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1188df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1189df8bae1dSRodney W. Grimes }
1190df8bae1dSRodney W. Grimes 
1191df8bae1dSRodney W. Grimes /*
1192df8bae1dSRodney W. Grimes  *	vm_map_pageable:
1193df8bae1dSRodney W. Grimes  *
1194df8bae1dSRodney W. Grimes  *	Sets the pageability of the specified address
1195df8bae1dSRodney W. Grimes  *	range in the target map.  Regions specified
1196df8bae1dSRodney W. Grimes  *	as not pageable require locked-down physical
1197df8bae1dSRodney W. Grimes  *	memory and physical page maps.
1198df8bae1dSRodney W. Grimes  *
1199df8bae1dSRodney W. Grimes  *	The map must not be locked, but a reference
1200df8bae1dSRodney W. Grimes  *	must remain to the map throughout the call.
1201df8bae1dSRodney W. Grimes  */
1202df8bae1dSRodney W. Grimes int
1203df8bae1dSRodney W. Grimes vm_map_pageable(map, start, end, new_pageable)
1204df8bae1dSRodney W. Grimes 	register vm_map_t map;
1205df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1206df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1207df8bae1dSRodney W. Grimes 	register boolean_t new_pageable;
1208df8bae1dSRodney W. Grimes {
1209df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1210df8bae1dSRodney W. Grimes 	vm_map_entry_t start_entry;
121126f9a767SRodney W. Grimes 	register vm_offset_t failed = 0;
1212df8bae1dSRodney W. Grimes 	int rv;
1213df8bae1dSRodney W. Grimes 
1214df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1215df8bae1dSRodney W. Grimes 
1216df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1217df8bae1dSRodney W. Grimes 
1218df8bae1dSRodney W. Grimes 	/*
12190d94caffSDavid Greenman 	 * Only one pageability change may take place at one time, since
12200d94caffSDavid Greenman 	 * vm_fault assumes it will be called only once for each
12210d94caffSDavid Greenman 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
12220d94caffSDavid Greenman 	 * changing the pageability for the entire region.  We do so before
12230d94caffSDavid Greenman 	 * making any changes.
1224df8bae1dSRodney W. Grimes 	 */
1225df8bae1dSRodney W. Grimes 
1226df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1227df8bae1dSRodney W. Grimes 		vm_map_unlock(map);
1228df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1229df8bae1dSRodney W. Grimes 	}
1230df8bae1dSRodney W. Grimes 	entry = start_entry;
1231df8bae1dSRodney W. Grimes 
1232df8bae1dSRodney W. Grimes 	/*
12330d94caffSDavid Greenman 	 * Actions are rather different for wiring and unwiring, so we have
12340d94caffSDavid Greenman 	 * two separate cases.
1235df8bae1dSRodney W. Grimes 	 */
1236df8bae1dSRodney W. Grimes 
1237df8bae1dSRodney W. Grimes 	if (new_pageable) {
1238df8bae1dSRodney W. Grimes 
1239df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1240df8bae1dSRodney W. Grimes 
1241df8bae1dSRodney W. Grimes 		/*
12420d94caffSDavid Greenman 		 * Unwiring.  First ensure that the range to be unwired is
12430d94caffSDavid Greenman 		 * really wired down and that there are no holes.
1244df8bae1dSRodney W. Grimes 		 */
1245df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1246df8bae1dSRodney W. Grimes 
1247df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0 ||
1248df8bae1dSRodney W. Grimes 			    (entry->end < end &&
1249df8bae1dSRodney W. Grimes 				(entry->next == &map->header ||
1250df8bae1dSRodney W. Grimes 				    entry->next->start > entry->end))) {
1251df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1252df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1253df8bae1dSRodney W. Grimes 			}
1254df8bae1dSRodney W. Grimes 			entry = entry->next;
1255df8bae1dSRodney W. Grimes 		}
1256df8bae1dSRodney W. Grimes 
1257df8bae1dSRodney W. Grimes 		/*
12580d94caffSDavid Greenman 		 * Now decrement the wiring count for each region. If a region
12590d94caffSDavid Greenman 		 * becomes completely unwired, unwire its physical pages and
12600d94caffSDavid Greenman 		 * mappings.
1261df8bae1dSRodney W. Grimes 		 */
1262df8bae1dSRodney W. Grimes 		lock_set_recursive(&map->lock);
1263df8bae1dSRodney W. Grimes 
1264df8bae1dSRodney W. Grimes 		entry = start_entry;
1265df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1266df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1267df8bae1dSRodney W. Grimes 
1268df8bae1dSRodney W. Grimes 			entry->wired_count--;
1269df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0)
1270df8bae1dSRodney W. Grimes 				vm_fault_unwire(map, entry->start, entry->end);
1271df8bae1dSRodney W. Grimes 
1272df8bae1dSRodney W. Grimes 			entry = entry->next;
1273df8bae1dSRodney W. Grimes 		}
1274df8bae1dSRodney W. Grimes 		lock_clear_recursive(&map->lock);
12750d94caffSDavid Greenman 	} else {
1276df8bae1dSRodney W. Grimes 		/*
1277df8bae1dSRodney W. Grimes 		 * Wiring.  We must do this in two passes:
1278df8bae1dSRodney W. Grimes 		 *
12790d94caffSDavid Greenman 		 * 1.  Holding the write lock, we create any shadow or zero-fill
12800d94caffSDavid Greenman 		 * objects that need to be created. Then we clip each map
12810d94caffSDavid Greenman 		 * entry to the region to be wired and increment its wiring
12820d94caffSDavid Greenman 		 * count.  We create objects before clipping the map entries
1283df8bae1dSRodney W. Grimes 		 * to avoid object proliferation.
1284df8bae1dSRodney W. Grimes 		 *
12850d94caffSDavid Greenman 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
12860d94caffSDavid Greenman 		 * fault in the pages for any newly wired area (wired_count is
12870d94caffSDavid Greenman 		 * 1).
1288df8bae1dSRodney W. Grimes 		 *
12890d94caffSDavid Greenman 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
129024a1cce3SDavid Greenman 		 * deadlock with another process that may have faulted on one
12910d94caffSDavid Greenman 		 * of the pages to be wired (it would mark the page busy,
12920d94caffSDavid Greenman 		 * blocking us, then in turn block on the map lock that we
12930d94caffSDavid Greenman 		 * hold).  Because of problems in the recursive lock package,
12940d94caffSDavid Greenman 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
12950d94caffSDavid Greenman 		 * any actions that require the write lock must be done
12960d94caffSDavid Greenman 		 * beforehand.  Because we keep the read lock on the map, the
12970d94caffSDavid Greenman 		 * copy-on-write status of the entries we modify here cannot
12980d94caffSDavid Greenman 		 * change.
1299df8bae1dSRodney W. Grimes 		 */
1300df8bae1dSRodney W. Grimes 
1301df8bae1dSRodney W. Grimes 		/*
1302df8bae1dSRodney W. Grimes 		 * Pass 1.
1303df8bae1dSRodney W. Grimes 		 */
1304df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1305df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0) {
1306df8bae1dSRodney W. Grimes 
1307df8bae1dSRodney W. Grimes 				/*
1308df8bae1dSRodney W. Grimes 				 * Perform actions of vm_map_lookup that need
1309df8bae1dSRodney W. Grimes 				 * the write lock on the map: create a shadow
1310df8bae1dSRodney W. Grimes 				 * object for a copy-on-write region, or an
1311df8bae1dSRodney W. Grimes 				 * object for a zero-fill region.
1312df8bae1dSRodney W. Grimes 				 *
1313df8bae1dSRodney W. Grimes 				 * We don't have to do this for entries that
13140d94caffSDavid Greenman 				 * point to sharing maps, because we won't
13150d94caffSDavid Greenman 				 * hold the lock on the sharing map.
1316df8bae1dSRodney W. Grimes 				 */
1317bf4bd9bdSDavid Greenman 				if (!entry->is_a_map && !entry->is_sub_map) {
1318df8bae1dSRodney W. Grimes 					if (entry->needs_copy &&
1319df8bae1dSRodney W. Grimes 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1320df8bae1dSRodney W. Grimes 
1321df8bae1dSRodney W. Grimes 						vm_object_shadow(&entry->object.vm_object,
1322df8bae1dSRodney W. Grimes 						    &entry->offset,
1323a316d390SJohn Dyson 						    OFF_TO_IDX(entry->end
1324df8bae1dSRodney W. Grimes 							- entry->start));
1325df8bae1dSRodney W. Grimes 						entry->needs_copy = FALSE;
13260d94caffSDavid Greenman 					} else if (entry->object.vm_object == NULL) {
1327df8bae1dSRodney W. Grimes 						entry->object.vm_object =
1328a316d390SJohn Dyson 						    vm_object_allocate(OBJT_DEFAULT,
1329a316d390SJohn Dyson 							OFF_TO_IDX(entry->end - entry->start));
1330df8bae1dSRodney W. Grimes 						entry->offset = (vm_offset_t) 0;
1331df8bae1dSRodney W. Grimes 					}
1332df8bae1dSRodney W. Grimes 				}
1333df8bae1dSRodney W. Grimes 			}
1334df8bae1dSRodney W. Grimes 			vm_map_clip_start(map, entry, start);
1335df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1336df8bae1dSRodney W. Grimes 			entry->wired_count++;
1337df8bae1dSRodney W. Grimes 
1338df8bae1dSRodney W. Grimes 			/*
1339df8bae1dSRodney W. Grimes 			 * Check for holes
1340df8bae1dSRodney W. Grimes 			 */
1341df8bae1dSRodney W. Grimes 			if (entry->end < end &&
1342df8bae1dSRodney W. Grimes 			    (entry->next == &map->header ||
1343df8bae1dSRodney W. Grimes 				entry->next->start > entry->end)) {
1344df8bae1dSRodney W. Grimes 				/*
13450d94caffSDavid Greenman 				 * Found one.  Object creation actions do not
13460d94caffSDavid Greenman 				 * need to be undone, but the wired counts
13470d94caffSDavid Greenman 				 * need to be restored.
1348df8bae1dSRodney W. Grimes 				 */
1349df8bae1dSRodney W. Grimes 				while (entry != &map->header && entry->end > start) {
1350df8bae1dSRodney W. Grimes 					entry->wired_count--;
1351df8bae1dSRodney W. Grimes 					entry = entry->prev;
1352df8bae1dSRodney W. Grimes 				}
1353df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1354df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1355df8bae1dSRodney W. Grimes 			}
1356df8bae1dSRodney W. Grimes 			entry = entry->next;
1357df8bae1dSRodney W. Grimes 		}
1358df8bae1dSRodney W. Grimes 
1359df8bae1dSRodney W. Grimes 		/*
1360df8bae1dSRodney W. Grimes 		 * Pass 2.
1361df8bae1dSRodney W. Grimes 		 */
1362df8bae1dSRodney W. Grimes 
1363df8bae1dSRodney W. Grimes 		/*
1364df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1365df8bae1dSRodney W. Grimes 		 *
136624a1cce3SDavid Greenman 		 * If we are wiring in the kernel map or a submap of it,
136724a1cce3SDavid Greenman 		 * unlock the map to avoid deadlocks.  We trust that the
136824a1cce3SDavid Greenman 		 * kernel is well-behaved, and therefore will not do
136924a1cce3SDavid Greenman 		 * anything destructive to this region of the map while
137024a1cce3SDavid Greenman 		 * we have it unlocked.  We cannot trust user processes
137124a1cce3SDavid Greenman 		 * to do the same.
1372df8bae1dSRodney W. Grimes 		 *
1373df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1374df8bae1dSRodney W. Grimes 		 */
1375df8bae1dSRodney W. Grimes 		if (vm_map_pmap(map) == kernel_pmap) {
1376df8bae1dSRodney W. Grimes 			vm_map_unlock(map);	/* trust me ... */
13770d94caffSDavid Greenman 		} else {
1378df8bae1dSRodney W. Grimes 			lock_set_recursive(&map->lock);
1379df8bae1dSRodney W. Grimes 			lock_write_to_read(&map->lock);
1380df8bae1dSRodney W. Grimes 		}
1381df8bae1dSRodney W. Grimes 
1382df8bae1dSRodney W. Grimes 		rv = 0;
1383df8bae1dSRodney W. Grimes 		entry = start_entry;
1384df8bae1dSRodney W. Grimes 		while (entry != &map->header && entry->start < end) {
1385df8bae1dSRodney W. Grimes 			/*
13860d94caffSDavid Greenman 			 * If vm_fault_wire fails for any page we need to undo
13870d94caffSDavid Greenman 			 * what has been done.  We decrement the wiring count
13880d94caffSDavid Greenman 			 * for those pages which have not yet been wired (now)
13890d94caffSDavid Greenman 			 * and unwire those that have (later).
1390df8bae1dSRodney W. Grimes 			 *
1391df8bae1dSRodney W. Grimes 			 * XXX this violates the locking protocol on the map,
1392df8bae1dSRodney W. Grimes 			 * needs to be fixed.
1393df8bae1dSRodney W. Grimes 			 */
1394df8bae1dSRodney W. Grimes 			if (rv)
1395df8bae1dSRodney W. Grimes 				entry->wired_count--;
1396df8bae1dSRodney W. Grimes 			else if (entry->wired_count == 1) {
1397df8bae1dSRodney W. Grimes 				rv = vm_fault_wire(map, entry->start, entry->end);
1398df8bae1dSRodney W. Grimes 				if (rv) {
1399df8bae1dSRodney W. Grimes 					failed = entry->start;
1400df8bae1dSRodney W. Grimes 					entry->wired_count--;
1401df8bae1dSRodney W. Grimes 				}
1402df8bae1dSRodney W. Grimes 			}
1403df8bae1dSRodney W. Grimes 			entry = entry->next;
1404df8bae1dSRodney W. Grimes 		}
1405df8bae1dSRodney W. Grimes 
1406df8bae1dSRodney W. Grimes 		if (vm_map_pmap(map) == kernel_pmap) {
1407df8bae1dSRodney W. Grimes 			vm_map_lock(map);
14080d94caffSDavid Greenman 		} else {
1409df8bae1dSRodney W. Grimes 			lock_clear_recursive(&map->lock);
1410df8bae1dSRodney W. Grimes 		}
1411df8bae1dSRodney W. Grimes 		if (rv) {
1412df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1413df8bae1dSRodney W. Grimes 			(void) vm_map_pageable(map, start, failed, TRUE);
1414df8bae1dSRodney W. Grimes 			return (rv);
1415df8bae1dSRodney W. Grimes 		}
1416df8bae1dSRodney W. Grimes 	}
1417df8bae1dSRodney W. Grimes 
1418df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1419df8bae1dSRodney W. Grimes 
1420df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1421df8bae1dSRodney W. Grimes }
1422df8bae1dSRodney W. Grimes 
1423df8bae1dSRodney W. Grimes /*
1424df8bae1dSRodney W. Grimes  * vm_map_clean
1425df8bae1dSRodney W. Grimes  *
1426df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
1427df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
1428df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
1429df8bae1dSRodney W. Grimes  *
1430df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
1431df8bae1dSRodney W. Grimes  */
1432df8bae1dSRodney W. Grimes int
1433df8bae1dSRodney W. Grimes vm_map_clean(map, start, end, syncio, invalidate)
1434df8bae1dSRodney W. Grimes 	vm_map_t map;
1435df8bae1dSRodney W. Grimes 	vm_offset_t start;
1436df8bae1dSRodney W. Grimes 	vm_offset_t end;
1437df8bae1dSRodney W. Grimes 	boolean_t syncio;
1438df8bae1dSRodney W. Grimes 	boolean_t invalidate;
1439df8bae1dSRodney W. Grimes {
1440df8bae1dSRodney W. Grimes 	register vm_map_entry_t current;
1441df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1442df8bae1dSRodney W. Grimes 	vm_size_t size;
1443df8bae1dSRodney W. Grimes 	vm_object_t object;
1444a316d390SJohn Dyson 	vm_ooffset_t offset;
1445df8bae1dSRodney W. Grimes 
1446df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
1447df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1448df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &entry)) {
1449df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
1450df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1451df8bae1dSRodney W. Grimes 	}
1452df8bae1dSRodney W. Grimes 	/*
1453df8bae1dSRodney W. Grimes 	 * Make a first pass to check for holes.
1454df8bae1dSRodney W. Grimes 	 */
1455df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1456df8bae1dSRodney W. Grimes 		if (current->is_sub_map) {
1457df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1458df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1459df8bae1dSRodney W. Grimes 		}
1460df8bae1dSRodney W. Grimes 		if (end > current->end &&
1461df8bae1dSRodney W. Grimes 		    (current->next == &map->header ||
1462df8bae1dSRodney W. Grimes 			current->end != current->next->start)) {
1463df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1464df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ADDRESS);
1465df8bae1dSRodney W. Grimes 		}
1466df8bae1dSRodney W. Grimes 	}
1467df8bae1dSRodney W. Grimes 
1468df8bae1dSRodney W. Grimes 	/*
1469df8bae1dSRodney W. Grimes 	 * Make a second pass, cleaning/uncaching pages from the indicated
1470df8bae1dSRodney W. Grimes 	 * objects as we go.
1471df8bae1dSRodney W. Grimes 	 */
1472df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1473df8bae1dSRodney W. Grimes 		offset = current->offset + (start - current->start);
1474df8bae1dSRodney W. Grimes 		size = (end <= current->end ? end : current->end) - start;
1475bf4bd9bdSDavid Greenman 		if (current->is_a_map || current->is_sub_map) {
1476df8bae1dSRodney W. Grimes 			register vm_map_t smap;
1477df8bae1dSRodney W. Grimes 			vm_map_entry_t tentry;
1478df8bae1dSRodney W. Grimes 			vm_size_t tsize;
1479df8bae1dSRodney W. Grimes 
1480df8bae1dSRodney W. Grimes 			smap = current->object.share_map;
1481df8bae1dSRodney W. Grimes 			vm_map_lock_read(smap);
1482df8bae1dSRodney W. Grimes 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1483df8bae1dSRodney W. Grimes 			tsize = tentry->end - offset;
1484df8bae1dSRodney W. Grimes 			if (tsize < size)
1485df8bae1dSRodney W. Grimes 				size = tsize;
1486df8bae1dSRodney W. Grimes 			object = tentry->object.vm_object;
1487df8bae1dSRodney W. Grimes 			offset = tentry->offset + (offset - tentry->start);
1488df8bae1dSRodney W. Grimes 			vm_map_unlock_read(smap);
1489df8bae1dSRodney W. Grimes 		} else {
1490df8bae1dSRodney W. Grimes 			object = current->object.vm_object;
1491df8bae1dSRodney W. Grimes 		}
149224a1cce3SDavid Greenman 		if (object && (object->type == OBJT_VNODE)) {
1493df8bae1dSRodney W. Grimes 			/*
14940d94caffSDavid Greenman 			 * Flush pages if writing is allowed. XXX should we continue
14950d94caffSDavid Greenman 			 * on an error?
1496f5cf85d4SDavid Greenman 			 *
1497f5cf85d4SDavid Greenman 			 * XXX Doing async I/O and then removing all the pages from
1498f5cf85d4SDavid Greenman 			 *     the object before it completes is probably a very bad
1499f5cf85d4SDavid Greenman 			 *     idea.
1500df8bae1dSRodney W. Grimes 			 */
1501f5cf85d4SDavid Greenman 			if (current->protection & VM_PROT_WRITE)
1502a316d390SJohn Dyson 		   	    	vm_object_page_clean(object,
1503a316d390SJohn Dyson 					OFF_TO_IDX(offset),
1504a316d390SJohn Dyson 					OFF_TO_IDX(offset + size),
1505a316d390SJohn Dyson 					syncio, TRUE);
1506df8bae1dSRodney W. Grimes 			if (invalidate)
1507a316d390SJohn Dyson 				vm_object_page_remove(object,
1508a316d390SJohn Dyson 					OFF_TO_IDX(offset),
1509a316d390SJohn Dyson 					OFF_TO_IDX(offset + size),
1510a316d390SJohn Dyson 					FALSE);
1511bf4bd9bdSDavid Greenman 		}
1512df8bae1dSRodney W. Grimes 		start += size;
1513df8bae1dSRodney W. Grimes 	}
1514df8bae1dSRodney W. Grimes 
1515df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
1516df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1517df8bae1dSRodney W. Grimes }
1518df8bae1dSRodney W. Grimes 
1519df8bae1dSRodney W. Grimes /*
1520df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
1521df8bae1dSRodney W. Grimes  *
1522df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
1523df8bae1dSRodney W. Grimes  *
1524df8bae1dSRodney W. Grimes  *	The map in question should be locked.
1525df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
1526df8bae1dSRodney W. Grimes  */
15270d94caffSDavid Greenman void
15280d94caffSDavid Greenman vm_map_entry_unwire(map, entry)
1529df8bae1dSRodney W. Grimes 	vm_map_t map;
1530df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1531df8bae1dSRodney W. Grimes {
1532df8bae1dSRodney W. Grimes 	vm_fault_unwire(map, entry->start, entry->end);
1533df8bae1dSRodney W. Grimes 	entry->wired_count = 0;
1534df8bae1dSRodney W. Grimes }
1535df8bae1dSRodney W. Grimes 
1536df8bae1dSRodney W. Grimes /*
1537df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
1538df8bae1dSRodney W. Grimes  *
1539df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
1540df8bae1dSRodney W. Grimes  */
15410d94caffSDavid Greenman void
15420d94caffSDavid Greenman vm_map_entry_delete(map, entry)
1543df8bae1dSRodney W. Grimes 	register vm_map_t map;
1544df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1545df8bae1dSRodney W. Grimes {
1546df8bae1dSRodney W. Grimes 	if (entry->wired_count != 0)
1547df8bae1dSRodney W. Grimes 		vm_map_entry_unwire(map, entry);
1548df8bae1dSRodney W. Grimes 
1549df8bae1dSRodney W. Grimes 	vm_map_entry_unlink(map, entry);
1550df8bae1dSRodney W. Grimes 	map->size -= entry->end - entry->start;
1551df8bae1dSRodney W. Grimes 
1552df8bae1dSRodney W. Grimes 	if (entry->is_a_map || entry->is_sub_map)
1553df8bae1dSRodney W. Grimes 		vm_map_deallocate(entry->object.share_map);
1554df8bae1dSRodney W. Grimes 	else
1555df8bae1dSRodney W. Grimes 		vm_object_deallocate(entry->object.vm_object);
1556df8bae1dSRodney W. Grimes 
1557df8bae1dSRodney W. Grimes 	vm_map_entry_dispose(map, entry);
1558df8bae1dSRodney W. Grimes }
1559df8bae1dSRodney W. Grimes 
1560df8bae1dSRodney W. Grimes /*
1561df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
1562df8bae1dSRodney W. Grimes  *
1563df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
1564df8bae1dSRodney W. Grimes  *	map.
1565df8bae1dSRodney W. Grimes  *
1566df8bae1dSRodney W. Grimes  *	When called with a sharing map, removes pages from
1567df8bae1dSRodney W. Grimes  *	that region from all physical maps.
1568df8bae1dSRodney W. Grimes  */
1569df8bae1dSRodney W. Grimes int
1570df8bae1dSRodney W. Grimes vm_map_delete(map, start, end)
1571df8bae1dSRodney W. Grimes 	register vm_map_t map;
1572df8bae1dSRodney W. Grimes 	vm_offset_t start;
1573df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1574df8bae1dSRodney W. Grimes {
1575df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1576df8bae1dSRodney W. Grimes 	vm_map_entry_t first_entry;
1577df8bae1dSRodney W. Grimes 
1578df8bae1dSRodney W. Grimes 	/*
1579df8bae1dSRodney W. Grimes 	 * Find the start of the region, and clip it
1580df8bae1dSRodney W. Grimes 	 */
1581df8bae1dSRodney W. Grimes 
1582df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &first_entry))
1583df8bae1dSRodney W. Grimes 		entry = first_entry->next;
1584df8bae1dSRodney W. Grimes 	else {
1585df8bae1dSRodney W. Grimes 		entry = first_entry;
1586df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1587df8bae1dSRodney W. Grimes 
1588df8bae1dSRodney W. Grimes 		/*
15890d94caffSDavid Greenman 		 * Fix the lookup hint now, rather than each time though the
15900d94caffSDavid Greenman 		 * loop.
1591df8bae1dSRodney W. Grimes 		 */
1592df8bae1dSRodney W. Grimes 
1593df8bae1dSRodney W. Grimes 		SAVE_HINT(map, entry->prev);
1594df8bae1dSRodney W. Grimes 	}
1595df8bae1dSRodney W. Grimes 
1596df8bae1dSRodney W. Grimes 	/*
1597df8bae1dSRodney W. Grimes 	 * Save the free space hint
1598df8bae1dSRodney W. Grimes 	 */
1599df8bae1dSRodney W. Grimes 
1600df8bae1dSRodney W. Grimes 	if (map->first_free->start >= start)
1601df8bae1dSRodney W. Grimes 		map->first_free = entry->prev;
1602df8bae1dSRodney W. Grimes 
1603df8bae1dSRodney W. Grimes 	/*
1604df8bae1dSRodney W. Grimes 	 * Step through all entries in this region
1605df8bae1dSRodney W. Grimes 	 */
1606df8bae1dSRodney W. Grimes 
1607df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1608df8bae1dSRodney W. Grimes 		vm_map_entry_t next;
1609df8bae1dSRodney W. Grimes 		register vm_offset_t s, e;
1610df8bae1dSRodney W. Grimes 		register vm_object_t object;
1611df8bae1dSRodney W. Grimes 
1612df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1613df8bae1dSRodney W. Grimes 
1614df8bae1dSRodney W. Grimes 		next = entry->next;
1615df8bae1dSRodney W. Grimes 		s = entry->start;
1616df8bae1dSRodney W. Grimes 		e = entry->end;
1617df8bae1dSRodney W. Grimes 
1618df8bae1dSRodney W. Grimes 		/*
16190d94caffSDavid Greenman 		 * Unwire before removing addresses from the pmap; otherwise,
16200d94caffSDavid Greenman 		 * unwiring will put the entries back in the pmap.
1621df8bae1dSRodney W. Grimes 		 */
1622df8bae1dSRodney W. Grimes 
1623df8bae1dSRodney W. Grimes 		object = entry->object.vm_object;
1624df8bae1dSRodney W. Grimes 		if (entry->wired_count != 0)
1625df8bae1dSRodney W. Grimes 			vm_map_entry_unwire(map, entry);
1626df8bae1dSRodney W. Grimes 
1627df8bae1dSRodney W. Grimes 		/*
16280d94caffSDavid Greenman 		 * If this is a sharing map, we must remove *all* references
16290d94caffSDavid Greenman 		 * to this data, since we can't find all of the physical maps
16300d94caffSDavid Greenman 		 * which are sharing it.
1631df8bae1dSRodney W. Grimes 		 */
1632df8bae1dSRodney W. Grimes 
1633df8bae1dSRodney W. Grimes 		if (object == kernel_object || object == kmem_object)
1634a316d390SJohn Dyson 			vm_object_page_remove(object, OFF_TO_IDX(entry->offset),
1635a316d390SJohn Dyson 			    OFF_TO_IDX(entry->offset + (e - s)), FALSE);
1636df8bae1dSRodney W. Grimes 		else if (!map->is_main_map)
1637df8bae1dSRodney W. Grimes 			vm_object_pmap_remove(object,
1638a316d390SJohn Dyson 			    OFF_TO_IDX(entry->offset),
1639a316d390SJohn Dyson 			    OFF_TO_IDX(entry->offset + (e - s)));
1640df8bae1dSRodney W. Grimes 		else
1641df8bae1dSRodney W. Grimes 			pmap_remove(map->pmap, s, e);
1642df8bae1dSRodney W. Grimes 
1643df8bae1dSRodney W. Grimes 		/*
16440d94caffSDavid Greenman 		 * Delete the entry (which may delete the object) only after
16450d94caffSDavid Greenman 		 * removing all pmap entries pointing to its pages.
16460d94caffSDavid Greenman 		 * (Otherwise, its page frames may be reallocated, and any
16470d94caffSDavid Greenman 		 * modify bits will be set in the wrong object!)
1648df8bae1dSRodney W. Grimes 		 */
1649df8bae1dSRodney W. Grimes 
1650df8bae1dSRodney W. Grimes 		vm_map_entry_delete(map, entry);
1651df8bae1dSRodney W. Grimes 		entry = next;
1652df8bae1dSRodney W. Grimes 	}
1653df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1654df8bae1dSRodney W. Grimes }
1655df8bae1dSRodney W. Grimes 
1656df8bae1dSRodney W. Grimes /*
1657df8bae1dSRodney W. Grimes  *	vm_map_remove:
1658df8bae1dSRodney W. Grimes  *
1659df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
1660df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
1661df8bae1dSRodney W. Grimes  */
1662df8bae1dSRodney W. Grimes int
1663df8bae1dSRodney W. Grimes vm_map_remove(map, start, end)
1664df8bae1dSRodney W. Grimes 	register vm_map_t map;
1665df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1666df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1667df8bae1dSRodney W. Grimes {
16688d6e8edeSDavid Greenman 	register int result, s = 0;
16698d6e8edeSDavid Greenman 
16708d6e8edeSDavid Greenman 	if (map == kmem_map)
16718d6e8edeSDavid Greenman 		s = splhigh();
1672df8bae1dSRodney W. Grimes 
1673df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1674df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1675df8bae1dSRodney W. Grimes 	result = vm_map_delete(map, start, end);
1676df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1677df8bae1dSRodney W. Grimes 
16788d6e8edeSDavid Greenman 	if (map == kmem_map)
16798d6e8edeSDavid Greenman 		splx(s);
16808d6e8edeSDavid Greenman 
1681df8bae1dSRodney W. Grimes 	return (result);
1682df8bae1dSRodney W. Grimes }
1683df8bae1dSRodney W. Grimes 
1684df8bae1dSRodney W. Grimes /*
1685df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
1686df8bae1dSRodney W. Grimes  *
1687df8bae1dSRodney W. Grimes  *	Assert that the target map allows the specified
1688df8bae1dSRodney W. Grimes  *	privilege on the entire address region given.
1689df8bae1dSRodney W. Grimes  *	The entire region must be allocated.
1690df8bae1dSRodney W. Grimes  */
16910d94caffSDavid Greenman boolean_t
16920d94caffSDavid Greenman vm_map_check_protection(map, start, end, protection)
1693df8bae1dSRodney W. Grimes 	register vm_map_t map;
1694df8bae1dSRodney W. Grimes 	register vm_offset_t start;
1695df8bae1dSRodney W. Grimes 	register vm_offset_t end;
1696df8bae1dSRodney W. Grimes 	register vm_prot_t protection;
1697df8bae1dSRodney W. Grimes {
1698df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1699df8bae1dSRodney W. Grimes 	vm_map_entry_t tmp_entry;
1700df8bae1dSRodney W. Grimes 
1701df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1702df8bae1dSRodney W. Grimes 		return (FALSE);
1703df8bae1dSRodney W. Grimes 	}
1704df8bae1dSRodney W. Grimes 	entry = tmp_entry;
1705df8bae1dSRodney W. Grimes 
1706df8bae1dSRodney W. Grimes 	while (start < end) {
1707df8bae1dSRodney W. Grimes 		if (entry == &map->header) {
1708df8bae1dSRodney W. Grimes 			return (FALSE);
1709df8bae1dSRodney W. Grimes 		}
1710df8bae1dSRodney W. Grimes 		/*
1711df8bae1dSRodney W. Grimes 		 * No holes allowed!
1712df8bae1dSRodney W. Grimes 		 */
1713df8bae1dSRodney W. Grimes 
1714df8bae1dSRodney W. Grimes 		if (start < entry->start) {
1715df8bae1dSRodney W. Grimes 			return (FALSE);
1716df8bae1dSRodney W. Grimes 		}
1717df8bae1dSRodney W. Grimes 		/*
1718df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
1719df8bae1dSRodney W. Grimes 		 */
1720df8bae1dSRodney W. Grimes 
1721df8bae1dSRodney W. Grimes 		if ((entry->protection & protection) != protection) {
1722df8bae1dSRodney W. Grimes 			return (FALSE);
1723df8bae1dSRodney W. Grimes 		}
1724df8bae1dSRodney W. Grimes 		/* go to next entry */
1725df8bae1dSRodney W. Grimes 
1726df8bae1dSRodney W. Grimes 		start = entry->end;
1727df8bae1dSRodney W. Grimes 		entry = entry->next;
1728df8bae1dSRodney W. Grimes 	}
1729df8bae1dSRodney W. Grimes 	return (TRUE);
1730df8bae1dSRodney W. Grimes }
1731df8bae1dSRodney W. Grimes 
1732df8bae1dSRodney W. Grimes /*
1733df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
1734df8bae1dSRodney W. Grimes  *
1735df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
1736df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
1737df8bae1dSRodney W. Grimes  */
17380d94caffSDavid Greenman void
17390d94caffSDavid Greenman vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1740df8bae1dSRodney W. Grimes 	vm_map_t src_map, dst_map;
1741df8bae1dSRodney W. Grimes 	register vm_map_entry_t src_entry, dst_entry;
1742df8bae1dSRodney W. Grimes {
1743a316d390SJohn Dyson 	vm_pindex_t temp_pindex;
1744a316d390SJohn Dyson 
1745df8bae1dSRodney W. Grimes 	if (src_entry->is_sub_map || dst_entry->is_sub_map)
1746df8bae1dSRodney W. Grimes 		return;
1747df8bae1dSRodney W. Grimes 
174824a1cce3SDavid Greenman 	if (dst_entry->object.vm_object != NULL)
174924a1cce3SDavid Greenman 		printf("vm_map_copy_entry: dst_entry object not NULL!\n");
1750df8bae1dSRodney W. Grimes 
1751df8bae1dSRodney W. Grimes 	/*
17520d94caffSDavid Greenman 	 * If our destination map was wired down, unwire it now.
1753df8bae1dSRodney W. Grimes 	 */
1754df8bae1dSRodney W. Grimes 
1755df8bae1dSRodney W. Grimes 	if (dst_entry->wired_count != 0)
1756df8bae1dSRodney W. Grimes 		vm_map_entry_unwire(dst_map, dst_entry);
1757df8bae1dSRodney W. Grimes 
1758df8bae1dSRodney W. Grimes 	/*
17590d94caffSDavid Greenman 	 * If we're dealing with a sharing map, we must remove the destination
17600d94caffSDavid Greenman 	 * pages from all maps (since we cannot know which maps this sharing
17610d94caffSDavid Greenman 	 * map belongs in).
1762df8bae1dSRodney W. Grimes 	 */
1763df8bae1dSRodney W. Grimes 
1764df8bae1dSRodney W. Grimes 	if (dst_map->is_main_map)
1765df8bae1dSRodney W. Grimes 		pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1766df8bae1dSRodney W. Grimes 	else
1767df8bae1dSRodney W. Grimes 		vm_object_pmap_remove(dst_entry->object.vm_object,
1768a316d390SJohn Dyson 		    OFF_TO_IDX(dst_entry->offset),
1769a316d390SJohn Dyson 		    OFF_TO_IDX(dst_entry->offset +
1770a316d390SJohn Dyson 		    (dst_entry->end - dst_entry->start)));
1771df8bae1dSRodney W. Grimes 
1772df8bae1dSRodney W. Grimes 	if (src_entry->wired_count == 0) {
1773df8bae1dSRodney W. Grimes 
1774df8bae1dSRodney W. Grimes 		boolean_t src_needs_copy;
1775df8bae1dSRodney W. Grimes 
1776df8bae1dSRodney W. Grimes 		/*
17770d94caffSDavid Greenman 		 * If the source entry is marked needs_copy, it is already
17780d94caffSDavid Greenman 		 * write-protected.
1779df8bae1dSRodney W. Grimes 		 */
1780df8bae1dSRodney W. Grimes 		if (!src_entry->needs_copy) {
1781df8bae1dSRodney W. Grimes 
1782df8bae1dSRodney W. Grimes 			boolean_t su;
1783df8bae1dSRodney W. Grimes 
1784df8bae1dSRodney W. Grimes 			/*
17850d94caffSDavid Greenman 			 * If the source entry has only one mapping, we can
17860d94caffSDavid Greenman 			 * just protect the virtual address range.
1787df8bae1dSRodney W. Grimes 			 */
1788df8bae1dSRodney W. Grimes 			if (!(su = src_map->is_main_map)) {
1789df8bae1dSRodney W. Grimes 				su = (src_map->ref_count == 1);
1790df8bae1dSRodney W. Grimes 			}
1791df8bae1dSRodney W. Grimes 			if (su) {
1792df8bae1dSRodney W. Grimes 				pmap_protect(src_map->pmap,
1793df8bae1dSRodney W. Grimes 				    src_entry->start,
1794df8bae1dSRodney W. Grimes 				    src_entry->end,
1795df8bae1dSRodney W. Grimes 				    src_entry->protection & ~VM_PROT_WRITE);
17960d94caffSDavid Greenman 			} else {
1797df8bae1dSRodney W. Grimes 				vm_object_pmap_copy(src_entry->object.vm_object,
1798a316d390SJohn Dyson 				    OFF_TO_IDX(src_entry->offset),
1799a316d390SJohn Dyson 				    OFF_TO_IDX(src_entry->offset + (src_entry->end
1800a316d390SJohn Dyson 					- src_entry->start)));
1801df8bae1dSRodney W. Grimes 			}
1802df8bae1dSRodney W. Grimes 		}
1803df8bae1dSRodney W. Grimes 		/*
1804df8bae1dSRodney W. Grimes 		 * Make a copy of the object.
1805df8bae1dSRodney W. Grimes 		 */
1806a316d390SJohn Dyson 		temp_pindex = OFF_TO_IDX(dst_entry->offset);
1807df8bae1dSRodney W. Grimes 		vm_object_copy(src_entry->object.vm_object,
1808a316d390SJohn Dyson 		    OFF_TO_IDX(src_entry->offset),
1809df8bae1dSRodney W. Grimes 		    &dst_entry->object.vm_object,
1810a316d390SJohn Dyson 		    &temp_pindex,
1811df8bae1dSRodney W. Grimes 		    &src_needs_copy);
1812a316d390SJohn Dyson 		dst_entry->offset = IDX_TO_OFF(temp_pindex);
1813df8bae1dSRodney W. Grimes 		/*
18140d94caffSDavid Greenman 		 * If we didn't get a copy-object now, mark the source map
18150d94caffSDavid Greenman 		 * entry so that a shadow will be created to hold its changed
18160d94caffSDavid Greenman 		 * pages.
1817df8bae1dSRodney W. Grimes 		 */
1818df8bae1dSRodney W. Grimes 		if (src_needs_copy)
1819df8bae1dSRodney W. Grimes 			src_entry->needs_copy = TRUE;
1820df8bae1dSRodney W. Grimes 
1821df8bae1dSRodney W. Grimes 		/*
18220d94caffSDavid Greenman 		 * The destination always needs to have a shadow created.
1823df8bae1dSRodney W. Grimes 		 */
1824df8bae1dSRodney W. Grimes 		dst_entry->needs_copy = TRUE;
1825df8bae1dSRodney W. Grimes 
1826df8bae1dSRodney W. Grimes 		/*
18270d94caffSDavid Greenman 		 * Mark the entries copy-on-write, so that write-enabling the
18280d94caffSDavid Greenman 		 * entry won't make copy-on-write pages writable.
1829df8bae1dSRodney W. Grimes 		 */
1830df8bae1dSRodney W. Grimes 		src_entry->copy_on_write = TRUE;
1831df8bae1dSRodney W. Grimes 		dst_entry->copy_on_write = TRUE;
1832df8bae1dSRodney W. Grimes 
1833df8bae1dSRodney W. Grimes 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1834df8bae1dSRodney W. Grimes 		    dst_entry->end - dst_entry->start, src_entry->start);
18350d94caffSDavid Greenman 	} else {
1836df8bae1dSRodney W. Grimes 		/*
1837df8bae1dSRodney W. Grimes 		 * Of course, wired down pages can't be set copy-on-write.
18380d94caffSDavid Greenman 		 * Cause wired pages to be copied into the new map by
18390d94caffSDavid Greenman 		 * simulating faults (the new pages are pageable)
1840df8bae1dSRodney W. Grimes 		 */
1841df8bae1dSRodney W. Grimes 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1842df8bae1dSRodney W. Grimes 	}
1843df8bae1dSRodney W. Grimes }
1844df8bae1dSRodney W. Grimes 
1845df8bae1dSRodney W. Grimes /*
1846df8bae1dSRodney W. Grimes  * vmspace_fork:
1847df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
1848df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
1849df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
1850df8bae1dSRodney W. Grimes  * values on the regions in that map.
1851df8bae1dSRodney W. Grimes  *
1852df8bae1dSRodney W. Grimes  * The source map must not be locked.
1853df8bae1dSRodney W. Grimes  */
1854df8bae1dSRodney W. Grimes struct vmspace *
1855df8bae1dSRodney W. Grimes vmspace_fork(vm1)
1856df8bae1dSRodney W. Grimes 	register struct vmspace *vm1;
1857df8bae1dSRodney W. Grimes {
1858df8bae1dSRodney W. Grimes 	register struct vmspace *vm2;
1859df8bae1dSRodney W. Grimes 	vm_map_t old_map = &vm1->vm_map;
1860df8bae1dSRodney W. Grimes 	vm_map_t new_map;
1861df8bae1dSRodney W. Grimes 	vm_map_entry_t old_entry;
1862df8bae1dSRodney W. Grimes 	vm_map_entry_t new_entry;
1863df8bae1dSRodney W. Grimes 	pmap_t new_pmap;
1864df8bae1dSRodney W. Grimes 
	/*
	 * Hold the parent's map lock across the entire walk so the entry
	 * list cannot change while it is being cloned into the child.
	 */
1865df8bae1dSRodney W. Grimes 	vm_map_lock(old_map);
1866df8bae1dSRodney W. Grimes 
1867df8bae1dSRodney W. Grimes 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
1868df8bae1dSRodney W. Grimes 	    old_map->entries_pageable);
	/*
	 * Copy everything from vm_startcopy to the end of the vmspace
	 * wholesale; the fields before vm_startcopy were set up by
	 * vmspace_alloc() above.
	 */
1869df8bae1dSRodney W. Grimes 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1870df8bae1dSRodney W. Grimes 	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1871df8bae1dSRodney W. Grimes 	new_pmap = &vm2->vm_pmap;	/* XXX */
1872df8bae1dSRodney W. Grimes 	new_map = &vm2->vm_map;	/* XXX */
1873df8bae1dSRodney W. Grimes 
1874df8bae1dSRodney W. Grimes 	old_entry = old_map->header.next;
1875df8bae1dSRodney W. Grimes 
1876df8bae1dSRodney W. Grimes 	while (old_entry != &old_map->header) {
1877df8bae1dSRodney W. Grimes 		if (old_entry->is_sub_map)
1878df8bae1dSRodney W. Grimes 			panic("vm_map_fork: encountered a submap");
1879df8bae1dSRodney W. Grimes 
1880df8bae1dSRodney W. Grimes 		switch (old_entry->inheritance) {
1881df8bae1dSRodney W. Grimes 		case VM_INHERIT_NONE:
			/* Region is not passed on to the child at all. */
1882df8bae1dSRodney W. Grimes 			break;
1883df8bae1dSRodney W. Grimes 
1884df8bae1dSRodney W. Grimes 		case VM_INHERIT_SHARE:
1885df8bae1dSRodney W. Grimes 			/*
1886df8bae1dSRodney W. Grimes 			 * Clone the entry, referencing the sharing map.
1887df8bae1dSRodney W. Grimes 			 */
1888df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
1889df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
			/* Wiring is per-process state; child starts unwired. */
1890df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
			/*
			 * NOTE(review): the reference count is bumped
			 * directly and object.vm_object is assumed to be
			 * non-NULL here -- verify that SHARE entries always
			 * carry an object at this point.
			 */
189124a1cce3SDavid Greenman 			++new_entry->object.vm_object->ref_count;
1892df8bae1dSRodney W. Grimes 
1893df8bae1dSRodney W. Grimes 			/*
18940d94caffSDavid Greenman 			 * Insert the entry into the new map -- we know we're
18950d94caffSDavid Greenman 			 * inserting at the end of the new map.
1896df8bae1dSRodney W. Grimes 			 */
1897df8bae1dSRodney W. Grimes 
1898df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
1899df8bae1dSRodney W. Grimes 			    new_entry);
1900df8bae1dSRodney W. Grimes 
1901df8bae1dSRodney W. Grimes 			/*
1902df8bae1dSRodney W. Grimes 			 * Update the physical map
1903df8bae1dSRodney W. Grimes 			 */
1904df8bae1dSRodney W. Grimes 
1905df8bae1dSRodney W. Grimes 			pmap_copy(new_map->pmap, old_map->pmap,
1906df8bae1dSRodney W. Grimes 			    new_entry->start,
1907df8bae1dSRodney W. Grimes 			    (old_entry->end - old_entry->start),
1908df8bae1dSRodney W. Grimes 			    old_entry->start);
1909df8bae1dSRodney W. Grimes 			break;
1910df8bae1dSRodney W. Grimes 
1911df8bae1dSRodney W. Grimes 		case VM_INHERIT_COPY:
1912df8bae1dSRodney W. Grimes 			/*
1913df8bae1dSRodney W. Grimes 			 * Clone the entry and link into the map.
1914df8bae1dSRodney W. Grimes 			 */
1915df8bae1dSRodney W. Grimes 
1916df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
1917df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
1918df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
			/*
			 * Object is left NULL so vm_map_copy_entry() below
			 * can set up the copy-on-write backing itself.
			 */
1919df8bae1dSRodney W. Grimes 			new_entry->object.vm_object = NULL;
1920df8bae1dSRodney W. Grimes 			new_entry->is_a_map = FALSE;
1921df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
1922df8bae1dSRodney W. Grimes 			    new_entry);
192324a1cce3SDavid Greenman 			vm_map_copy_entry(old_map, new_map, old_entry, new_entry);
1924df8bae1dSRodney W. Grimes 			break;
1925df8bae1dSRodney W. Grimes 		}
1926df8bae1dSRodney W. Grimes 		old_entry = old_entry->next;
1927df8bae1dSRodney W. Grimes 	}
1928df8bae1dSRodney W. Grimes 
	/*
	 * NOTE(review): the child is charged the parent's full size even
	 * when VM_INHERIT_NONE regions were skipped -- confirm intended.
	 */
1929df8bae1dSRodney W. Grimes 	new_map->size = old_map->size;
1930df8bae1dSRodney W. Grimes 	vm_map_unlock(old_map);
1931df8bae1dSRodney W. Grimes 
1932df8bae1dSRodney W. Grimes 	return (vm2);
1933df8bae1dSRodney W. Grimes }
1934df8bae1dSRodney W. Grimes 
1935df8bae1dSRodney W. Grimes /*
1936df8bae1dSRodney W. Grimes  *	vm_map_lookup:
1937df8bae1dSRodney W. Grimes  *
1938df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
1939df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
1940df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
1941df8bae1dSRodney W. Grimes  *	type specified.
1942df8bae1dSRodney W. Grimes  *
1943df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
1944df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
1945df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
1946df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
1947df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
1948df8bae1dSRodney W. Grimes  *
1949df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
1950df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
1951df8bae1dSRodney W. Grimes  *
1952df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
1953df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
1954df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
1955df8bae1dSRodney W. Grimes  *	remain the same.
1956df8bae1dSRodney W. Grimes  */
1957df8bae1dSRodney W. Grimes int
1958df8bae1dSRodney W. Grimes vm_map_lookup(var_map, vaddr, fault_type, out_entry,
1959a316d390SJohn Dyson     object, pindex, out_prot, wired, single_use)
1960df8bae1dSRodney W. Grimes 	vm_map_t *var_map;	/* IN/OUT */
1961df8bae1dSRodney W. Grimes 	register vm_offset_t vaddr;
1962df8bae1dSRodney W. Grimes 	register vm_prot_t fault_type;
1963df8bae1dSRodney W. Grimes 
1964df8bae1dSRodney W. Grimes 	vm_map_entry_t *out_entry;	/* OUT */
1965df8bae1dSRodney W. Grimes 	vm_object_t *object;	/* OUT */
1966a316d390SJohn Dyson 	vm_pindex_t *pindex;	/* OUT */
1967df8bae1dSRodney W. Grimes 	vm_prot_t *out_prot;	/* OUT */
1968df8bae1dSRodney W. Grimes 	boolean_t *wired;	/* OUT */
1969df8bae1dSRodney W. Grimes 	boolean_t *single_use;	/* OUT */
1970df8bae1dSRodney W. Grimes {
1971df8bae1dSRodney W. Grimes 	vm_map_t share_map;
1972df8bae1dSRodney W. Grimes 	vm_offset_t share_offset;
1973df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
1974df8bae1dSRodney W. Grimes 	register vm_map_t map = *var_map;
1975df8bae1dSRodney W. Grimes 	register vm_prot_t prot;
1976df8bae1dSRodney W. Grimes 	register boolean_t su;
1977df8bae1dSRodney W. Grimes 
	/*
	 * Restart point: taken whenever the read lock must be dropped --
	 * either to descend into a submap or after a failed read-to-write
	 * lock upgrade below.
	 */
1978df8bae1dSRodney W. Grimes RetryLookup:;
1979df8bae1dSRodney W. Grimes 
1980df8bae1dSRodney W. Grimes 	/*
1981df8bae1dSRodney W. Grimes 	 * Lookup the faulting address.
1982df8bae1dSRodney W. Grimes 	 */
1983df8bae1dSRodney W. Grimes 
1984df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
1985df8bae1dSRodney W. Grimes 
1986df8bae1dSRodney W. Grimes #define	RETURN(why) \
1987df8bae1dSRodney W. Grimes 		{ \
1988df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map); \
1989df8bae1dSRodney W. Grimes 		return(why); \
1990df8bae1dSRodney W. Grimes 		}
1991df8bae1dSRodney W. Grimes 
1992df8bae1dSRodney W. Grimes 	/*
19930d94caffSDavid Greenman 	 * If the map has an interesting hint, try it before calling full
19940d94caffSDavid Greenman 	 * blown lookup routine.
1995df8bae1dSRodney W. Grimes 	 */
1996df8bae1dSRodney W. Grimes 
1997df8bae1dSRodney W. Grimes 	entry = map->hint;
1998df8bae1dSRodney W. Grimes 
1999df8bae1dSRodney W. Grimes 	*out_entry = entry;
2000df8bae1dSRodney W. Grimes 
2001df8bae1dSRodney W. Grimes 	if ((entry == &map->header) ||
2002df8bae1dSRodney W. Grimes 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2003df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp_entry;
2004df8bae1dSRodney W. Grimes 
2005df8bae1dSRodney W. Grimes 		/*
20060d94caffSDavid Greenman 		 * Entry was either not a valid hint, or the vaddr was not
20070d94caffSDavid Greenman 		 * contained in the entry, so do a full lookup.
2008df8bae1dSRodney W. Grimes 		 */
2009df8bae1dSRodney W. Grimes 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2010df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2011df8bae1dSRodney W. Grimes 
2012df8bae1dSRodney W. Grimes 		entry = tmp_entry;
2013df8bae1dSRodney W. Grimes 		*out_entry = entry;
2014df8bae1dSRodney W. Grimes 	}
2015df8bae1dSRodney W. Grimes 	/*
2016df8bae1dSRodney W. Grimes 	 * Handle submaps.
2017df8bae1dSRodney W. Grimes 	 */
2018df8bae1dSRodney W. Grimes 
2019df8bae1dSRodney W. Grimes 	if (entry->is_sub_map) {
2020df8bae1dSRodney W. Grimes 		vm_map_t old_map = map;
2021df8bae1dSRodney W. Grimes 
		/* Descend into the submap and redo the lookup there. */
2022df8bae1dSRodney W. Grimes 		*var_map = map = entry->object.sub_map;
2023df8bae1dSRodney W. Grimes 		vm_map_unlock_read(old_map);
2024df8bae1dSRodney W. Grimes 		goto RetryLookup;
2025df8bae1dSRodney W. Grimes 	}
2026df8bae1dSRodney W. Grimes 	/*
20270d94caffSDavid Greenman 	 * Check whether this task is allowed to have this page.
2028df8bae1dSRodney W. Grimes 	 */
2029df8bae1dSRodney W. Grimes 
2030df8bae1dSRodney W. Grimes 	prot = entry->protection;
2031df8bae1dSRodney W. Grimes 	if ((fault_type & (prot)) != fault_type)
2032df8bae1dSRodney W. Grimes 		RETURN(KERN_PROTECTION_FAILURE);
2033df8bae1dSRodney W. Grimes 
2034df8bae1dSRodney W. Grimes 	/*
20350d94caffSDavid Greenman 	 * If this page is not pageable, we have to get it for all possible
20360d94caffSDavid Greenman 	 * accesses.
2037df8bae1dSRodney W. Grimes 	 */
2038df8bae1dSRodney W. Grimes 
203905f0fdd2SPoul-Henning Kamp 	*wired = (entry->wired_count != 0);
204005f0fdd2SPoul-Henning Kamp 	if (*wired)
		/* Wired pages are faulted with every allowed access at once. */
2041df8bae1dSRodney W. Grimes 		prot = fault_type = entry->protection;
2042df8bae1dSRodney W. Grimes 
2043df8bae1dSRodney W. Grimes 	/*
20440d94caffSDavid Greenman 	 * If we don't already have a VM object, track it down.
2045df8bae1dSRodney W. Grimes 	 */
2046df8bae1dSRodney W. Grimes 
204705f0fdd2SPoul-Henning Kamp 	su = !entry->is_a_map;
204805f0fdd2SPoul-Henning Kamp 	if (su) {
		/* Not a share map: the top-level map holds the object. */
2049df8bae1dSRodney W. Grimes 		share_map = map;
2050df8bae1dSRodney W. Grimes 		share_offset = vaddr;
20510d94caffSDavid Greenman 	} else {
2052df8bae1dSRodney W. Grimes 		vm_map_entry_t share_entry;
2053df8bae1dSRodney W. Grimes 
2054df8bae1dSRodney W. Grimes 		/*
2055df8bae1dSRodney W. Grimes 		 * Compute the sharing map, and offset into it.
2056df8bae1dSRodney W. Grimes 		 */
2057df8bae1dSRodney W. Grimes 
2058df8bae1dSRodney W. Grimes 		share_map = entry->object.share_map;
2059df8bae1dSRodney W. Grimes 		share_offset = (vaddr - entry->start) + entry->offset;
2060df8bae1dSRodney W. Grimes 
2061df8bae1dSRodney W. Grimes 		/*
2062df8bae1dSRodney W. Grimes 		 * Look for the backing store object and offset
2063df8bae1dSRodney W. Grimes 		 */
2064df8bae1dSRodney W. Grimes 
		/*
		 * Both the main map and the share map are now read-locked;
		 * vm_map_lookup_done() releases them in the reverse order.
		 */
2065df8bae1dSRodney W. Grimes 		vm_map_lock_read(share_map);
2066df8bae1dSRodney W. Grimes 
2067df8bae1dSRodney W. Grimes 		if (!vm_map_lookup_entry(share_map, share_offset,
2068df8bae1dSRodney W. Grimes 			&share_entry)) {
2069df8bae1dSRodney W. Grimes 			vm_map_unlock_read(share_map);
2070df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2071df8bae1dSRodney W. Grimes 		}
2072df8bae1dSRodney W. Grimes 		entry = share_entry;
2073df8bae1dSRodney W. Grimes 	}
2074df8bae1dSRodney W. Grimes 
2075df8bae1dSRodney W. Grimes 	/*
2076df8bae1dSRodney W. Grimes 	 * If the entry was copy-on-write, we either ...
2077df8bae1dSRodney W. Grimes 	 */
2078df8bae1dSRodney W. Grimes 
2079df8bae1dSRodney W. Grimes 	if (entry->needs_copy) {
2080df8bae1dSRodney W. Grimes 		/*
20810d94caffSDavid Greenman 		 * If we want to write the page, we may as well handle that
20820d94caffSDavid Greenman 		 * now since we've got the sharing map locked.
2083df8bae1dSRodney W. Grimes 		 *
20840d94caffSDavid Greenman 		 * If we don't need to write the page, we just demote the
20850d94caffSDavid Greenman 		 * permissions allowed.
2086df8bae1dSRodney W. Grimes 		 */
2087df8bae1dSRodney W. Grimes 
2088df8bae1dSRodney W. Grimes 		if (fault_type & VM_PROT_WRITE) {
2089df8bae1dSRodney W. Grimes 			/*
20900d94caffSDavid Greenman 			 * Make a new object, and place it in the object
20910d94caffSDavid Greenman 			 * chain.  Note that no new references have appeared
20920d94caffSDavid Greenman 			 * -- one just moved from the share map to the new
20930d94caffSDavid Greenman 			 * object.
2094df8bae1dSRodney W. Grimes 			 */
2095df8bae1dSRodney W. Grimes 
			/*
			 * Upgrade to a write lock; on failure the lock was
			 * dropped, so restart the whole lookup from scratch.
			 */
2096df8bae1dSRodney W. Grimes 			if (lock_read_to_write(&share_map->lock)) {
2097df8bae1dSRodney W. Grimes 				if (share_map != map)
2098df8bae1dSRodney W. Grimes 					vm_map_unlock_read(map);
2099df8bae1dSRodney W. Grimes 				goto RetryLookup;
2100df8bae1dSRodney W. Grimes 			}
2101df8bae1dSRodney W. Grimes 			vm_object_shadow(
2102df8bae1dSRodney W. Grimes 			    &entry->object.vm_object,
2103df8bae1dSRodney W. Grimes 			    &entry->offset,
2104a316d390SJohn Dyson 			    OFF_TO_IDX(entry->end - entry->start));
2105df8bae1dSRodney W. Grimes 
2106df8bae1dSRodney W. Grimes 			entry->needs_copy = FALSE;
2107df8bae1dSRodney W. Grimes 
2108df8bae1dSRodney W. Grimes 			lock_write_to_read(&share_map->lock);
21090d94caffSDavid Greenman 		} else {
2110df8bae1dSRodney W. Grimes 			/*
21110d94caffSDavid Greenman 			 * We're attempting to read a copy-on-write page --
21120d94caffSDavid Greenman 			 * don't allow writes.
2113df8bae1dSRodney W. Grimes 			 */
2114df8bae1dSRodney W. Grimes 
2115df8bae1dSRodney W. Grimes 			prot &= (~VM_PROT_WRITE);
2116df8bae1dSRodney W. Grimes 		}
2117df8bae1dSRodney W. Grimes 	}
2118df8bae1dSRodney W. Grimes 	/*
2119df8bae1dSRodney W. Grimes 	 * Create an object if necessary.
2120df8bae1dSRodney W. Grimes 	 */
2121df8bae1dSRodney W. Grimes 	if (entry->object.vm_object == NULL) {
2122df8bae1dSRodney W. Grimes 
2123df8bae1dSRodney W. Grimes 		if (lock_read_to_write(&share_map->lock)) {
2124df8bae1dSRodney W. Grimes 			if (share_map != map)
2125df8bae1dSRodney W. Grimes 				vm_map_unlock_read(map);
2126df8bae1dSRodney W. Grimes 			goto RetryLookup;
2127df8bae1dSRodney W. Grimes 		}
		/* Back the entry with fresh zero-fill (default) memory. */
212824a1cce3SDavid Greenman 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2129a316d390SJohn Dyson 		    OFF_TO_IDX(entry->end - entry->start));
2130df8bae1dSRodney W. Grimes 		entry->offset = 0;
2131df8bae1dSRodney W. Grimes 		lock_write_to_read(&share_map->lock);
2132df8bae1dSRodney W. Grimes 	}
2133df8bae1dSRodney W. Grimes 	/*
21340d94caffSDavid Greenman 	 * Return the object/offset from this entry.  If the entry was
21350d94caffSDavid Greenman 	 * copy-on-write or empty, it has been fixed up.
2136df8bae1dSRodney W. Grimes 	 */
2137df8bae1dSRodney W. Grimes 
2138a316d390SJohn Dyson 	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
2139df8bae1dSRodney W. Grimes 	*object = entry->object.vm_object;
2140df8bae1dSRodney W. Grimes 
2141df8bae1dSRodney W. Grimes 	/*
2142df8bae1dSRodney W. Grimes 	 * Return whether this is the only map sharing this data.
2143df8bae1dSRodney W. Grimes 	 */
2144df8bae1dSRodney W. Grimes 
2145df8bae1dSRodney W. Grimes 	if (!su) {
2146df8bae1dSRodney W. Grimes 		su = (share_map->ref_count == 1);
2147df8bae1dSRodney W. Grimes 	}
2148df8bae1dSRodney W. Grimes 	*out_prot = prot;
2149df8bae1dSRodney W. Grimes 	*single_use = su;
2150df8bae1dSRodney W. Grimes 
2151df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2152df8bae1dSRodney W. Grimes 
2153df8bae1dSRodney W. Grimes #undef	RETURN
2154df8bae1dSRodney W. Grimes }
2155df8bae1dSRodney W. Grimes 
2156df8bae1dSRodney W. Grimes /*
2157df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
2158df8bae1dSRodney W. Grimes  *
2159df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
2160df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
2161df8bae1dSRodney W. Grimes  */
2162df8bae1dSRodney W. Grimes 
21630d94caffSDavid Greenman void
21640d94caffSDavid Greenman vm_map_lookup_done(map, entry)
2165df8bae1dSRodney W. Grimes 	register vm_map_t map;
2166df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
2167df8bae1dSRodney W. Grimes {
2168df8bae1dSRodney W. Grimes 	/*
2169df8bae1dSRodney W. Grimes 	 * If this entry references a map, unlock it first.
2170df8bae1dSRodney W. Grimes 	 */
2171df8bae1dSRodney W. Grimes 
2172df8bae1dSRodney W. Grimes 	if (entry->is_a_map)
2173df8bae1dSRodney W. Grimes 		vm_map_unlock_read(entry->object.share_map);
2174df8bae1dSRodney W. Grimes 
2175df8bae1dSRodney W. Grimes 	/*
2176df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
2177df8bae1dSRodney W. Grimes 	 */
2178df8bae1dSRodney W. Grimes 
2179df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
2180df8bae1dSRodney W. Grimes }
2181df8bae1dSRodney W. Grimes 
2182df8bae1dSRodney W. Grimes /*
2183df8bae1dSRodney W. Grimes  *	Routine:	vm_map_simplify
2184df8bae1dSRodney W. Grimes  *	Purpose:
2185df8bae1dSRodney W. Grimes  *		Attempt to simplify the map representation in
2186df8bae1dSRodney W. Grimes  *		the vicinity of the given starting address.
2187df8bae1dSRodney W. Grimes  *	Note:
2188df8bae1dSRodney W. Grimes  *		This routine is intended primarily to keep the
2189df8bae1dSRodney W. Grimes  *		kernel maps more compact -- they generally don't
2190df8bae1dSRodney W. Grimes  *		benefit from the "expand a map entry" technology
2191df8bae1dSRodney W. Grimes  *		at allocation time because the adjacent entry
2192df8bae1dSRodney W. Grimes  *		is often wired down.
2193df8bae1dSRodney W. Grimes  */
21940d94caffSDavid Greenman void
21950d94caffSDavid Greenman vm_map_simplify(map, start)
2196df8bae1dSRodney W. Grimes 	vm_map_t map;
2197df8bae1dSRodney W. Grimes 	vm_offset_t start;
2198df8bae1dSRodney W. Grimes {
2199df8bae1dSRodney W. Grimes 	vm_map_entry_t this_entry;
2200df8bae1dSRodney W. Grimes 	vm_map_entry_t prev_entry;
2201df8bae1dSRodney W. Grimes 
2202df8bae1dSRodney W. Grimes 	vm_map_lock(map);
	/*
	 * The two entries may be merged only when they are exactly
	 * adjacent, reference the same object at contiguous offsets,
	 * and agree on every attribute that callers can observe.
	 */
2203df8bae1dSRodney W. Grimes 	if (
2204df8bae1dSRodney W. Grimes 	    (vm_map_lookup_entry(map, start, &this_entry)) &&
2205df8bae1dSRodney W. Grimes 	    ((prev_entry = this_entry->prev) != &map->header) &&
2206df8bae1dSRodney W. Grimes 
2207df8bae1dSRodney W. Grimes 	    (prev_entry->end == start) &&
2208df8bae1dSRodney W. Grimes 	    (map->is_main_map) &&
2209df8bae1dSRodney W. Grimes 
2210df8bae1dSRodney W. Grimes 	    (prev_entry->is_a_map == FALSE) &&
2211df8bae1dSRodney W. Grimes 	    (prev_entry->is_sub_map == FALSE) &&
2212df8bae1dSRodney W. Grimes 
2213df8bae1dSRodney W. Grimes 	    (this_entry->is_a_map == FALSE) &&
2214df8bae1dSRodney W. Grimes 	    (this_entry->is_sub_map == FALSE) &&
2215df8bae1dSRodney W. Grimes 
2216df8bae1dSRodney W. Grimes 	    (prev_entry->inheritance == this_entry->inheritance) &&
2217df8bae1dSRodney W. Grimes 	    (prev_entry->protection == this_entry->protection) &&
2218df8bae1dSRodney W. Grimes 	    (prev_entry->max_protection == this_entry->max_protection) &&
2219df8bae1dSRodney W. Grimes 	    (prev_entry->wired_count == this_entry->wired_count) &&
2220df8bae1dSRodney W. Grimes 
2221df8bae1dSRodney W. Grimes 	    (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2222df8bae1dSRodney W. Grimes 	    (prev_entry->needs_copy == this_entry->needs_copy) &&
2223df8bae1dSRodney W. Grimes 
2224df8bae1dSRodney W. Grimes 	    (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2225df8bae1dSRodney W. Grimes 	    ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2226df8bae1dSRodney W. Grimes 		== this_entry->offset)
2227df8bae1dSRodney W. Grimes 	    ) {
2228df8bae1dSRodney W. Grimes 		if (map->first_free == this_entry)
2229df8bae1dSRodney W. Grimes 			map->first_free = prev_entry;
2230df8bae1dSRodney W. Grimes 
		/*
		 * Skip the merge while the object has paging in flight.
		 * NOTE(review): object.vm_object is dereferenced without a
		 * NULL check here -- verify that the offset-contiguity test
		 * above cannot be satisfied by two NULL-object entries.
		 */
223126f9a767SRodney W. Grimes 		if (!this_entry->object.vm_object->paging_in_progress) {
2232df8bae1dSRodney W. Grimes 			SAVE_HINT(map, prev_entry);
2233df8bae1dSRodney W. Grimes 			vm_map_entry_unlink(map, this_entry);
2234df8bae1dSRodney W. Grimes 			prev_entry->end = this_entry->end;
			/* Drop the reference the absorbed entry held. */
2235df8bae1dSRodney W. Grimes 			vm_object_deallocate(this_entry->object.vm_object);
2236df8bae1dSRodney W. Grimes 			vm_map_entry_dispose(map, this_entry);
2237df8bae1dSRodney W. Grimes 		}
223826f9a767SRodney W. Grimes 	}
2239df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
2240df8bae1dSRodney W. Grimes }
2241df8bae1dSRodney W. Grimes 
2242c3cb3e12SDavid Greenman #ifdef DDB
2243df8bae1dSRodney W. Grimes /*
2244df8bae1dSRodney W. Grimes  *	vm_map_print:	[ debug ]
2245df8bae1dSRodney W. Grimes  */
22460d94caffSDavid Greenman void
2247914181e7SBruce Evans vm_map_print(imap, full, dummy3, dummy4)
2248914181e7SBruce Evans 	/* db_expr_t */ int imap;
2249df8bae1dSRodney W. Grimes 	boolean_t full;
2250914181e7SBruce Evans 	/* db_expr_t */ int dummy3;
2251914181e7SBruce Evans 	char *dummy4;
2252df8bae1dSRodney W. Grimes {
2253df8bae1dSRodney W. Grimes 	register vm_map_entry_t entry;
	/*
	 * DDB passes the map address as an integer expression.
	 * NOTE(review): the int<->pointer round-trips in this function
	 * assume pointers fit in an int (32-bit era) -- not portable.
	 */
2254914181e7SBruce Evans 	register vm_map_t map = (vm_map_t)imap;	/* XXX */
2255df8bae1dSRodney W. Grimes 
2256df8bae1dSRodney W. Grimes 	iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2257df8bae1dSRodney W. Grimes 	    (map->is_main_map ? "Task" : "Share"),
2258df8bae1dSRodney W. Grimes 	    (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2259df8bae1dSRodney W. Grimes 	    map->timestamp);
2260df8bae1dSRodney W. Grimes 
	/* Without "full", nested (indented) maps print only the summary. */
2261df8bae1dSRodney W. Grimes 	if (!full && indent)
2262df8bae1dSRodney W. Grimes 		return;
2263df8bae1dSRodney W. Grimes 
2264df8bae1dSRodney W. Grimes 	indent += 2;
2265df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
2266df8bae1dSRodney W. Grimes 	    entry = entry->next) {
2267df8bae1dSRodney W. Grimes 		iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2268df8bae1dSRodney W. Grimes 		    (int) entry, (int) entry->start, (int) entry->end);
2269df8bae1dSRodney W. Grimes 		if (map->is_main_map) {
2270df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
2271df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
22720d94caffSDavid Greenman 
2273df8bae1dSRodney W. Grimes 			printf("prot=%x/%x/%s, ",
2274df8bae1dSRodney W. Grimes 			    entry->protection,
2275df8bae1dSRodney W. Grimes 			    entry->max_protection,
2276df8bae1dSRodney W. Grimes 			    inheritance_name[entry->inheritance]);
2277df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
2278df8bae1dSRodney W. Grimes 				printf("wired, ");
2279df8bae1dSRodney W. Grimes 		}
2280df8bae1dSRodney W. Grimes 		if (entry->is_a_map || entry->is_sub_map) {
2281df8bae1dSRodney W. Grimes 			printf("share=0x%x, offset=0x%x\n",
2282df8bae1dSRodney W. Grimes 			    (int) entry->object.share_map,
2283df8bae1dSRodney W. Grimes 			    (int) entry->offset);
			/*
			 * Recurse into the share map only for its first
			 * reference in the list, to avoid repeated dumps
			 * of the same map.
			 */
2284df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
2285df8bae1dSRodney W. Grimes 			    (!entry->prev->is_a_map) ||
2286df8bae1dSRodney W. Grimes 			    (entry->prev->object.share_map !=
2287df8bae1dSRodney W. Grimes 				entry->object.share_map)) {
2288df8bae1dSRodney W. Grimes 				indent += 2;
2289914181e7SBruce Evans 				vm_map_print((int)entry->object.share_map,
2290914181e7SBruce Evans 					     full, 0, (char *)0);
2291df8bae1dSRodney W. Grimes 				indent -= 2;
2292df8bae1dSRodney W. Grimes 			}
22930d94caffSDavid Greenman 		} else {
2294df8bae1dSRodney W. Grimes 			printf("object=0x%x, offset=0x%x",
2295df8bae1dSRodney W. Grimes 			    (int) entry->object.vm_object,
2296df8bae1dSRodney W. Grimes 			    (int) entry->offset);
2297df8bae1dSRodney W. Grimes 			if (entry->copy_on_write)
2298df8bae1dSRodney W. Grimes 				printf(", copy (%s)",
2299df8bae1dSRodney W. Grimes 				    entry->needs_copy ? "needed" : "done");
2300df8bae1dSRodney W. Grimes 			printf("\n");
2301df8bae1dSRodney W. Grimes 
			/* Same first-reference rule for plain VM objects. */
2302df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
2303df8bae1dSRodney W. Grimes 			    (entry->prev->is_a_map) ||
2304df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
2305df8bae1dSRodney W. Grimes 				entry->object.vm_object)) {
2306df8bae1dSRodney W. Grimes 				indent += 2;
2307914181e7SBruce Evans 				vm_object_print((int)entry->object.vm_object,
2308914181e7SBruce Evans 						full, 0, (char *)0);
2309df8bae1dSRodney W. Grimes 				indent -= 2;
2310df8bae1dSRodney W. Grimes 			}
2311df8bae1dSRodney W. Grimes 		}
2312df8bae1dSRodney W. Grimes 	}
2313df8bae1dSRodney W. Grimes 	indent -= 2;
2314df8bae1dSRodney W. Grimes }
2315c3cb3e12SDavid Greenman #endif
2316