xref: /freebsd/sys/vm/vm_map.c (revision 4e94f4022287c40a9216678a7d9e5ccd61e421fa)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
175929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
18df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
19df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
20df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
21df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
22df8bae1dSRodney W. Grimes  *    without specific prior written permission.
23df8bae1dSRodney W. Grimes  *
24df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
35df8bae1dSRodney W. Grimes  *
363c4dd356SDavid Greenman  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  *
39df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40df8bae1dSRodney W. Grimes  * All rights reserved.
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43df8bae1dSRodney W. Grimes  *
44df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
45df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
46df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
47df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
48df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
55df8bae1dSRodney W. Grimes  *
56df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57df8bae1dSRodney W. Grimes  *  School of Computer Science
58df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
59df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
60df8bae1dSRodney W. Grimes  *
61df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
62df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
633c4dd356SDavid Greenman  *
64c3aac50fSPeter Wemm  * $FreeBSD$
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes /*
68df8bae1dSRodney W. Grimes  *	Virtual memory mapping module.
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes #include <sys/param.h>
72df8bae1dSRodney W. Grimes #include <sys/systm.h>
7361d80e90SJohn Baldwin #include <sys/ktr.h>
74fb919e4dSMark Murray #include <sys/lock.h>
75fb919e4dSMark Murray #include <sys/mutex.h>
76b5e8ce9fSBruce Evans #include <sys/proc.h>
77efeaf95aSDavid Greenman #include <sys/vmmeter.h>
78867a482dSJohn Dyson #include <sys/mman.h>
791efb74fbSJohn Dyson #include <sys/vnode.h>
802267af78SJulian Elischer #include <sys/resourcevar.h>
81df8bae1dSRodney W. Grimes 
82df8bae1dSRodney W. Grimes #include <vm/vm.h>
83efeaf95aSDavid Greenman #include <vm/vm_param.h>
84efeaf95aSDavid Greenman #include <vm/pmap.h>
85efeaf95aSDavid Greenman #include <vm/vm_map.h>
86df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
87df8bae1dSRodney W. Grimes #include <vm/vm_object.h>
8847221757SJohn Dyson #include <vm/vm_pager.h>
8926f9a767SRodney W. Grimes #include <vm/vm_kern.h>
90efeaf95aSDavid Greenman #include <vm/vm_extern.h>
9121cd6e62SSeigo Tanimura #include <vm/swap_pager.h>
92670d17b5SJeff Roberson #include <vm/uma.h>
93df8bae1dSRodney W. Grimes 
94df8bae1dSRodney W. Grimes /*
95df8bae1dSRodney W. Grimes  *	Virtual memory maps provide for the mapping, protection,
96df8bae1dSRodney W. Grimes  *	and sharing of virtual memory objects.  In addition,
97df8bae1dSRodney W. Grimes  *	this module provides for an efficient virtual copy of
98df8bae1dSRodney W. Grimes  *	memory from one map to another.
99df8bae1dSRodney W. Grimes  *
100df8bae1dSRodney W. Grimes  *	Synchronization is required prior to most operations.
101df8bae1dSRodney W. Grimes  *
102df8bae1dSRodney W. Grimes  *	Maps consist of an ordered doubly-linked list of simple
103df8bae1dSRodney W. Grimes  *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
104df8bae1dSRodney W. Grimes  *
105956f3135SPhilippe Charnier  *	Since portions of maps are specified by start/end addresses,
106df8bae1dSRodney W. Grimes  *	which may not align with existing map entries, all
107df8bae1dSRodney W. Grimes  *	routines merely "clip" entries to these start/end values.
108df8bae1dSRodney W. Grimes  *	[That is, an entry is split into two, bordering at a
109df8bae1dSRodney W. Grimes  *	start or end value.]  Note that these clippings may not
110df8bae1dSRodney W. Grimes  *	always be necessary (as the two resulting entries are then
111df8bae1dSRodney W. Grimes  *	not changed); however, the clipping is done for convenience.
112df8bae1dSRodney W. Grimes  *
113df8bae1dSRodney W. Grimes  *	As mentioned above, virtual copy operations are performed
114ad5fca3bSAlan Cox  *	by copying VM object references from one map to
115df8bae1dSRodney W. Grimes  *	another, and then marking both regions as copy-on-write.
116df8bae1dSRodney W. Grimes  */
117df8bae1dSRodney W. Grimes 
118df8bae1dSRodney W. Grimes /*
119df8bae1dSRodney W. Grimes  *	vm_map_startup:
120df8bae1dSRodney W. Grimes  *
121df8bae1dSRodney W. Grimes  *	Initialize the vm_map module.  Must be called before
122df8bae1dSRodney W. Grimes  *	any other vm_map routines.
123df8bae1dSRodney W. Grimes  *
124df8bae1dSRodney W. Grimes  *	Map and entry structures are allocated from the general
125df8bae1dSRodney W. Grimes  *	purpose memory pool with some exceptions:
126df8bae1dSRodney W. Grimes  *
127df8bae1dSRodney W. Grimes  *	- The kernel map and kmem submap are allocated statically.
128df8bae1dSRodney W. Grimes  *	- Kernel map entries are allocated out of a static pool.
129df8bae1dSRodney W. Grimes  *
130df8bae1dSRodney W. Grimes  *	These restrictions are necessary since malloc() uses the
131df8bae1dSRodney W. Grimes  *	maps and requires map entries.
132df8bae1dSRodney W. Grimes  */
133df8bae1dSRodney W. Grimes 
1348355f576SJeff Roberson static uma_zone_t mapentzone;
1358355f576SJeff Roberson static uma_zone_t kmapentzone;
1368355f576SJeff Roberson static uma_zone_t mapzone;
1378355f576SJeff Roberson static uma_zone_t vmspace_zone;
1388355f576SJeff Roberson static struct vm_object kmapentobj;
1398355f576SJeff Roberson static void vmspace_zinit(void *mem, int size);
1408355f576SJeff Roberson static void vmspace_zfini(void *mem, int size);
1418355f576SJeff Roberson static void vm_map_zinit(void *mem, int size);
1428355f576SJeff Roberson static void vm_map_zfini(void *mem, int size);
1438355f576SJeff Roberson static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
1441fc43fd1SAlan Cox 
1458355f576SJeff Roberson #ifdef INVARIANTS
1468355f576SJeff Roberson static void vm_map_zdtor(void *mem, int size, void *arg);
1478355f576SJeff Roberson static void vmspace_zdtor(void *mem, int size, void *arg);
1488355f576SJeff Roberson #endif
149b18bfc3dSJohn Dyson 
1500d94caffSDavid Greenman void
1511b40f8c0SMatthew Dillon vm_map_startup(void)
152df8bae1dSRodney W. Grimes {
1538355f576SJeff Roberson 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
1548355f576SJeff Roberson #ifdef INVARIANTS
1558355f576SJeff Roberson 	    vm_map_zdtor,
1568355f576SJeff Roberson #else
1578355f576SJeff Roberson 	    NULL,
1588355f576SJeff Roberson #endif
1598355f576SJeff Roberson 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
1608355f576SJeff Roberson 	uma_prealloc(mapzone, MAX_KMAP);
161670d17b5SJeff Roberson 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
16228bc4419SJeff Roberson 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS);
163670d17b5SJeff Roberson 	uma_prealloc(kmapentzone, MAX_KMAPENT);
164670d17b5SJeff Roberson 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
165670d17b5SJeff Roberson 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1668355f576SJeff Roberson 	uma_prealloc(mapentzone, MAX_MAPENT);
167df8bae1dSRodney W. Grimes }
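
/*
 * A note on the uma_zcreate() calls above (descriptive only, per the
 * UMA interface as of this revision): the argument order is (name,
 * size, ctor, dtor, uminit, fini, align, flags).  mapzone thus has no
 * ctor, an INVARIANTS-only dtor, and vm_map_zinit/vm_map_zfini as its
 * back-end init/fini pair, while the two map-entry zones pass NULL
 * for all four hooks.
 */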
168df8bae1dSRodney W. Grimes 
1698355f576SJeff Roberson static void
1708355f576SJeff Roberson vmspace_zfini(void *mem, int size)
1718355f576SJeff Roberson {
1728355f576SJeff Roberson 	struct vmspace *vm;
1738355f576SJeff Roberson 
1748355f576SJeff Roberson 	vm = (struct vmspace *)mem;
1758355f576SJeff Roberson 
1768355f576SJeff Roberson 	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
1778355f576SJeff Roberson }
1788355f576SJeff Roberson 
1798355f576SJeff Roberson static void
1808355f576SJeff Roberson vmspace_zinit(void *mem, int size)
1818355f576SJeff Roberson {
1828355f576SJeff Roberson 	struct vmspace *vm;
1838355f576SJeff Roberson 
1848355f576SJeff Roberson 	vm = (struct vmspace *)mem;
1858355f576SJeff Roberson 
1868355f576SJeff Roberson 	vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
1878355f576SJeff Roberson }
1888355f576SJeff Roberson 
1898355f576SJeff Roberson static void
1908355f576SJeff Roberson vm_map_zfini(void *mem, int size)
1918355f576SJeff Roberson {
1928355f576SJeff Roberson 	vm_map_t map;
1938355f576SJeff Roberson 
1948355f576SJeff Roberson 	GIANT_REQUIRED;
1958355f576SJeff Roberson 	map = (vm_map_t)mem;
1968355f576SJeff Roberson 
1978355f576SJeff Roberson 	lockdestroy(&map->lock);
1988355f576SJeff Roberson }
1998355f576SJeff Roberson 
2008355f576SJeff Roberson static void
2018355f576SJeff Roberson vm_map_zinit(void *mem, int size)
2028355f576SJeff Roberson {
2038355f576SJeff Roberson 	vm_map_t map;
2048355f576SJeff Roberson 
2058355f576SJeff Roberson 	GIANT_REQUIRED;
2068355f576SJeff Roberson 
2078355f576SJeff Roberson 	map = (vm_map_t)mem;
2088355f576SJeff Roberson 	map->nentries = 0;
2098355f576SJeff Roberson 	map->size = 0;
2108355f576SJeff Roberson 	map->infork = 0;
211bc91c510SAlan Cox 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_CANRECURSE | LK_NOPAUSE);
2128355f576SJeff Roberson }
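
/*
 * Note (an assumption about UMA behavior, not stated in this file):
 * since vm_map_zinit/vm_map_zfini are installed as back-end init/fini
 * hooks and mapzone is UMA_ZONE_NOFREE, the lockinit() above runs once
 * when an item first enters the zone, and the lock then survives
 * repeated uma_zalloc()/uma_zfree() cycles; the matching lockdestroy()
 * in vm_map_zfini would only be reached at zone teardown.
 */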
2138355f576SJeff Roberson 
2148355f576SJeff Roberson #ifdef INVARIANTS
2158355f576SJeff Roberson static void
2168355f576SJeff Roberson vmspace_zdtor(void *mem, int size, void *arg)
2178355f576SJeff Roberson {
2188355f576SJeff Roberson 	struct vmspace *vm;
2198355f576SJeff Roberson 
2208355f576SJeff Roberson 	vm = (struct vmspace *)mem;
2218355f576SJeff Roberson 
2228355f576SJeff Roberson 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
2238355f576SJeff Roberson }
2248355f576SJeff Roberson static void
2258355f576SJeff Roberson vm_map_zdtor(void *mem, int size, void *arg)
2268355f576SJeff Roberson {
2278355f576SJeff Roberson 	vm_map_t map;
2288355f576SJeff Roberson 
2298355f576SJeff Roberson 	map = (vm_map_t)mem;
2308355f576SJeff Roberson 	KASSERT(map->nentries == 0,
2318355f576SJeff Roberson 	    ("map %p nentries == %d on free.",
2328355f576SJeff Roberson 	    map, map->nentries));
2338355f576SJeff Roberson 	KASSERT(map->size == 0,
2348355f576SJeff Roberson 	    ("map %p size == %lu on free.",
2359eb6e519SJeff Roberson 	    map, (unsigned long)map->size));
2368355f576SJeff Roberson 	KASSERT(map->infork == 0,
2378355f576SJeff Roberson 	    ("map %p infork == %d on free.",
2388355f576SJeff Roberson 	    map, map->infork));
2398355f576SJeff Roberson }
2408355f576SJeff Roberson #endif	/* INVARIANTS */
2418355f576SJeff Roberson 
242df8bae1dSRodney W. Grimes /*
243df8bae1dSRodney W. Grimes  * Allocate a vmspace structure, including a vm_map and pmap,
244df8bae1dSRodney W. Grimes  * and initialize those structures.  The refcnt is set to 1.
245df8bae1dSRodney W. Grimes  * The remaining fields must be initialized by the caller.
246df8bae1dSRodney W. Grimes  */
247df8bae1dSRodney W. Grimes struct vmspace *
2482d8acc0fSJohn Dyson vmspace_alloc(vm_offset_t min, vm_offset_t max)
250df8bae1dSRodney W. Grimes {
251c0877f10SJohn Dyson 	struct vmspace *vm;
2520d94caffSDavid Greenman 
2530cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
2548355f576SJeff Roberson 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
25521c641b2SJohn Baldwin 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
2568355f576SJeff Roberson 	_vm_map_init(&vm->vm_map, min, max);
257b1028ad1SLuoqi Chen 	pmap_pinit(vmspace_pmap(vm));
258b1028ad1SLuoqi Chen 	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
259df8bae1dSRodney W. Grimes 	vm->vm_refcnt = 1;
2602d8acc0fSJohn Dyson 	vm->vm_shm = NULL;
261582ec34cSAlfred Perlstein 	vm->vm_freer = NULL;
262df8bae1dSRodney W. Grimes 	return (vm);
263df8bae1dSRodney W. Grimes }
264df8bae1dSRodney W. Grimes 
265df8bae1dSRodney W. Grimes void
2661b40f8c0SMatthew Dillon vm_init2(void)
2671b40f8c0SMatthew Dillon {
2688355f576SJeff Roberson 	uma_zone_set_obj(kmapentzone, &kmapentobj, cnt.v_page_count / 4);
2698355f576SJeff Roberson 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
2708355f576SJeff Roberson #ifdef INVARIANTS
2718355f576SJeff Roberson 	    vmspace_zdtor,
2728355f576SJeff Roberson #else
2738355f576SJeff Roberson 	    NULL,
2748355f576SJeff Roberson #endif
2758355f576SJeff Roberson 	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
276ba9be04cSJohn Dyson 	pmap_init2();
27799448ed1SJohn Dyson 	vm_object_init2();
2783075778bSJohn Dyson }
2793075778bSJohn Dyson 
280582ec34cSAlfred Perlstein static __inline void
281582ec34cSAlfred Perlstein vmspace_dofree(struct vmspace *vm)
282df8bae1dSRodney W. Grimes {
28321c641b2SJohn Baldwin 	CTR1(KTR_VM, "vmspace_free: %p", vm);
28430dcfc09SJohn Dyson 	/*
285df8bae1dSRodney W. Grimes 	 * Lock the map, to wait out all other references to it.
2860d94caffSDavid Greenman 	 * Delete all of the mappings and pages they hold, then call
2870d94caffSDavid Greenman 	 * the pmap module to reclaim anything left.
288df8bae1dSRodney W. Grimes 	 */
289df8bae1dSRodney W. Grimes 	vm_map_lock(&vm->vm_map);
290df8bae1dSRodney W. Grimes 	(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
291df8bae1dSRodney W. Grimes 	    vm->vm_map.max_offset);
292a1f6d91cSDavid Greenman 	vm_map_unlock(&vm->vm_map);
2938355f576SJeff Roberson 
294b1028ad1SLuoqi Chen 	pmap_release(vmspace_pmap(vm));
2958355f576SJeff Roberson 	uma_zfree(vmspace_zone, vm);
296df8bae1dSRodney W. Grimes }
297582ec34cSAlfred Perlstein 
298582ec34cSAlfred Perlstein void
299582ec34cSAlfred Perlstein vmspace_free(struct vmspace *vm)
300582ec34cSAlfred Perlstein {
301582ec34cSAlfred Perlstein 	GIANT_REQUIRED;
302582ec34cSAlfred Perlstein 
303582ec34cSAlfred Perlstein 	if (vm->vm_refcnt == 0)
304582ec34cSAlfred Perlstein 		panic("vmspace_free: attempt to free already freed vmspace");
305582ec34cSAlfred Perlstein 
306582ec34cSAlfred Perlstein 	if (--vm->vm_refcnt == 0)
307582ec34cSAlfred Perlstein 		vmspace_dofree(vm);
308582ec34cSAlfred Perlstein }
309582ec34cSAlfred Perlstein 
310582ec34cSAlfred Perlstein void
311582ec34cSAlfred Perlstein vmspace_exitfree(struct proc *p)
312582ec34cSAlfred Perlstein {
313334f7061SPeter Wemm 	struct vmspace *vm;
314582ec34cSAlfred Perlstein 
315334f7061SPeter Wemm 	GIANT_REQUIRED;
316334f7061SPeter Wemm 	if (p == p->p_vmspace->vm_freer) {
317334f7061SPeter Wemm 		vm = p->p_vmspace;
318334f7061SPeter Wemm 		p->p_vmspace = NULL;
319334f7061SPeter Wemm 		vmspace_dofree(vm);
320334f7061SPeter Wemm 	}
321df8bae1dSRodney W. Grimes }
322df8bae1dSRodney W. Grimes 
323df8bae1dSRodney W. Grimes /*
324ff2b5645SMatthew Dillon  * vmspace_swap_count() - count the approximate swap usage in pages for a
325ff2b5645SMatthew Dillon  *			  vmspace.
326ff2b5645SMatthew Dillon  *
327ff2b5645SMatthew Dillon  *	Swap usage is determined by taking the proportional swap used by
328ff2b5645SMatthew Dillon  *	VM objects backing the VM map.  To make up for fractional losses,
329ff2b5645SMatthew Dillon  *	if the VM object has any swap use at all the associated map entries
330ff2b5645SMatthew Dillon  *	count for at least 1 swap page.
331ff2b5645SMatthew Dillon  */
332ff2b5645SMatthew Dillon int
333ff2b5645SMatthew Dillon vmspace_swap_count(struct vmspace *vmspace)
334ff2b5645SMatthew Dillon {
335ff2b5645SMatthew Dillon 	vm_map_t map = &vmspace->vm_map;
336ff2b5645SMatthew Dillon 	vm_map_entry_t cur;
337ff2b5645SMatthew Dillon 	int count = 0;
338ff2b5645SMatthew Dillon 
339d974f03cSAlan Cox 	vm_map_lock_read(map);
340ff2b5645SMatthew Dillon 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
341ff2b5645SMatthew Dillon 		vm_object_t object;
342ff2b5645SMatthew Dillon 
343ff2b5645SMatthew Dillon 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
344ff2b5645SMatthew Dillon 		    (object = cur->object.vm_object) != NULL &&
345ff2b5645SMatthew Dillon 		    object->type == OBJT_SWAP
346ff2b5645SMatthew Dillon 		) {
347ff2b5645SMatthew Dillon 			int n = (cur->end - cur->start) / PAGE_SIZE;
348ff2b5645SMatthew Dillon 
349ff2b5645SMatthew Dillon 			if (object->un_pager.swp.swp_bcount) {
350ef6a93efSMatthew Dillon 				count += object->un_pager.swp.swp_bcount *
351ef6a93efSMatthew Dillon 				    SWAP_META_PAGES * n / object->size + 1;
352ff2b5645SMatthew Dillon 			}
353ff2b5645SMatthew Dillon 		}
354ff2b5645SMatthew Dillon 	}
355d974f03cSAlan Cox 	vm_map_unlock_read(map);
356ff2b5645SMatthew Dillon 	return (count);
357ff2b5645SMatthew Dillon }
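
/*
 * Worked example of the formula above (hypothetical numbers): with
 * SWAP_META_PAGES == 16, a 1024-page OBJT_SWAP object with
 * swp_bcount == 32 backing a 256-page entry contributes
 * 32 * 16 * 256 / 1024 + 1 == 129 pages, i.e. the entry's
 * proportional share of the object's swap plus one page to absorb
 * fractional losses.
 */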
358ff2b5645SMatthew Dillon 
3591b40f8c0SMatthew Dillon u_char
3601b40f8c0SMatthew Dillon vm_map_entry_behavior(struct vm_map_entry *entry)
3611b40f8c0SMatthew Dillon {
3621b40f8c0SMatthew Dillon 	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
3631b40f8c0SMatthew Dillon }
3641b40f8c0SMatthew Dillon 
3651b40f8c0SMatthew Dillon void
3661b40f8c0SMatthew Dillon vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
3671b40f8c0SMatthew Dillon {
3681b40f8c0SMatthew Dillon 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
3691b40f8c0SMatthew Dillon 		(behavior & MAP_ENTRY_BEHAV_MASK);
3701b40f8c0SMatthew Dillon }
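
/*
 * Illustrative caller (cf. vm_map_madvise() later in this file): an
 * madvise(MADV_SEQUENTIAL) request reduces, per entry in the range,
 * to roughly
 *
 *	vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
 *
 * leaving all non-behavior eflags untouched.
 */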
3711b40f8c0SMatthew Dillon 
3721b40f8c0SMatthew Dillon void
373780b1c09SAlan Cox _vm_map_lock(vm_map_t map, const char *file, int line)
3741b40f8c0SMatthew Dillon {
375bc91c510SAlan Cox 	int error;
376bc91c510SAlan Cox 
377bc91c510SAlan Cox 	error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
378bc91c510SAlan Cox 	KASSERT(error == 0, ("%s: failed to get lock", __func__));
3791b40f8c0SMatthew Dillon 	map->timestamp++;
3801b40f8c0SMatthew Dillon }
3811b40f8c0SMatthew Dillon 
3821b40f8c0SMatthew Dillon void
383780b1c09SAlan Cox _vm_map_unlock(vm_map_t map, const char *file, int line)
3840e0af8ecSBrian Feldman {
385bc91c510SAlan Cox 
386bc91c510SAlan Cox 	lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
3870e0af8ecSBrian Feldman }
3880e0af8ecSBrian Feldman 
3890e0af8ecSBrian Feldman void
390780b1c09SAlan Cox _vm_map_lock_read(vm_map_t map, const char *file, int line)
3910e0af8ecSBrian Feldman {
392bc91c510SAlan Cox 	int error;
393bc91c510SAlan Cox 
394bc91c510SAlan Cox 	error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
395bc91c510SAlan Cox 	KASSERT(error == 0, ("%s: failed to get lock", __func__));
3960e0af8ecSBrian Feldman }
3970e0af8ecSBrian Feldman 
3980e0af8ecSBrian Feldman void
399780b1c09SAlan Cox _vm_map_unlock_read(vm_map_t map, const char *file, int line)
4000e0af8ecSBrian Feldman {
401bc91c510SAlan Cox 
402bc91c510SAlan Cox 	lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
40325adb370SBrian Feldman }
40425adb370SBrian Feldman 
405d974f03cSAlan Cox int
406780b1c09SAlan Cox _vm_map_trylock(vm_map_t map, const char *file, int line)
407d974f03cSAlan Cox {
40825adb370SBrian Feldman 	int error;
40925adb370SBrian Feldman 
410bc91c510SAlan Cox 	error = lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
411bc91c510SAlan Cox 	return (error == 0);
4120e0af8ecSBrian Feldman }
4130e0af8ecSBrian Feldman 
4140e0af8ecSBrian Feldman int
415780b1c09SAlan Cox _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
4160e0af8ecSBrian Feldman {
417bc91c510SAlan Cox 
418bc91c510SAlan Cox 	KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
419bc91c510SAlan Cox 		("%s: lock not held", __func__));
420bc91c510SAlan Cox 	map->timestamp++;
421bc91c510SAlan Cox 	return (0);
4220e0af8ecSBrian Feldman }
4230e0af8ecSBrian Feldman 
4240e0af8ecSBrian Feldman void
425780b1c09SAlan Cox _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
4261b40f8c0SMatthew Dillon {
427bc91c510SAlan Cox 
428bc91c510SAlan Cox 	KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
429bc91c510SAlan Cox 		("%s: lock not held", __func__));
43025adb370SBrian Feldman }
43125adb370SBrian Feldman 
43225adb370SBrian Feldman void
433780b1c09SAlan Cox _vm_map_set_recursive(vm_map_t map, const char *file, int line)
43425adb370SBrian Feldman {
43525adb370SBrian Feldman }
43625adb370SBrian Feldman 
43725adb370SBrian Feldman void
438780b1c09SAlan Cox _vm_map_clear_recursive(vm_map_t map, const char *file, int line)
43925adb370SBrian Feldman {
4401b40f8c0SMatthew Dillon }
4411b40f8c0SMatthew Dillon 
4421b40f8c0SMatthew Dillon struct pmap *
4431b40f8c0SMatthew Dillon vmspace_pmap(struct vmspace *vmspace)
4441b40f8c0SMatthew Dillon {
4451b40f8c0SMatthew Dillon 	return &vmspace->vm_pmap;
4461b40f8c0SMatthew Dillon }
4471b40f8c0SMatthew Dillon 
4481b40f8c0SMatthew Dillon long
4491b40f8c0SMatthew Dillon vmspace_resident_count(struct vmspace *vmspace)
4501b40f8c0SMatthew Dillon {
4511b40f8c0SMatthew Dillon 	return pmap_resident_count(vmspace_pmap(vmspace));
4521b40f8c0SMatthew Dillon }
4531b40f8c0SMatthew Dillon 
454ff2b5645SMatthew Dillon /*
455df8bae1dSRodney W. Grimes  *	vm_map_create:
456df8bae1dSRodney W. Grimes  *
457df8bae1dSRodney W. Grimes  *	Creates and returns a new empty VM map with
458df8bae1dSRodney W. Grimes  *	the given physical map structure, and having
459df8bae1dSRodney W. Grimes  *	the given lower and upper address bounds.
460df8bae1dSRodney W. Grimes  */
4610d94caffSDavid Greenman vm_map_t
4621b40f8c0SMatthew Dillon vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
463df8bae1dSRodney W. Grimes {
464c0877f10SJohn Dyson 	vm_map_t result;
465df8bae1dSRodney W. Grimes 
4660cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
4670cddd8f0SMatthew Dillon 
4688355f576SJeff Roberson 	result = uma_zalloc(mapzone, M_WAITOK);
46921c641b2SJohn Baldwin 	CTR1(KTR_VM, "vm_map_create: %p", result);
4708355f576SJeff Roberson 	_vm_map_init(result, min, max);
471df8bae1dSRodney W. Grimes 	result->pmap = pmap;
472df8bae1dSRodney W. Grimes 	return (result);
473df8bae1dSRodney W. Grimes }
474df8bae1dSRodney W. Grimes 
475df8bae1dSRodney W. Grimes /*
476df8bae1dSRodney W. Grimes  * Initialize an existing vm_map structure
477df8bae1dSRodney W. Grimes  * such as that in the vmspace structure.
478df8bae1dSRodney W. Grimes  * The pmap is set elsewhere.
479df8bae1dSRodney W. Grimes  */
4808355f576SJeff Roberson static void
4818355f576SJeff Roberson _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
482df8bae1dSRodney W. Grimes {
48321c641b2SJohn Baldwin 
484df8bae1dSRodney W. Grimes 	map->header.next = map->header.prev = &map->header;
4853075778bSJohn Dyson 	map->system_map = 0;
486df8bae1dSRodney W. Grimes 	map->min_offset = min;
487df8bae1dSRodney W. Grimes 	map->max_offset = max;
488df8bae1dSRodney W. Grimes 	map->first_free = &map->header;
4894e94f402SAlan Cox 	map->root = NULL;
490df8bae1dSRodney W. Grimes 	map->timestamp = 0;
491df8bae1dSRodney W. Grimes }
492df8bae1dSRodney W. Grimes 
493a18b1f1dSJason Evans void
4948355f576SJeff Roberson vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
495a18b1f1dSJason Evans {
4968355f576SJeff Roberson 	_vm_map_init(map, min, max);
497bc91c510SAlan Cox 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_CANRECURSE | LK_NOPAUSE);
498a18b1f1dSJason Evans }
499a18b1f1dSJason Evans 
500df8bae1dSRodney W. Grimes /*
501b18bfc3dSJohn Dyson  *	vm_map_entry_dispose:	[ internal use only ]
502b18bfc3dSJohn Dyson  *
503b18bfc3dSJohn Dyson  *	Inverse of vm_map_entry_create.
504b18bfc3dSJohn Dyson  */
50562487bb4SJohn Dyson static void
5061b40f8c0SMatthew Dillon vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
507b18bfc3dSJohn Dyson {
5088355f576SJeff Roberson 	uma_zfree((map->system_map || !mapentzone)
5098355f576SJeff Roberson 	    ? kmapentzone : mapentzone, entry);
510b18bfc3dSJohn Dyson }
511b18bfc3dSJohn Dyson 
512b18bfc3dSJohn Dyson /*
513df8bae1dSRodney W. Grimes  *	vm_map_entry_create:	[ internal use only ]
514df8bae1dSRodney W. Grimes  *
515df8bae1dSRodney W. Grimes  *	Allocates a VM map entry for insertion.
516b28cb1caSAlfred Perlstein  *	No entry fields are filled in.
517df8bae1dSRodney W. Grimes  */
518f708ef1bSPoul-Henning Kamp static vm_map_entry_t
5191b40f8c0SMatthew Dillon vm_map_entry_create(vm_map_t map)
520df8bae1dSRodney W. Grimes {
5211f6889a1SMatthew Dillon 	vm_map_entry_t new_entry;
5221f6889a1SMatthew Dillon 
5238355f576SJeff Roberson 	new_entry = uma_zalloc((map->system_map || !mapentzone) ?
5248355f576SJeff Roberson 		kmapentzone : mapentzone, M_WAITOK);
5251f6889a1SMatthew Dillon 	if (new_entry == NULL)
5261f6889a1SMatthew Dillon 	    panic("vm_map_entry_create: kernel resources exhausted");
5271f6889a1SMatthew Dillon 	return (new_entry);
528df8bae1dSRodney W. Grimes }
529df8bae1dSRodney W. Grimes 
530df8bae1dSRodney W. Grimes /*
5314e94f402SAlan Cox  *	vm_map_entry_splay:
5324e94f402SAlan Cox  *
5334e94f402SAlan Cox  *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
5344e94f402SAlan Cox  *	the vm_map_entry containing the given address.  If, however, that
5354e94f402SAlan Cox  *	address is not found in the vm_map, returns a vm_map_entry that is
5364e94f402SAlan Cox  *	adjacent to the address, coming before or after it.
5374e94f402SAlan Cox  */
5384e94f402SAlan Cox static vm_map_entry_t
5394e94f402SAlan Cox vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
5404e94f402SAlan Cox {
5414e94f402SAlan Cox 	struct vm_map_entry dummy;
5424e94f402SAlan Cox 	vm_map_entry_t lefttreemax, righttreemin, y;
5434e94f402SAlan Cox 
5444e94f402SAlan Cox 	if (root == NULL)
5454e94f402SAlan Cox 		return (root);
5464e94f402SAlan Cox 	dummy.left = dummy.right = NULL;
5474e94f402SAlan Cox 	lefttreemax = righttreemin = &dummy;
5484e94f402SAlan Cox 	for (;;) {
5494e94f402SAlan Cox 		if (address < root->start) {
5504e94f402SAlan Cox 			if (root->left == NULL)
5514e94f402SAlan Cox 				break;
5524e94f402SAlan Cox 			if (address < root->left->start) {
5534e94f402SAlan Cox 				/* Rotate right. */
5544e94f402SAlan Cox 				y = root->left;
5554e94f402SAlan Cox 				root->left = y->right;
5564e94f402SAlan Cox 				y->right = root;
5574e94f402SAlan Cox 				root = y;
5584e94f402SAlan Cox 				if (root->left == NULL)
5594e94f402SAlan Cox 					break;
5604e94f402SAlan Cox 			}
5614e94f402SAlan Cox 			/* Link into the new root's right tree. */
5624e94f402SAlan Cox 			righttreemin->left = root;
5634e94f402SAlan Cox 			righttreemin = root;
5644e94f402SAlan Cox 			root = root->left;
5654e94f402SAlan Cox 		} else if (address >= root->end) {
5664e94f402SAlan Cox 			if (root->right == NULL)
5674e94f402SAlan Cox 				break;
5684e94f402SAlan Cox 			if (address >= root->right->end) {
5694e94f402SAlan Cox 				/* Rotate left. */
5704e94f402SAlan Cox 				y = root->right;
5714e94f402SAlan Cox 				root->right = y->left;
5724e94f402SAlan Cox 				y->left = root;
5734e94f402SAlan Cox 				root = y;
5744e94f402SAlan Cox 				if (root->right == NULL)
5754e94f402SAlan Cox 					break;
5764e94f402SAlan Cox 			}
5774e94f402SAlan Cox 			/* Link into the new root's left tree. */
5784e94f402SAlan Cox 			lefttreemax->right = root;
5794e94f402SAlan Cox 			lefttreemax = root;
5804e94f402SAlan Cox 			root = root->right;
5814e94f402SAlan Cox 		} else
5824e94f402SAlan Cox 			break;
5834e94f402SAlan Cox 	}
5844e94f402SAlan Cox 	/* Assemble the new root. */
5854e94f402SAlan Cox 	lefttreemax->right = root->left;
5864e94f402SAlan Cox 	righttreemin->left = root->right;
5874e94f402SAlan Cox 	root->left = dummy.right;
5884e94f402SAlan Cox 	root->right = dummy.left;
5894e94f402SAlan Cox 	return (root);
5904e94f402SAlan Cox }
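
/*
 * Sketch of the effect (hypothetical tree): splaying an address that
 * falls within entry "c" of the right-leaning chain a -> b -> c hoists
 * "c" to the root and rebuilds "a" and "b" as its left subtree, so an
 * immediately repeated lookup of a nearby address is nearly free.  A
 * single splay may cost O(n), but m lookups on an n-entry tree take
 * O((m + n) lg n) amortized.
 */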
5914e94f402SAlan Cox 
5924e94f402SAlan Cox /*
593df8bae1dSRodney W. Grimes  *	vm_map_entry_{un,}link:
594df8bae1dSRodney W. Grimes  *
595df8bae1dSRodney W. Grimes  *	Insert/remove entries from maps.
596df8bae1dSRodney W. Grimes  */
5974e94f402SAlan Cox static void
59899c81ca9SAlan Cox vm_map_entry_link(vm_map_t map,
59999c81ca9SAlan Cox 		  vm_map_entry_t after_where,
60099c81ca9SAlan Cox 		  vm_map_entry_t entry)
60199c81ca9SAlan Cox {
60221c641b2SJohn Baldwin 
60321c641b2SJohn Baldwin 	CTR4(KTR_VM,
60421c641b2SJohn Baldwin 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
60521c641b2SJohn Baldwin 	    map->nentries, entry, after_where);
60699c81ca9SAlan Cox 	map->nentries++;
60799c81ca9SAlan Cox 	entry->prev = after_where;
60899c81ca9SAlan Cox 	entry->next = after_where->next;
60999c81ca9SAlan Cox 	entry->next->prev = entry;
61099c81ca9SAlan Cox 	after_where->next = entry;
6114e94f402SAlan Cox 
6124e94f402SAlan Cox 	if (after_where != &map->header) {
6134e94f402SAlan Cox 		if (after_where != map->root)
6144e94f402SAlan Cox 			vm_map_entry_splay(after_where->start, map->root);
6154e94f402SAlan Cox 		entry->right = after_where->right;
6164e94f402SAlan Cox 		entry->left = after_where;
6174e94f402SAlan Cox 		after_where->right = NULL;
6184e94f402SAlan Cox 	} else {
6194e94f402SAlan Cox 		entry->right = map->root;
6204e94f402SAlan Cox 		entry->left = NULL;
6214e94f402SAlan Cox 	}
6224e94f402SAlan Cox 	map->root = entry;
623df8bae1dSRodney W. Grimes }
62499c81ca9SAlan Cox 
6254e94f402SAlan Cox static void
62699c81ca9SAlan Cox vm_map_entry_unlink(vm_map_t map,
62799c81ca9SAlan Cox 		    vm_map_entry_t entry)
62899c81ca9SAlan Cox {
6294e94f402SAlan Cox 	vm_map_entry_t next, prev, root;
63099c81ca9SAlan Cox 
6314e94f402SAlan Cox 	if (entry != map->root)
6324e94f402SAlan Cox 		vm_map_entry_splay(entry->start, map->root);
6334e94f402SAlan Cox 	if (entry->left == NULL)
6344e94f402SAlan Cox 		root = entry->right;
6354e94f402SAlan Cox 	else {
6364e94f402SAlan Cox 		root = vm_map_entry_splay(entry->start, entry->left);
6374e94f402SAlan Cox 		root->right = entry->right;
6384e94f402SAlan Cox 	}
6394e94f402SAlan Cox 	map->root = root;
6404e94f402SAlan Cox 
6414e94f402SAlan Cox 	prev = entry->prev;
6424e94f402SAlan Cox 	next = entry->next;
64399c81ca9SAlan Cox 	next->prev = prev;
64499c81ca9SAlan Cox 	prev->next = next;
64599c81ca9SAlan Cox 	map->nentries--;
64621c641b2SJohn Baldwin 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
64721c641b2SJohn Baldwin 	    map->nentries, entry);
648df8bae1dSRodney W. Grimes }
649df8bae1dSRodney W. Grimes 
650df8bae1dSRodney W. Grimes /*
651df8bae1dSRodney W. Grimes  *	vm_map_lookup_entry:	[ internal use only ]
652df8bae1dSRodney W. Grimes  *
653df8bae1dSRodney W. Grimes  *	Finds the map entry containing (or
654df8bae1dSRodney W. Grimes  *	immediately preceding) the specified address
655df8bae1dSRodney W. Grimes  *	in the given map; the entry is returned
656df8bae1dSRodney W. Grimes  *	in the "entry" parameter.  The boolean
657df8bae1dSRodney W. Grimes  *	result indicates whether the address is
658df8bae1dSRodney W. Grimes  *	actually contained in the map.
659df8bae1dSRodney W. Grimes  */
6600d94caffSDavid Greenman boolean_t
6611b40f8c0SMatthew Dillon vm_map_lookup_entry(
6621b40f8c0SMatthew Dillon 	vm_map_t map,
6631b40f8c0SMatthew Dillon 	vm_offset_t address,
6641b40f8c0SMatthew Dillon 	vm_map_entry_t *entry)	/* OUT */
665df8bae1dSRodney W. Grimes {
666c0877f10SJohn Dyson 	vm_map_entry_t cur;
667df8bae1dSRodney W. Grimes 
6684e94f402SAlan Cox 	cur = vm_map_entry_splay(address, map->root);
6694e94f402SAlan Cox 	if (cur == NULL)
6704e94f402SAlan Cox 		*entry = &map->header;
6714e94f402SAlan Cox 	else {
6724e94f402SAlan Cox 		map->root = cur;
673df8bae1dSRodney W. Grimes 
674df8bae1dSRodney W. Grimes 		if (address >= cur->start) {
675df8bae1dSRodney W. Grimes 			*entry = cur;
6764e94f402SAlan Cox 			if (cur->end > address)
677df8bae1dSRodney W. Grimes 				return (TRUE);
6784e94f402SAlan Cox 		} else
679df8bae1dSRodney W. Grimes 			*entry = cur->prev;
6804e94f402SAlan Cox 	}
681df8bae1dSRodney W. Grimes 	return (FALSE);
682df8bae1dSRodney W. Grimes }
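
/*
 * Typical caller pattern (illustrative; compare vm_map_protect() and
 * similar range operations later in this file):
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	...
 *	vm_map_unlock(map);
 */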
683df8bae1dSRodney W. Grimes 
684df8bae1dSRodney W. Grimes /*
68530dcfc09SJohn Dyson  *	vm_map_insert:
68630dcfc09SJohn Dyson  *
68730dcfc09SJohn Dyson  *	Inserts the given whole VM object into the target
68830dcfc09SJohn Dyson  *	map at the specified address range.  The object's
68930dcfc09SJohn Dyson  *	size should match that of the address range.
69030dcfc09SJohn Dyson  *
69130dcfc09SJohn Dyson  *	Requires that the map be locked, and leaves it so.
6922aaeadf8SMatthew Dillon  *
6932aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
6942aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
69530dcfc09SJohn Dyson  */
69630dcfc09SJohn Dyson int
697b9dcd593SBruce Evans vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
698b9dcd593SBruce Evans 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
699b9dcd593SBruce Evans 	      int cow)
70030dcfc09SJohn Dyson {
701c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
702c0877f10SJohn Dyson 	vm_map_entry_t prev_entry;
70330dcfc09SJohn Dyson 	vm_map_entry_t temp_entry;
7049730a5daSPaul Saab 	vm_eflags_t protoeflags;
70530dcfc09SJohn Dyson 
7060cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
7070cddd8f0SMatthew Dillon 
70830dcfc09SJohn Dyson 	/*
70930dcfc09SJohn Dyson 	 * Check that the start and end points are not bogus.
71030dcfc09SJohn Dyson 	 */
71130dcfc09SJohn Dyson 	if ((start < map->min_offset) || (end > map->max_offset) ||
71230dcfc09SJohn Dyson 	    (start >= end))
71330dcfc09SJohn Dyson 		return (KERN_INVALID_ADDRESS);
71430dcfc09SJohn Dyson 
71530dcfc09SJohn Dyson 	/*
71630dcfc09SJohn Dyson 	 * Find the entry prior to the proposed starting address; if it's part
71730dcfc09SJohn Dyson 	 * of an existing entry, this range is bogus.
71830dcfc09SJohn Dyson 	 */
71930dcfc09SJohn Dyson 	if (vm_map_lookup_entry(map, start, &temp_entry))
72030dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
72130dcfc09SJohn Dyson 
72230dcfc09SJohn Dyson 	prev_entry = temp_entry;
72330dcfc09SJohn Dyson 
72430dcfc09SJohn Dyson 	/*
72530dcfc09SJohn Dyson 	 * Assert that the next entry doesn't overlap the end point.
72630dcfc09SJohn Dyson 	 */
72730dcfc09SJohn Dyson 	if ((prev_entry->next != &map->header) &&
72830dcfc09SJohn Dyson 	    (prev_entry->next->start < end))
72930dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
73030dcfc09SJohn Dyson 
731afa07f7eSJohn Dyson 	protoeflags = 0;
732afa07f7eSJohn Dyson 
733afa07f7eSJohn Dyson 	if (cow & MAP_COPY_ON_WRITE)
734e5f13bddSAlan Cox 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
735afa07f7eSJohn Dyson 
7364e045f93SAlan Cox 	if (cow & MAP_NOFAULT) {
737afa07f7eSJohn Dyson 		protoeflags |= MAP_ENTRY_NOFAULT;
738afa07f7eSJohn Dyson 
7394e045f93SAlan Cox 		KASSERT(object == NULL,
7404e045f93SAlan Cox 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
7414e045f93SAlan Cox 	}
7424f79d873SMatthew Dillon 	if (cow & MAP_DISABLE_SYNCER)
7434f79d873SMatthew Dillon 		protoeflags |= MAP_ENTRY_NOSYNC;
7449730a5daSPaul Saab 	if (cow & MAP_DISABLE_COREDUMP)
7459730a5daSPaul Saab 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
7464f79d873SMatthew Dillon 
7472aaeadf8SMatthew Dillon 	if (object) {
74830dcfc09SJohn Dyson 		/*
7492aaeadf8SMatthew Dillon 		 * When object is non-NULL, it could be shared with another
7502aaeadf8SMatthew Dillon 		 * process.  We have to set or clear OBJ_ONEMAPPING
7512aaeadf8SMatthew Dillon 		 * appropriately.
75230dcfc09SJohn Dyson 		 */
7532aaeadf8SMatthew Dillon 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
7542aaeadf8SMatthew Dillon 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
7552aaeadf8SMatthew Dillon 		}
7564e045f93SAlan Cox 	}
7574e045f93SAlan Cox 	else if ((prev_entry != &map->header) &&
7584e045f93SAlan Cox 		 (prev_entry->eflags == protoeflags) &&
7598cc7e047SJohn Dyson 		 (prev_entry->end == start) &&
7604e045f93SAlan Cox 		 (prev_entry->wired_count == 0) &&
7614e045f93SAlan Cox 		 ((prev_entry->object.vm_object == NULL) ||
7628cc7e047SJohn Dyson 		  vm_object_coalesce(prev_entry->object.vm_object,
76330dcfc09SJohn Dyson 				     OFF_TO_IDX(prev_entry->offset),
7648cc7e047SJohn Dyson 				     (vm_size_t)(prev_entry->end - prev_entry->start),
765cdc2c291SJohn Dyson 				     (vm_size_t)(end - prev_entry->end)))) {
76630dcfc09SJohn Dyson 		/*
7672aaeadf8SMatthew Dillon 		 * We were able to extend the object.  Determine if we
7682aaeadf8SMatthew Dillon 		 * can extend the previous map entry to include the
7692aaeadf8SMatthew Dillon 		 * new range as well.
77030dcfc09SJohn Dyson 		 */
7718cc7e047SJohn Dyson 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
7728cc7e047SJohn Dyson 		    (prev_entry->protection == prot) &&
7738cc7e047SJohn Dyson 		    (prev_entry->max_protection == max)) {
77430dcfc09SJohn Dyson 			map->size += (end - prev_entry->end);
77530dcfc09SJohn Dyson 			prev_entry->end = end;
7764e71e795SMatthew Dillon 			vm_map_simplify_entry(map, prev_entry);
77730dcfc09SJohn Dyson 			return (KERN_SUCCESS);
77830dcfc09SJohn Dyson 		}
7798cc7e047SJohn Dyson 
7802aaeadf8SMatthew Dillon 		/*
7812aaeadf8SMatthew Dillon 		 * If we can extend the object but cannot extend the
7822aaeadf8SMatthew Dillon 		 * map entry, we have to create a new map entry.  We
7832aaeadf8SMatthew Dillon 		 * must bump the ref count on the extended object to
7844e71e795SMatthew Dillon 		 * account for it.  object may be NULL.
7852aaeadf8SMatthew Dillon 		 */
7862aaeadf8SMatthew Dillon 		object = prev_entry->object.vm_object;
7872aaeadf8SMatthew Dillon 		offset = prev_entry->offset +
7882aaeadf8SMatthew Dillon 			(prev_entry->end - prev_entry->start);
7898cc7e047SJohn Dyson 		vm_object_reference(object);
790b18bfc3dSJohn Dyson 	}
7912aaeadf8SMatthew Dillon 
7922aaeadf8SMatthew Dillon 	/*
7932aaeadf8SMatthew Dillon 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
7942aaeadf8SMatthew Dillon 	 * in things like the buffer map where we manage kva but do not manage
7952aaeadf8SMatthew Dillon 	 * backing objects.
7962aaeadf8SMatthew Dillon 	 */
7978cc7e047SJohn Dyson 
79830dcfc09SJohn Dyson 	/*
79930dcfc09SJohn Dyson 	 * Create a new entry
80030dcfc09SJohn Dyson 	 */
80130dcfc09SJohn Dyson 	new_entry = vm_map_entry_create(map);
80230dcfc09SJohn Dyson 	new_entry->start = start;
80330dcfc09SJohn Dyson 	new_entry->end = end;
80430dcfc09SJohn Dyson 
805afa07f7eSJohn Dyson 	new_entry->eflags = protoeflags;
80630dcfc09SJohn Dyson 	new_entry->object.vm_object = object;
80730dcfc09SJohn Dyson 	new_entry->offset = offset;
8082267af78SJulian Elischer 	new_entry->avail_ssize = 0;
8092267af78SJulian Elischer 
81030dcfc09SJohn Dyson 	new_entry->inheritance = VM_INHERIT_DEFAULT;
81130dcfc09SJohn Dyson 	new_entry->protection = prot;
81230dcfc09SJohn Dyson 	new_entry->max_protection = max;
81330dcfc09SJohn Dyson 	new_entry->wired_count = 0;
814e5f251d2SAlan Cox 
81530dcfc09SJohn Dyson 	/*
81630dcfc09SJohn Dyson 	 * Insert the new entry into the list
81730dcfc09SJohn Dyson 	 */
81830dcfc09SJohn Dyson 	vm_map_entry_link(map, prev_entry, new_entry);
81930dcfc09SJohn Dyson 	map->size += new_entry->end - new_entry->start;
82030dcfc09SJohn Dyson 
82130dcfc09SJohn Dyson 	/*
82230dcfc09SJohn Dyson 	 * Update the free space hint
82330dcfc09SJohn Dyson 	 */
82467bf6868SJohn Dyson 	if ((map->first_free == prev_entry) &&
8254f79d873SMatthew Dillon 	    (prev_entry->end >= new_entry->start)) {
82630dcfc09SJohn Dyson 		map->first_free = new_entry;
8274f79d873SMatthew Dillon 	}
82830dcfc09SJohn Dyson 
8291a484d28SMatthew Dillon #if 0
8301a484d28SMatthew Dillon 	/*
8311a484d28SMatthew Dillon 	 * Temporarily removed to avoid MAP_STACK panic, due to
8321a484d28SMatthew Dillon 	 * MAP_STACK being a huge hack.  Will be added back in
8331a484d28SMatthew Dillon 	 * when MAP_STACK (and the user stack mapping) is fixed.
8341a484d28SMatthew Dillon 	 */
8354e71e795SMatthew Dillon 	/*
8364e71e795SMatthew Dillon 	 * It may be possible to simplify the entry
8374e71e795SMatthew Dillon 	 */
8384e71e795SMatthew Dillon 	vm_map_simplify_entry(map, new_entry);
8391a484d28SMatthew Dillon #endif
8404e71e795SMatthew Dillon 
8414f79d873SMatthew Dillon 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
842e972780aSAlan Cox 		pmap_object_init_pt(map->pmap, start,
843e972780aSAlan Cox 				    object, OFF_TO_IDX(offset), end - start,
844e972780aSAlan Cox 				    cow & MAP_PREFAULT_PARTIAL);
8454f79d873SMatthew Dillon 	}
846e972780aSAlan Cox 
84730dcfc09SJohn Dyson 	return (KERN_SUCCESS);
84830dcfc09SJohn Dyson }
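
/*
 * Reference-count discipline (a sketch of the contract in the header
 * comment, not code from this file): a caller mapping a non-NULL
 * object bumps its reference count to cover the new entry and undoes
 * the bump on failure:
 *
 *	vm_object_reference(object);
 *	rv = vm_map_insert(map, object, offset, start, end, prot,
 *	    max, cow);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */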
84930dcfc09SJohn Dyson 
85030dcfc09SJohn Dyson /*
851df8bae1dSRodney W. Grimes  * Find sufficient space for `length' bytes in the given map, starting at
852df8bae1dSRodney W. Grimes  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
853df8bae1dSRodney W. Grimes  */
854df8bae1dSRodney W. Grimes int
8551b40f8c0SMatthew Dillon vm_map_findspace(
8561b40f8c0SMatthew Dillon 	vm_map_t map,
8571b40f8c0SMatthew Dillon 	vm_offset_t start,
8581b40f8c0SMatthew Dillon 	vm_size_t length,
8591b40f8c0SMatthew Dillon 	vm_offset_t *addr)
860df8bae1dSRodney W. Grimes {
861c0877f10SJohn Dyson 	vm_map_entry_t entry, next;
862c0877f10SJohn Dyson 	vm_offset_t end;
863df8bae1dSRodney W. Grimes 
8640cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
865df8bae1dSRodney W. Grimes 	if (start < map->min_offset)
866df8bae1dSRodney W. Grimes 		start = map->min_offset;
867df8bae1dSRodney W. Grimes 	if (start > map->max_offset)
868df8bae1dSRodney W. Grimes 		return (1);
869df8bae1dSRodney W. Grimes 
870df8bae1dSRodney W. Grimes 	/*
8710d94caffSDavid Greenman 	 * Look for the first possible address; if there's already something
8720d94caffSDavid Greenman 	 * at this address, we have to start after it.
873df8bae1dSRodney W. Grimes 	 */
874df8bae1dSRodney W. Grimes 	if (start == map->min_offset) {
87567bf6868SJohn Dyson 		if ((entry = map->first_free) != &map->header)
876df8bae1dSRodney W. Grimes 			start = entry->end;
877df8bae1dSRodney W. Grimes 	} else {
878df8bae1dSRodney W. Grimes 		vm_map_entry_t tmp;
8790d94caffSDavid Greenman 
880df8bae1dSRodney W. Grimes 		if (vm_map_lookup_entry(map, start, &tmp))
881df8bae1dSRodney W. Grimes 			start = tmp->end;
882df8bae1dSRodney W. Grimes 		entry = tmp;
883df8bae1dSRodney W. Grimes 	}
884df8bae1dSRodney W. Grimes 
885df8bae1dSRodney W. Grimes 	/*
8860d94caffSDavid Greenman 	 * Look through the rest of the map, trying to fit a new region in the
8870d94caffSDavid Greenman 	 * gap between existing regions, or after the very last region.
888df8bae1dSRodney W. Grimes 	 */
889df8bae1dSRodney W. Grimes 	for (;; start = (entry = next)->end) {
890df8bae1dSRodney W. Grimes 		/*
891df8bae1dSRodney W. Grimes 		 * Find the end of the proposed new region.  Be sure we didn't
892df8bae1dSRodney W. Grimes 		 * go beyond the end of the map, or wrap around the address;
893df8bae1dSRodney W. Grimes 		 * if so, we lose.  Otherwise, if this is the last entry, or
894df8bae1dSRodney W. Grimes 		 * if the proposed new region fits before the next entry, we
895df8bae1dSRodney W. Grimes 		 * win.
896df8bae1dSRodney W. Grimes 		 */
897df8bae1dSRodney W. Grimes 		end = start + length;
898df8bae1dSRodney W. Grimes 		if (end > map->max_offset || end < start)
899df8bae1dSRodney W. Grimes 			return (1);
900df8bae1dSRodney W. Grimes 		next = entry->next;
901df8bae1dSRodney W. Grimes 		if (next == &map->header || next->start >= end)
902df8bae1dSRodney W. Grimes 			break;
903df8bae1dSRodney W. Grimes 	}
904df8bae1dSRodney W. Grimes 	*addr = start;
90599448ed1SJohn Dyson 	if (map == kernel_map) {
90699448ed1SJohn Dyson 		vm_offset_t ksize;
90799448ed1SJohn Dyson 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
90899448ed1SJohn Dyson 			pmap_growkernel(ksize);
90999448ed1SJohn Dyson 		}
91099448ed1SJohn Dyson 	}
911df8bae1dSRodney W. Grimes 	return (0);
912df8bae1dSRodney W. Grimes }
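
/*
 * Example (hypothetical layout): with existing entries at
 * [0x1000, 0x3000) and [0x5000, 0x6000),
 * vm_map_findspace(map, 0x1000, 0x2000, &addr) returns 0 with
 * addr == 0x3000, the first gap that fits; a request for 0x4000 bytes
 * skips that gap and yields addr == 0x6000, past the last entry.
 */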
913df8bae1dSRodney W. Grimes 
914df8bae1dSRodney W. Grimes /*
915df8bae1dSRodney W. Grimes  *	vm_map_find finds an unallocated region in the target address
916df8bae1dSRodney W. Grimes  *	map with the given length.  The search is defined to be
917df8bae1dSRodney W. Grimes  *	first-fit from the specified address; the region found is
918df8bae1dSRodney W. Grimes  *	returned in the same parameter.
919df8bae1dSRodney W. Grimes  *
9202aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
9212aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
922df8bae1dSRodney W. Grimes  */
923df8bae1dSRodney W. Grimes int
924b9dcd593SBruce Evans vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
925b9dcd593SBruce Evans 	    vm_offset_t *addr,	/* IN/OUT */
926b9dcd593SBruce Evans 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
927b9dcd593SBruce Evans 	    vm_prot_t max, int cow)
928df8bae1dSRodney W. Grimes {
929c0877f10SJohn Dyson 	vm_offset_t start;
9308d6e8edeSDavid Greenman 	int result, s = 0;
931df8bae1dSRodney W. Grimes 
9320cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
9330cddd8f0SMatthew Dillon 
934df8bae1dSRodney W. Grimes 	start = *addr;
9358d6e8edeSDavid Greenman 
93608442f8aSBosko Milekic 	if (map == kmem_map)
937b18bfc3dSJohn Dyson 		s = splvm();
9388d6e8edeSDavid Greenman 
939bea41bcfSDavid Greenman 	vm_map_lock(map);
940df8bae1dSRodney W. Grimes 	if (find_space) {
941df8bae1dSRodney W. Grimes 		if (vm_map_findspace(map, start, length, addr)) {
942df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
94308442f8aSBosko Milekic 			if (map == kmem_map)
9448d6e8edeSDavid Greenman 				splx(s);
945df8bae1dSRodney W. Grimes 			return (KERN_NO_SPACE);
946df8bae1dSRodney W. Grimes 		}
947df8bae1dSRodney W. Grimes 		start = *addr;
948df8bae1dSRodney W. Grimes 	}
949bd7e5f99SJohn Dyson 	result = vm_map_insert(map, object, offset,
950bd7e5f99SJohn Dyson 		start, start + length, prot, max, cow);
951df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
9528d6e8edeSDavid Greenman 
95308442f8aSBosko Milekic 	if (map == kmem_map)
9548d6e8edeSDavid Greenman 		splx(s);
9558d6e8edeSDavid Greenman 
956df8bae1dSRodney W. Grimes 	return (result);
957df8bae1dSRodney W. Grimes }
958df8bae1dSRodney W. Grimes 
959df8bae1dSRodney W. Grimes /*
960b7b2aac2SJohn Dyson  *	vm_map_simplify_entry:
96167bf6868SJohn Dyson  *
9624e71e795SMatthew Dillon  *	Simplify the given map entry by merging with either neighbor.  This
9634e71e795SMatthew Dillon  *	routine also has the ability to merge with both neighbors.
9644e71e795SMatthew Dillon  *
9654e71e795SMatthew Dillon  *	The map must be locked.
9664e71e795SMatthew Dillon  *
9674e71e795SMatthew Dillon  *	This routine guarantees that the passed entry remains valid (though
9684e71e795SMatthew Dillon  *	possibly extended).  When merging, this routine may delete one or
9694e71e795SMatthew Dillon  *	both neighbors.
970df8bae1dSRodney W. Grimes  */
971b7b2aac2SJohn Dyson void
9721b40f8c0SMatthew Dillon vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
973df8bae1dSRodney W. Grimes {
974308c24baSJohn Dyson 	vm_map_entry_t next, prev;
975b7b2aac2SJohn Dyson 	vm_size_t prevsize, esize;
976df8bae1dSRodney W. Grimes 
9779fdfe602SMatthew Dillon 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
978df8bae1dSRodney W. Grimes 		return;
979308c24baSJohn Dyson 
980308c24baSJohn Dyson 	prev = entry->prev;
981308c24baSJohn Dyson 	if (prev != &map->header) {
98267bf6868SJohn Dyson 		prevsize = prev->end - prev->start;
98367bf6868SJohn Dyson 		if ( (prev->end == entry->start) &&
98467bf6868SJohn Dyson 		     (prev->object.vm_object == entry->object.vm_object) &&
98595e5e988SJohn Dyson 		     (!prev->object.vm_object ||
98667bf6868SJohn Dyson 			(prev->offset + prevsize == entry->offset)) &&
987afa07f7eSJohn Dyson 		     (prev->eflags == entry->eflags) &&
98867bf6868SJohn Dyson 		     (prev->protection == entry->protection) &&
98967bf6868SJohn Dyson 		     (prev->max_protection == entry->max_protection) &&
99067bf6868SJohn Dyson 		     (prev->inheritance == entry->inheritance) &&
991b7b2aac2SJohn Dyson 		     (prev->wired_count == entry->wired_count)) {
992308c24baSJohn Dyson 			if (map->first_free == prev)
993308c24baSJohn Dyson 				map->first_free = entry;
994308c24baSJohn Dyson 			vm_map_entry_unlink(map, prev);
995308c24baSJohn Dyson 			entry->start = prev->start;
996308c24baSJohn Dyson 			entry->offset = prev->offset;
997b18bfc3dSJohn Dyson 			if (prev->object.vm_object)
998308c24baSJohn Dyson 				vm_object_deallocate(prev->object.vm_object);
999308c24baSJohn Dyson 			vm_map_entry_dispose(map, prev);
1000308c24baSJohn Dyson 		}
1001308c24baSJohn Dyson 	}
1002de5f6a77SJohn Dyson 
1003de5f6a77SJohn Dyson 	next = entry->next;
1004308c24baSJohn Dyson 	if (next != &map->header) {
100567bf6868SJohn Dyson 		esize = entry->end - entry->start;
100667bf6868SJohn Dyson 		if ((entry->end == next->start) &&
100767bf6868SJohn Dyson 		    (next->object.vm_object == entry->object.vm_object) &&
100867bf6868SJohn Dyson 		     (!entry->object.vm_object ||
100967bf6868SJohn Dyson 			(entry->offset + esize == next->offset)) &&
1010afa07f7eSJohn Dyson 		    (next->eflags == entry->eflags) &&
101167bf6868SJohn Dyson 		    (next->protection == entry->protection) &&
101267bf6868SJohn Dyson 		    (next->max_protection == entry->max_protection) &&
101367bf6868SJohn Dyson 		    (next->inheritance == entry->inheritance) &&
1014b7b2aac2SJohn Dyson 		    (next->wired_count == entry->wired_count)) {
1015308c24baSJohn Dyson 			if (map->first_free == next)
1016308c24baSJohn Dyson 				map->first_free = entry;
1017de5f6a77SJohn Dyson 			vm_map_entry_unlink(map, next);
1018de5f6a77SJohn Dyson 			entry->end = next->end;
1019b18bfc3dSJohn Dyson 			if (next->object.vm_object)
1020de5f6a77SJohn Dyson 				vm_object_deallocate(next->object.vm_object);
1021de5f6a77SJohn Dyson 			vm_map_entry_dispose(map, next);
1022df8bae1dSRodney W. Grimes 	        }
1023df8bae1dSRodney W. Grimes 	}
1024de5f6a77SJohn Dyson }
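
/*
 * Illustrative merge (hypothetical values): if prev maps [A, B) at
 * object offset 0 and entry maps [B, C) at offset B - A in the same
 * object, with matching eflags, protection, inheritance, and wire
 * count, then prev is unlinked, entry grows to [A, C) at offset 0,
 * and the reference prev held on the object is dropped.
 */
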
1025df8bae1dSRodney W. Grimes /*
1026df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
1027df8bae1dSRodney W. Grimes  *
1028df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
1029df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
1030df8bae1dSRodney W. Grimes  *	it splits the entry into two.
1031df8bae1dSRodney W. Grimes  */
1032df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \
1033df8bae1dSRodney W. Grimes { \
1034df8bae1dSRodney W. Grimes 	if (startaddr > entry->start) \
1035df8bae1dSRodney W. Grimes 		_vm_map_clip_start(map, entry, startaddr); \
1036df8bae1dSRodney W. Grimes }
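
/*
 * Sketch (hypothetical addresses): clipping an entry spanning
 * [0x2000, 0x6000) at 0x3000 leaves two entries, [0x2000, 0x3000) and
 * [0x3000, 0x6000), the second with its offset advanced by 0x1000 so
 * that both halves keep mapping the same object pages.
 */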
1037df8bae1dSRodney W. Grimes 
1038df8bae1dSRodney W. Grimes /*
1039df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
1040df8bae1dSRodney W. Grimes  *	the entry must be split.
1041df8bae1dSRodney W. Grimes  */
10420d94caffSDavid Greenman static void
10431b40f8c0SMatthew Dillon _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1044df8bae1dSRodney W. Grimes {
1045c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
1046df8bae1dSRodney W. Grimes 
1047df8bae1dSRodney W. Grimes 	/*
10480d94caffSDavid Greenman 	 * Split off the front portion -- note that we must insert the new
10490d94caffSDavid Greenman 	 * entry BEFORE this one, so that this entry has the specified
10500d94caffSDavid Greenman 	 * starting address.
1051df8bae1dSRodney W. Grimes 	 */
1052f32dbbeeSJohn Dyson 	vm_map_simplify_entry(map, entry);
1053f32dbbeeSJohn Dyson 
105411cccda1SJohn Dyson 	/*
105511cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
105611cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
105711cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
105811cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
105911cccda1SJohn Dyson 	 * put this improvement.
106011cccda1SJohn Dyson 	 */
10614e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL && !map->system_map) {
106211cccda1SJohn Dyson 		vm_object_t object;
106311cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
1064c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
106511cccda1SJohn Dyson 		entry->object.vm_object = object;
106611cccda1SJohn Dyson 		entry->offset = 0;
106711cccda1SJohn Dyson 	}
106811cccda1SJohn Dyson 
1069df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
1070df8bae1dSRodney W. Grimes 	*new_entry = *entry;
1071df8bae1dSRodney W. Grimes 
1072df8bae1dSRodney W. Grimes 	new_entry->end = start;
1073df8bae1dSRodney W. Grimes 	entry->offset += (start - entry->start);
1074df8bae1dSRodney W. Grimes 	entry->start = start;
1075df8bae1dSRodney W. Grimes 
1076df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry->prev, new_entry);
1077df8bae1dSRodney W. Grimes 
10789fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1079df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
1080df8bae1dSRodney W. Grimes 	}
1081c0877f10SJohn Dyson }
1082df8bae1dSRodney W. Grimes 
1083df8bae1dSRodney W. Grimes /*
1084df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
1085df8bae1dSRodney W. Grimes  *
1086df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
1087df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
1088df8bae1dSRodney W. Grimes  *	it splits the entry into two.
1089df8bae1dSRodney W. Grimes  */
1090df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \
1091df8bae1dSRodney W. Grimes { \
1092df8bae1dSRodney W. Grimes 	if (endaddr < entry->end) \
1093df8bae1dSRodney W. Grimes 		_vm_map_clip_end(map, entry, endaddr); \
1094df8bae1dSRodney W. Grimes }
1095df8bae1dSRodney W. Grimes 
1096df8bae1dSRodney W. Grimes /*
1097df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
1098df8bae1dSRodney W. Grimes  *	the entry must be split.
1099df8bae1dSRodney W. Grimes  */
11000d94caffSDavid Greenman static void
11011b40f8c0SMatthew Dillon _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1102df8bae1dSRodney W. Grimes {
1103c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
1104df8bae1dSRodney W. Grimes 
1105df8bae1dSRodney W. Grimes 	/*
110611cccda1SJohn Dyson 	 * If there is no object backing this entry, we might as well create
110711cccda1SJohn Dyson 	 * one now.  If we defer it, an object can get created after the map
110811cccda1SJohn Dyson 	 * is clipped, and individual objects will be created for the split-up
110911cccda1SJohn Dyson 	 * map.  This is a bit of a hack, but is also about the best place to
111011cccda1SJohn Dyson 	 * put this improvement.
111111cccda1SJohn Dyson 	 */
11124e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL && !map->system_map) {
111311cccda1SJohn Dyson 		vm_object_t object;
111411cccda1SJohn Dyson 		object = vm_object_allocate(OBJT_DEFAULT,
1115c2e11a03SJohn Dyson 				atop(entry->end - entry->start));
111611cccda1SJohn Dyson 		entry->object.vm_object = object;
111711cccda1SJohn Dyson 		entry->offset = 0;
111811cccda1SJohn Dyson 	}
111911cccda1SJohn Dyson 
112011cccda1SJohn Dyson 	/*
11210d94caffSDavid Greenman 	 * Create a new entry and insert it AFTER the specified entry
1122df8bae1dSRodney W. Grimes 	 */
1123df8bae1dSRodney W. Grimes 	new_entry = vm_map_entry_create(map);
1124df8bae1dSRodney W. Grimes 	*new_entry = *entry;
1125df8bae1dSRodney W. Grimes 
1126df8bae1dSRodney W. Grimes 	new_entry->start = entry->end = end;
1127df8bae1dSRodney W. Grimes 	new_entry->offset += (end - entry->start);
1128df8bae1dSRodney W. Grimes 
1129df8bae1dSRodney W. Grimes 	vm_map_entry_link(map, entry, new_entry);
1130df8bae1dSRodney W. Grimes 
11319fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1132df8bae1dSRodney W. Grimes 		vm_object_reference(new_entry->object.vm_object);
1133df8bae1dSRodney W. Grimes 	}
1134c0877f10SJohn Dyson }
1135df8bae1dSRodney W. Grimes 
1136df8bae1dSRodney W. Grimes /*
1137df8bae1dSRodney W. Grimes  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1138df8bae1dSRodney W. Grimes  *
1139df8bae1dSRodney W. Grimes  *	Asserts that the starting and ending region
1140df8bae1dSRodney W. Grimes  *	addresses fall within the valid range of the map.
1141df8bae1dSRodney W. Grimes  */
1142df8bae1dSRodney W. Grimes #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1143df8bae1dSRodney W. Grimes 		{					\
1144df8bae1dSRodney W. Grimes 		if (start < vm_map_min(map))		\
1145df8bae1dSRodney W. Grimes 			start = vm_map_min(map);	\
1146df8bae1dSRodney W. Grimes 		if (end > vm_map_max(map))		\
1147df8bae1dSRodney W. Grimes 			end = vm_map_max(map);		\
1148df8bae1dSRodney W. Grimes 		if (start > end)			\
1149df8bae1dSRodney W. Grimes 			start = end;			\
1150df8bae1dSRodney W. Grimes 		}
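
/*
 * Illustrative sketch (hypothetical values): the macro clamps the
 * caller's range to the map bounds in place, and collapses a reversed
 * range to an empty one.  For a map spanning [0x1000, 0x9000):
 *
 *	start = 0x0800; end = 0xa000;
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *		start == 0x1000, end == 0x9000
 */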
1151df8bae1dSRodney W. Grimes 
1152df8bae1dSRodney W. Grimes /*
1153df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
1154df8bae1dSRodney W. Grimes  *
1155df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
1156df8bae1dSRodney W. Grimes  *
1157df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
1158df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
1159df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
1160df8bae1dSRodney W. Grimes  *
1161df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
1162df8bae1dSRodney W. Grimes  *	within this range after calling vm_map_submap:
1163df8bae1dSRodney W. Grimes  *		vm_fault
1164df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
1165df8bae1dSRodney W. Grimes  *
1166df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
1167df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
1168df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
1169df8bae1dSRodney W. Grimes  */
1170df8bae1dSRodney W. Grimes int
11711b40f8c0SMatthew Dillon vm_map_submap(
11721b40f8c0SMatthew Dillon 	vm_map_t map,
11731b40f8c0SMatthew Dillon 	vm_offset_t start,
11741b40f8c0SMatthew Dillon 	vm_offset_t end,
11751b40f8c0SMatthew Dillon 	vm_map_t submap)
1176df8bae1dSRodney W. Grimes {
1177df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1178c0877f10SJohn Dyson 	int result = KERN_INVALID_ARGUMENT;
1179df8bae1dSRodney W. Grimes 
11800cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
11810cddd8f0SMatthew Dillon 
1182df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1183df8bae1dSRodney W. Grimes 
1184df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1185df8bae1dSRodney W. Grimes 
1186df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1187df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
11880d94caffSDavid Greenman 	} else
1189df8bae1dSRodney W. Grimes 		entry = entry->next;
1190df8bae1dSRodney W. Grimes 
1191df8bae1dSRodney W. Grimes 	vm_map_clip_end(map, entry, end);
1192df8bae1dSRodney W. Grimes 
1193df8bae1dSRodney W. Grimes 	if ((entry->start == start) && (entry->end == end) &&
11949fdfe602SMatthew Dillon 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1195afa07f7eSJohn Dyson 	    (entry->object.vm_object == NULL)) {
11962d8acc0fSJohn Dyson 		entry->object.sub_map = submap;
1197afa07f7eSJohn Dyson 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1198df8bae1dSRodney W. Grimes 		result = KERN_SUCCESS;
1199df8bae1dSRodney W. Grimes 	}
1200df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1201df8bae1dSRodney W. Grimes 
1202df8bae1dSRodney W. Grimes 	return (result);
1203df8bae1dSRodney W. Grimes }
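
/*
 * Usage sketch: in practice kernel submaps are set up through
 * kmem_suballoc(), which performs the vm_map_find(), vm_map_create()
 * and vm_map_submap() sequence described above.  (The map name and
 * size below are illustrative only.)
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t exec_map;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 */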
1204df8bae1dSRodney W. Grimes 
1205df8bae1dSRodney W. Grimes /*
1206df8bae1dSRodney W. Grimes  *	vm_map_protect:
1207df8bae1dSRodney W. Grimes  *
1208df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
1209df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
1210df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
1211df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
1212df8bae1dSRodney W. Grimes  */
1213df8bae1dSRodney W. Grimes int
1214b9dcd593SBruce Evans vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1215b9dcd593SBruce Evans 	       vm_prot_t new_prot, boolean_t set_max)
1216df8bae1dSRodney W. Grimes {
1217c0877f10SJohn Dyson 	vm_map_entry_t current;
1218df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1219df8bae1dSRodney W. Grimes 
1220df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1221df8bae1dSRodney W. Grimes 
1222df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1223df8bae1dSRodney W. Grimes 
1224df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1225df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1226b7b2aac2SJohn Dyson 	} else {
1227df8bae1dSRodney W. Grimes 		entry = entry->next;
1228b7b2aac2SJohn Dyson 	}
1229df8bae1dSRodney W. Grimes 
1230df8bae1dSRodney W. Grimes 	/*
12310d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
1232df8bae1dSRodney W. Grimes 	 */
1233df8bae1dSRodney W. Grimes 	current = entry;
1234df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1235afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1236a1f6d91cSDavid Greenman 			vm_map_unlock(map);
1237df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1238a1f6d91cSDavid Greenman 		}
1239df8bae1dSRodney W. Grimes 		if ((new_prot & current->max_protection) != new_prot) {
1240df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1241df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
1242df8bae1dSRodney W. Grimes 		}
1243df8bae1dSRodney W. Grimes 		current = current->next;
1244df8bae1dSRodney W. Grimes 	}
1245df8bae1dSRodney W. Grimes 
1246df8bae1dSRodney W. Grimes 	/*
12470d94caffSDavid Greenman 	 * Go back and fix up protections. [Note that clipping is not
12480d94caffSDavid Greenman 	 * necessary the second time.]
1249df8bae1dSRodney W. Grimes 	 */
1250df8bae1dSRodney W. Grimes 	current = entry;
1251df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1252df8bae1dSRodney W. Grimes 		vm_prot_t old_prot;
1253df8bae1dSRodney W. Grimes 
1254df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, current, end);
1255df8bae1dSRodney W. Grimes 
1256df8bae1dSRodney W. Grimes 		old_prot = current->protection;
1257df8bae1dSRodney W. Grimes 		if (set_max)
1258df8bae1dSRodney W. Grimes 			current->protection =
1259df8bae1dSRodney W. Grimes 			    (current->max_protection = new_prot) &
1260df8bae1dSRodney W. Grimes 			    old_prot;
1261df8bae1dSRodney W. Grimes 		else
1262df8bae1dSRodney W. Grimes 			current->protection = new_prot;
1263df8bae1dSRodney W. Grimes 
1264df8bae1dSRodney W. Grimes 		/*
12650d94caffSDavid Greenman 		 * Update physical map if necessary. Worry about copy-on-write
12660d94caffSDavid Greenman 		 * here -- CHECK THIS XXX
1267df8bae1dSRodney W. Grimes 		 */
1268df8bae1dSRodney W. Grimes 		if (current->protection != old_prot) {
126947c3ccc4SAlan Cox 			mtx_lock(&Giant);
1270afa07f7eSJohn Dyson #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1271df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
1272df8bae1dSRodney W. Grimes 			pmap_protect(map->pmap, current->start,
1273df8bae1dSRodney W. Grimes 			    current->end,
12741c85e3dfSAlan Cox 			    current->protection & MASK(current));
1275df8bae1dSRodney W. Grimes #undef	MASK
127647c3ccc4SAlan Cox 			mtx_unlock(&Giant);
1277df8bae1dSRodney W. Grimes 		}
12787d78abc9SJohn Dyson 		vm_map_simplify_entry(map, current);
1279df8bae1dSRodney W. Grimes 		current = current->next;
1280df8bae1dSRodney W. Grimes 	}
1281df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1282df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1283df8bae1dSRodney W. Grimes }
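
/*
 * Usage sketch, modeled on the mprotect(2) path (the proc pointer "p"
 * and the user-supplied addr/len are assumed): the caller page-aligns
 * the request and maps the KERN_* result to an errno.
 *
 *	switch (vm_map_protect(&p->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len),
 *	    VM_PROT_READ, FALSE)) {
 *	case KERN_SUCCESS:
 *		return (0);
 *	case KERN_PROTECTION_FAILURE:
 *		return (EACCES);
 *	default:
 *		return (EINVAL);
 *	}
 */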
1284df8bae1dSRodney W. Grimes 
1285df8bae1dSRodney W. Grimes /*
1286867a482dSJohn Dyson  *	vm_map_madvise:
1287867a482dSJohn Dyson  *
1288867a482dSJohn Dyson  * 	This routine traverses a process's map handling the madvise
1289f7fc307aSAlan Cox  *	system call.  Advisories are classified as either those affecting
1290f7fc307aSAlan Cox  *	the vm_map_entry structure, or those affecting the underlying
1291f7fc307aSAlan Cox  *	objects.
1292867a482dSJohn Dyson  */
1293b4309055SMatthew Dillon int
12941b40f8c0SMatthew Dillon vm_map_madvise(
12951b40f8c0SMatthew Dillon 	vm_map_t map,
12961b40f8c0SMatthew Dillon 	vm_offset_t start,
12971b40f8c0SMatthew Dillon 	vm_offset_t end,
12981b40f8c0SMatthew Dillon 	int behav)
1299867a482dSJohn Dyson {
1300f7fc307aSAlan Cox 	vm_map_entry_t current, entry;
1301b4309055SMatthew Dillon 	int modify_map = 0;
1302867a482dSJohn Dyson 
1303b4309055SMatthew Dillon 	/*
1304b4309055SMatthew Dillon 	 * Some madvise calls directly modify the vm_map_entry, in which case
1305b4309055SMatthew Dillon 	 * we need to use an exclusive lock on the map and we need to perform
1306b4309055SMatthew Dillon 	 * various clipping operations.  Otherwise we only need a read-lock
1307b4309055SMatthew Dillon 	 * on the map.
1308b4309055SMatthew Dillon 	 */
1309b4309055SMatthew Dillon 	switch(behav) {
1310b4309055SMatthew Dillon 	case MADV_NORMAL:
1311b4309055SMatthew Dillon 	case MADV_SEQUENTIAL:
1312b4309055SMatthew Dillon 	case MADV_RANDOM:
13134f79d873SMatthew Dillon 	case MADV_NOSYNC:
13144f79d873SMatthew Dillon 	case MADV_AUTOSYNC:
13159730a5daSPaul Saab 	case MADV_NOCORE:
13169730a5daSPaul Saab 	case MADV_CORE:
1317b4309055SMatthew Dillon 		modify_map = 1;
1318867a482dSJohn Dyson 		vm_map_lock(map);
1319b4309055SMatthew Dillon 		break;
1320b4309055SMatthew Dillon 	case MADV_WILLNEED:
1321b4309055SMatthew Dillon 	case MADV_DONTNEED:
1322b4309055SMatthew Dillon 	case MADV_FREE:
1323f7fc307aSAlan Cox 		vm_map_lock_read(map);
1324b4309055SMatthew Dillon 		break;
1325b4309055SMatthew Dillon 	default:
1326b4309055SMatthew Dillon 		return (KERN_INVALID_ARGUMENT);
1327b4309055SMatthew Dillon 	}
1328b4309055SMatthew Dillon 
1329b4309055SMatthew Dillon 	/*
1330b4309055SMatthew Dillon 	 * Locate starting entry and clip if necessary.
1331b4309055SMatthew Dillon 	 */
1332867a482dSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
1333867a482dSJohn Dyson 
1334867a482dSJohn Dyson 	if (vm_map_lookup_entry(map, start, &entry)) {
1335f7fc307aSAlan Cox 		if (modify_map)
1336867a482dSJohn Dyson 			vm_map_clip_start(map, entry, start);
1337b4309055SMatthew Dillon 	} else {
1338867a482dSJohn Dyson 		entry = entry->next;
1339b4309055SMatthew Dillon 	}
1340867a482dSJohn Dyson 
1341f7fc307aSAlan Cox 	if (modify_map) {
1342f7fc307aSAlan Cox 		/*
1343f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the vm_map_entry.
1344f7fc307aSAlan Cox 		 *
1345f7fc307aSAlan Cox 		 * We clip the vm_map_entry so that behavioral changes are
1346f7fc307aSAlan Cox 		 * limited to the specified address range.
1347f7fc307aSAlan Cox 		 */
1348867a482dSJohn Dyson 		for (current = entry;
1349867a482dSJohn Dyson 		     (current != &map->header) && (current->start < end);
1350b4309055SMatthew Dillon 		     current = current->next
1351b4309055SMatthew Dillon 		) {
1352f7fc307aSAlan Cox 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1353867a482dSJohn Dyson 				continue;
1354fed9a903SJohn Dyson 
135547221757SJohn Dyson 			vm_map_clip_end(map, current, end);
1356fed9a903SJohn Dyson 
1357f7fc307aSAlan Cox 			switch (behav) {
1358867a482dSJohn Dyson 			case MADV_NORMAL:
13597f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1360867a482dSJohn Dyson 				break;
1361867a482dSJohn Dyson 			case MADV_SEQUENTIAL:
13627f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1363867a482dSJohn Dyson 				break;
1364867a482dSJohn Dyson 			case MADV_RANDOM:
13657f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1366867a482dSJohn Dyson 				break;
13674f79d873SMatthew Dillon 			case MADV_NOSYNC:
13684f79d873SMatthew Dillon 				current->eflags |= MAP_ENTRY_NOSYNC;
13694f79d873SMatthew Dillon 				break;
13704f79d873SMatthew Dillon 			case MADV_AUTOSYNC:
13714f79d873SMatthew Dillon 				current->eflags &= ~MAP_ENTRY_NOSYNC;
13724f79d873SMatthew Dillon 				break;
13739730a5daSPaul Saab 			case MADV_NOCORE:
13749730a5daSPaul Saab 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
13759730a5daSPaul Saab 				break;
13769730a5daSPaul Saab 			case MADV_CORE:
13779730a5daSPaul Saab 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
13789730a5daSPaul Saab 				break;
1379867a482dSJohn Dyson 			default:
1380867a482dSJohn Dyson 				break;
1381867a482dSJohn Dyson 			}
1382f7fc307aSAlan Cox 			vm_map_simplify_entry(map, current);
1383867a482dSJohn Dyson 		}
1384867a482dSJohn Dyson 		vm_map_unlock(map);
1385b4309055SMatthew Dillon 	} else {
1386f7fc307aSAlan Cox 		vm_pindex_t pindex;
1387f7fc307aSAlan Cox 		int count;
1388f7fc307aSAlan Cox 
1389f7fc307aSAlan Cox 		/*
1390f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the underlying
1391f7fc307aSAlan Cox 		 * vm_object.
1392f7fc307aSAlan Cox 		 *
1393f7fc307aSAlan Cox 		 * Since we don't clip the vm_map_entry, we have to clip
1394f7fc307aSAlan Cox 		 * the vm_object pindex and count.
1395f7fc307aSAlan Cox 		 */
1396f7fc307aSAlan Cox 		for (current = entry;
1397f7fc307aSAlan Cox 		     (current != &map->header) && (current->start < end);
1398b4309055SMatthew Dillon 		     current = current->next
1399b4309055SMatthew Dillon 		) {
14005f99b57cSMatthew Dillon 			vm_offset_t useStart;
14015f99b57cSMatthew Dillon 
1402f7fc307aSAlan Cox 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1403f7fc307aSAlan Cox 				continue;
1404f7fc307aSAlan Cox 
1405f7fc307aSAlan Cox 			pindex = OFF_TO_IDX(current->offset);
1406f7fc307aSAlan Cox 			count = atop(current->end - current->start);
14075f99b57cSMatthew Dillon 			useStart = current->start;
1408f7fc307aSAlan Cox 
1409f7fc307aSAlan Cox 			if (current->start < start) {
1410f7fc307aSAlan Cox 				pindex += atop(start - current->start);
1411f7fc307aSAlan Cox 				count -= atop(start - current->start);
14125f99b57cSMatthew Dillon 				useStart = start;
1413f7fc307aSAlan Cox 			}
1414f7fc307aSAlan Cox 			if (current->end > end)
1415f7fc307aSAlan Cox 				count -= atop(current->end - end);
1416f7fc307aSAlan Cox 
1417f7fc307aSAlan Cox 			if (count <= 0)
1418f7fc307aSAlan Cox 				continue;
1419f7fc307aSAlan Cox 
1420f7fc307aSAlan Cox 			vm_object_madvise(current->object.vm_object,
1421f7fc307aSAlan Cox 					  pindex, count, behav);
1422b4309055SMatthew Dillon 			if (behav == MADV_WILLNEED) {
1423094f6d26SAlan Cox 				mtx_lock(&Giant);
1424b4309055SMatthew Dillon 				pmap_object_init_pt(
1425b4309055SMatthew Dillon 				    map->pmap,
14265f99b57cSMatthew Dillon 				    useStart,
1427f7fc307aSAlan Cox 				    current->object.vm_object,
1428b4309055SMatthew Dillon 				    pindex,
1429b4309055SMatthew Dillon 				    (count << PAGE_SHIFT),
1430e3026983SMatthew Dillon 				    MAP_PREFAULT_MADVISE
1431b4309055SMatthew Dillon 				);
1432094f6d26SAlan Cox 				mtx_unlock(&Giant);
1433f7fc307aSAlan Cox 			}
1434f7fc307aSAlan Cox 		}
1435f7fc307aSAlan Cox 		vm_map_unlock_read(map);
1436f7fc307aSAlan Cox 	}
1437b4309055SMatthew Dillon 	return (0);
1438867a482dSJohn Dyson }
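
/*
 * Usage sketch, modeled on the madvise(2) path ("p", addr and len are
 * assumed): the caller supplies only the range and the behavior; the
 * routine above picks the exclusive or shared locking strategy.
 *
 *	if (vm_map_madvise(&p->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len), MADV_NOSYNC))
 *		return (EINVAL);
 *	return (0);
 */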
1439867a482dSJohn Dyson 
1440867a482dSJohn Dyson 
1441867a482dSJohn Dyson /*
1442df8bae1dSRodney W. Grimes  *	vm_map_inherit:
1443df8bae1dSRodney W. Grimes  *
1444df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
1445df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
1446df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
1447df8bae1dSRodney W. Grimes  *	child maps at the time of vm_map_fork.
1448df8bae1dSRodney W. Grimes  */
1449df8bae1dSRodney W. Grimes int
1450b9dcd593SBruce Evans vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1451b9dcd593SBruce Evans 	       vm_inherit_t new_inheritance)
1452df8bae1dSRodney W. Grimes {
1453c0877f10SJohn Dyson 	vm_map_entry_t entry;
1454df8bae1dSRodney W. Grimes 	vm_map_entry_t temp_entry;
1455df8bae1dSRodney W. Grimes 
1456df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
1457df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
1458df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
1459df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
1460df8bae1dSRodney W. Grimes 		break;
1461df8bae1dSRodney W. Grimes 	default:
1462df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
1463df8bae1dSRodney W. Grimes 	}
1464df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1465df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1466df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1467df8bae1dSRodney W. Grimes 		entry = temp_entry;
1468df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
14690d94caffSDavid Greenman 	} else
1470df8bae1dSRodney W. Grimes 		entry = temp_entry->next;
1471df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1472df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1473df8bae1dSRodney W. Grimes 		entry->inheritance = new_inheritance;
147444428f62SAlan Cox 		vm_map_simplify_entry(map, entry);
1475df8bae1dSRodney W. Grimes 		entry = entry->next;
1476df8bae1dSRodney W. Grimes 	}
1477df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1478df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1479df8bae1dSRodney W. Grimes }
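
/*
 * Usage sketch, modeled on the minherit(2) path ("p", addr and len are
 * assumed): mark a region to be shared with children created by a
 * later vm_map_fork().
 *
 *	if (vm_map_inherit(&p->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len),
 *	    VM_INHERIT_SHARE) != KERN_SUCCESS)
 *		return (EINVAL);
 *	return (0);
 */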
1480df8bae1dSRodney W. Grimes 
1481df8bae1dSRodney W. Grimes /*
14827aaaa4fdSJohn Dyson  * Implement the semantics of mlock
14837aaaa4fdSJohn Dyson  */
14847aaaa4fdSJohn Dyson int
14851b40f8c0SMatthew Dillon vm_map_user_pageable(
14861b40f8c0SMatthew Dillon 	vm_map_t map,
14871b40f8c0SMatthew Dillon 	vm_offset_t start,
14881b40f8c0SMatthew Dillon 	vm_offset_t end,
14891b40f8c0SMatthew Dillon 	boolean_t new_pageable)
14907aaaa4fdSJohn Dyson {
1491b44959ceSTor Egge 	vm_map_entry_t entry;
14927aaaa4fdSJohn Dyson 	vm_map_entry_t start_entry;
1493b44959ceSTor Egge 	vm_offset_t estart;
1494e7673b84STor Egge 	vm_offset_t eend;
14957aaaa4fdSJohn Dyson 	int rv;
14967aaaa4fdSJohn Dyson 
14977aaaa4fdSJohn Dyson 	vm_map_lock(map);
14987aaaa4fdSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
14997aaaa4fdSJohn Dyson 
15007aaaa4fdSJohn Dyson 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
15017aaaa4fdSJohn Dyson 		vm_map_unlock(map);
15027aaaa4fdSJohn Dyson 		return (KERN_INVALID_ADDRESS);
15037aaaa4fdSJohn Dyson 	}
15047aaaa4fdSJohn Dyson 
15057aaaa4fdSJohn Dyson 	if (new_pageable) {
15067aaaa4fdSJohn Dyson 
15077aaaa4fdSJohn Dyson 		entry = start_entry;
15087aaaa4fdSJohn Dyson 		vm_map_clip_start(map, entry, start);
15097aaaa4fdSJohn Dyson 
15107aaaa4fdSJohn Dyson 		/*
15117aaaa4fdSJohn Dyson 		 * Now decrement the wiring count for each region. If a region
15127aaaa4fdSJohn Dyson 		 * becomes completely unwired, unwire its physical pages and
15137aaaa4fdSJohn Dyson 		 * mappings.
15147aaaa4fdSJohn Dyson 		 */
15157aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
1516afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
15177aaaa4fdSJohn Dyson 				vm_map_clip_end(map, entry, end);
1518afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
15197aaaa4fdSJohn Dyson 				entry->wired_count--;
15207aaaa4fdSJohn Dyson 				if (entry->wired_count == 0)
15217aaaa4fdSJohn Dyson 					vm_fault_unwire(map, entry->start, entry->end);
15227aaaa4fdSJohn Dyson 			}
1523b44959ceSTor Egge 			vm_map_simplify_entry(map,entry);
15247aaaa4fdSJohn Dyson 			entry = entry->next;
15257aaaa4fdSJohn Dyson 		}
15267aaaa4fdSJohn Dyson 	} else {
15277aaaa4fdSJohn Dyson 
15287aaaa4fdSJohn Dyson 		entry = start_entry;
15297aaaa4fdSJohn Dyson 
15307aaaa4fdSJohn Dyson 		while ((entry != &map->header) && (entry->start < end)) {
15317aaaa4fdSJohn Dyson 
1532afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
15337aaaa4fdSJohn Dyson 				entry = entry->next;
15347aaaa4fdSJohn Dyson 				continue;
15357aaaa4fdSJohn Dyson 			}
15367aaaa4fdSJohn Dyson 
15377aaaa4fdSJohn Dyson 			if (entry->wired_count != 0) {
15387aaaa4fdSJohn Dyson 				entry->wired_count++;
1539afa07f7eSJohn Dyson 				entry->eflags |= MAP_ENTRY_USER_WIRED;
15407aaaa4fdSJohn Dyson 				entry = entry->next;
15417aaaa4fdSJohn Dyson 				continue;
15427aaaa4fdSJohn Dyson 			}
15437aaaa4fdSJohn Dyson 
15447aaaa4fdSJohn Dyson 			/* Here on entry being newly wired */
15457aaaa4fdSJohn Dyson 
15469fdfe602SMatthew Dillon 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1547afa07f7eSJohn Dyson 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
15487aaaa4fdSJohn Dyson 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
15497aaaa4fdSJohn Dyson 
15507aaaa4fdSJohn Dyson 					vm_object_shadow(&entry->object.vm_object,
15517aaaa4fdSJohn Dyson 					    &entry->offset,
1552c2e11a03SJohn Dyson 					    atop(entry->end - entry->start));
1553afa07f7eSJohn Dyson 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
15547aaaa4fdSJohn Dyson 
15554e71e795SMatthew Dillon 				} else if (entry->object.vm_object == NULL &&
15564e71e795SMatthew Dillon 					   !map->system_map) {
15577aaaa4fdSJohn Dyson 
15587aaaa4fdSJohn Dyson 					entry->object.vm_object =
15597aaaa4fdSJohn Dyson 					    vm_object_allocate(OBJT_DEFAULT,
1560c2e11a03SJohn Dyson 						atop(entry->end - entry->start));
15617aaaa4fdSJohn Dyson 					entry->offset = (vm_offset_t) 0;
15627aaaa4fdSJohn Dyson 
15637aaaa4fdSJohn Dyson 				}
15647aaaa4fdSJohn Dyson 			}
15657aaaa4fdSJohn Dyson 
15667aaaa4fdSJohn Dyson 			vm_map_clip_start(map, entry, start);
15677aaaa4fdSJohn Dyson 			vm_map_clip_end(map, entry, end);
15687aaaa4fdSJohn Dyson 
15697aaaa4fdSJohn Dyson 			entry->wired_count++;
1570afa07f7eSJohn Dyson 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1571b44959ceSTor Egge 			estart = entry->start;
1572e7673b84STor Egge 			eend = entry->end;
15737aaaa4fdSJohn Dyson 
15747aaaa4fdSJohn Dyson 			/* First we need to allow map modifications */
157525adb370SBrian Feldman 			vm_map_set_recursive(map);
157625adb370SBrian Feldman 			vm_map_lock_downgrade(map);
157747221757SJohn Dyson 			map->timestamp++;
15787aaaa4fdSJohn Dyson 
15797aaaa4fdSJohn Dyson 			rv = vm_fault_user_wire(map, entry->start, entry->end);
15807aaaa4fdSJohn Dyson 			if (rv) {
158125adb370SBrian Feldman 
15827aaaa4fdSJohn Dyson 				entry->wired_count--;
1583afa07f7eSJohn Dyson 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
158425adb370SBrian Feldman 
158525adb370SBrian Feldman 				vm_map_clear_recursive(map);
15867aaaa4fdSJohn Dyson 				vm_map_unlock(map);
15877aaaa4fdSJohn Dyson 
1588e7673b84STor Egge 				/*
1589e7673b84STor Egge 				 * At this point, the map is unlocked, and
1590e7673b84STor Egge 				 * entry might no longer be valid.  Use copy
1591e7673b84STor Egge 				 * of entry start value obtained while entry
1592e7673b84STor Egge 				 * was valid.
1593e7673b84STor Egge 				 */
1594e7673b84STor Egge 				(void) vm_map_user_pageable(map, start, estart,
1595e7673b84STor Egge 							    TRUE);
15967aaaa4fdSJohn Dyson 				return rv;
15977aaaa4fdSJohn Dyson 			}
15987aaaa4fdSJohn Dyson 
159925adb370SBrian Feldman 			vm_map_clear_recursive(map);
160025adb370SBrian Feldman 			if (vm_map_lock_upgrade(map)) {
1601b44959ceSTor Egge 				vm_map_lock(map);
1602b44959ceSTor Egge 				if (vm_map_lookup_entry(map, estart, &entry)
1603b44959ceSTor Egge 				    == FALSE) {
1604b44959ceSTor Egge 					vm_map_unlock(map);
1605e7673b84STor Egge 					/*
1606e7673b84STor Egge 					 * vm_fault_user_wire succeeded, thus
1607e7673b84STor Egge 					 * the area between start and eend
1608e7673b84STor Egge 					 * is wired and has to be unwired
1609e7673b84STor Egge 					 * here as part of the cleanup.
1610e7673b84STor Egge 					 */
1611b44959ceSTor Egge 					(void) vm_map_user_pageable(map,
1612b44959ceSTor Egge 								    start,
1613e7673b84STor Egge 								    eend,
1614b44959ceSTor Egge 								    TRUE);
1615b44959ceSTor Egge 					return (KERN_INVALID_ADDRESS);
1616b44959ceSTor Egge 				}
1617b44959ceSTor Egge 			}
1618b44959ceSTor Egge 			vm_map_simplify_entry(map,entry);
16197aaaa4fdSJohn Dyson 		}
16207aaaa4fdSJohn Dyson 	}
162147221757SJohn Dyson 	map->timestamp++;
16227aaaa4fdSJohn Dyson 	vm_map_unlock(map);
16237aaaa4fdSJohn Dyson 	return KERN_SUCCESS;
16247aaaa4fdSJohn Dyson }
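
/*
 * Usage sketch, modeled on the mlock(2)/munlock(2) paths ("p", addr
 * and len are assumed): new_pageable == FALSE wires the range, TRUE
 * unwires it.
 *
 *	error = vm_map_user_pageable(&p->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len), FALSE);
 *	return (error == KERN_SUCCESS ? 0 : ENOMEM);
 */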
16257aaaa4fdSJohn Dyson 
16267aaaa4fdSJohn Dyson /*
1627df8bae1dSRodney W. Grimes  *	vm_map_pageable:
1628df8bae1dSRodney W. Grimes  *
1629df8bae1dSRodney W. Grimes  *	Sets the pageability of the specified address
1630df8bae1dSRodney W. Grimes  *	range in the target map.  Regions specified
1631df8bae1dSRodney W. Grimes  *	as not pageable require locked-down physical
1632df8bae1dSRodney W. Grimes  *	memory and physical page maps.
1633df8bae1dSRodney W. Grimes  *
1634df8bae1dSRodney W. Grimes  *	The map must not be locked, but a reference
1635df8bae1dSRodney W. Grimes  *	must remain to the map throughout the call.
1636df8bae1dSRodney W. Grimes  */
1637df8bae1dSRodney W. Grimes int
16381b40f8c0SMatthew Dillon vm_map_pageable(
16391b40f8c0SMatthew Dillon 	vm_map_t map,
16401b40f8c0SMatthew Dillon 	vm_offset_t start,
16411b40f8c0SMatthew Dillon 	vm_offset_t end,
16421b40f8c0SMatthew Dillon 	boolean_t new_pageable)
1643df8bae1dSRodney W. Grimes {
1644c0877f10SJohn Dyson 	vm_map_entry_t entry;
1645df8bae1dSRodney W. Grimes 	vm_map_entry_t start_entry;
1646c0877f10SJohn Dyson 	vm_offset_t failed = 0;
1647df8bae1dSRodney W. Grimes 	int rv;
1648df8bae1dSRodney W. Grimes 
16490cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
16500cddd8f0SMatthew Dillon 
1651df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1652df8bae1dSRodney W. Grimes 
1653df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1654df8bae1dSRodney W. Grimes 
1655df8bae1dSRodney W. Grimes 	/*
16560d94caffSDavid Greenman 	 * Only one pageability change may take place at one time, since
16570d94caffSDavid Greenman 	 * vm_fault assumes it will be called only once for each
16580d94caffSDavid Greenman 	 * wiring/unwiring.  Therefore, we have to make sure we're actually
16590d94caffSDavid Greenman 	 * changing the pageability for the entire region.  We do so before
16600d94caffSDavid Greenman 	 * making any changes.
1661df8bae1dSRodney W. Grimes 	 */
1662df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1663df8bae1dSRodney W. Grimes 		vm_map_unlock(map);
1664df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1665df8bae1dSRodney W. Grimes 	}
1666df8bae1dSRodney W. Grimes 	entry = start_entry;
1667df8bae1dSRodney W. Grimes 
1668df8bae1dSRodney W. Grimes 	/*
16690d94caffSDavid Greenman 	 * Actions are rather different for wiring and unwiring, so we have
16700d94caffSDavid Greenman 	 * two separate cases.
1671df8bae1dSRodney W. Grimes 	 */
1672df8bae1dSRodney W. Grimes 	if (new_pageable) {
1673df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1674df8bae1dSRodney W. Grimes 
1675df8bae1dSRodney W. Grimes 		/*
16760d94caffSDavid Greenman 		 * Unwiring.  First ensure that the range to be unwired is
16770d94caffSDavid Greenman 		 * really wired down and that there are no holes.
1678df8bae1dSRodney W. Grimes 		 */
1679df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1680df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0 ||
1681df8bae1dSRodney W. Grimes 			    (entry->end < end &&
1682df8bae1dSRodney W. Grimes 				(entry->next == &map->header ||
1683df8bae1dSRodney W. Grimes 				    entry->next->start > entry->end))) {
1684df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1685df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1686df8bae1dSRodney W. Grimes 			}
1687df8bae1dSRodney W. Grimes 			entry = entry->next;
1688df8bae1dSRodney W. Grimes 		}
1689df8bae1dSRodney W. Grimes 
1690df8bae1dSRodney W. Grimes 		/*
16910d94caffSDavid Greenman 		 * Now decrement the wiring count for each region. If a region
16920d94caffSDavid Greenman 		 * becomes completely unwired, unwire its physical pages and
16930d94caffSDavid Greenman 		 * mappings.
1694df8bae1dSRodney W. Grimes 		 */
1695df8bae1dSRodney W. Grimes 		entry = start_entry;
1696df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1697df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1698df8bae1dSRodney W. Grimes 
1699df8bae1dSRodney W. Grimes 			entry->wired_count--;
1700df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0)
1701df8bae1dSRodney W. Grimes 				vm_fault_unwire(map, entry->start, entry->end);
1702df8bae1dSRodney W. Grimes 
170344428f62SAlan Cox 			vm_map_simplify_entry(map, entry);
170444428f62SAlan Cox 
1705df8bae1dSRodney W. Grimes 			entry = entry->next;
1706df8bae1dSRodney W. Grimes 		}
17070d94caffSDavid Greenman 	} else {
1708df8bae1dSRodney W. Grimes 		/*
1709df8bae1dSRodney W. Grimes 		 * Wiring.  We must do this in two passes:
1710df8bae1dSRodney W. Grimes 		 *
17110d94caffSDavid Greenman 		 * 1.  Holding the write lock, we create any shadow or zero-fill
17120d94caffSDavid Greenman 		 * objects that need to be created. Then we clip each map
17130d94caffSDavid Greenman 		 * entry to the region to be wired and increment its wiring
17140d94caffSDavid Greenman 		 * count.  We create objects before clipping the map entries
1715df8bae1dSRodney W. Grimes 		 * to avoid object proliferation.
1716df8bae1dSRodney W. Grimes 		 *
17170d94caffSDavid Greenman 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
17180d94caffSDavid Greenman 		 * fault in the pages for any newly wired area (wired_count is
17190d94caffSDavid Greenman 		 * 1).
1720df8bae1dSRodney W. Grimes 		 *
17210d94caffSDavid Greenman 		 * Downgrading to a read lock for vm_fault_wire avoids a possible
172224a1cce3SDavid Greenman 		 * deadlock with another process that may have faulted on one
17230d94caffSDavid Greenman 		 * of the pages to be wired (it would mark the page busy,
17240d94caffSDavid Greenman 		 * blocking us, then in turn block on the map lock that we
17250d94caffSDavid Greenman 		 * hold).  Because of problems in the recursive lock package,
17260d94caffSDavid Greenman 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
17270d94caffSDavid Greenman 		 * any actions that require the write lock must be done
17280d94caffSDavid Greenman 		 * beforehand.  Because we keep the read lock on the map, the
17290d94caffSDavid Greenman 		 * copy-on-write status of the entries we modify here cannot
17300d94caffSDavid Greenman 		 * change.
1731df8bae1dSRodney W. Grimes 		 */
1732df8bae1dSRodney W. Grimes 
1733df8bae1dSRodney W. Grimes 		/*
1734df8bae1dSRodney W. Grimes 		 * Pass 1.
1735df8bae1dSRodney W. Grimes 		 */
1736df8bae1dSRodney W. Grimes 		while ((entry != &map->header) && (entry->start < end)) {
1737df8bae1dSRodney W. Grimes 			if (entry->wired_count == 0) {
1738df8bae1dSRodney W. Grimes 
1739df8bae1dSRodney W. Grimes 				/*
1740df8bae1dSRodney W. Grimes 				 * Perform actions of vm_map_lookup that need
1741df8bae1dSRodney W. Grimes 				 * the write lock on the map: create a shadow
1742df8bae1dSRodney W. Grimes 				 * object for a copy-on-write region, or an
1743df8bae1dSRodney W. Grimes 				 * object for a zero-fill region.
1744df8bae1dSRodney W. Grimes 				 *
1745df8bae1dSRodney W. Grimes 				 * We don't have to do this for entries that
1746ad5fca3bSAlan Cox 				 * point to sub maps, because we won't
1747ad5fca3bSAlan Cox 				 * hold the lock on the sub map.
1748df8bae1dSRodney W. Grimes 				 */
17499fdfe602SMatthew Dillon 				if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1750afa07f7eSJohn Dyson 					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1751b5b40fa6SJohn Dyson 					if (copyflag &&
1752df8bae1dSRodney W. Grimes 					    ((entry->protection & VM_PROT_WRITE) != 0)) {
1753df8bae1dSRodney W. Grimes 
1754df8bae1dSRodney W. Grimes 						vm_object_shadow(&entry->object.vm_object,
1755df8bae1dSRodney W. Grimes 						    &entry->offset,
1756c2e11a03SJohn Dyson 						    atop(entry->end - entry->start));
1757afa07f7eSJohn Dyson 						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
17584e71e795SMatthew Dillon 					} else if (entry->object.vm_object == NULL &&
17594e71e795SMatthew Dillon 						   !map->system_map) {
1760df8bae1dSRodney W. Grimes 						entry->object.vm_object =
1761a316d390SJohn Dyson 						    vm_object_allocate(OBJT_DEFAULT,
1762c2e11a03SJohn Dyson 							atop(entry->end - entry->start));
1763df8bae1dSRodney W. Grimes 						entry->offset = (vm_offset_t) 0;
1764df8bae1dSRodney W. Grimes 					}
1765df8bae1dSRodney W. Grimes 				}
1766df8bae1dSRodney W. Grimes 			}
1767df8bae1dSRodney W. Grimes 			vm_map_clip_start(map, entry, start);
1768df8bae1dSRodney W. Grimes 			vm_map_clip_end(map, entry, end);
1769df8bae1dSRodney W. Grimes 			entry->wired_count++;
1770df8bae1dSRodney W. Grimes 
1771df8bae1dSRodney W. Grimes 			/*
1772df8bae1dSRodney W. Grimes 			 * Check for holes
1773df8bae1dSRodney W. Grimes 			 */
1774df8bae1dSRodney W. Grimes 			if (entry->end < end &&
1775df8bae1dSRodney W. Grimes 			    (entry->next == &map->header ||
1776df8bae1dSRodney W. Grimes 				entry->next->start > entry->end)) {
1777df8bae1dSRodney W. Grimes 				/*
17780d94caffSDavid Greenman 				 * Found one.  Object creation actions do not
17790d94caffSDavid Greenman 				 * need to be undone, but the wired counts
17800d94caffSDavid Greenman 				 * need to be restored.
1781df8bae1dSRodney W. Grimes 				 */
1782df8bae1dSRodney W. Grimes 				while (entry != &map->header && entry->end > start) {
1783df8bae1dSRodney W. Grimes 					entry->wired_count--;
1784df8bae1dSRodney W. Grimes 					entry = entry->prev;
1785df8bae1dSRodney W. Grimes 				}
1786df8bae1dSRodney W. Grimes 				vm_map_unlock(map);
1787df8bae1dSRodney W. Grimes 				return (KERN_INVALID_ARGUMENT);
1788df8bae1dSRodney W. Grimes 			}
1789df8bae1dSRodney W. Grimes 			entry = entry->next;
1790df8bae1dSRodney W. Grimes 		}
1791df8bae1dSRodney W. Grimes 
1792df8bae1dSRodney W. Grimes 		/*
1793df8bae1dSRodney W. Grimes 		 * Pass 2.
1794df8bae1dSRodney W. Grimes 		 */
1795df8bae1dSRodney W. Grimes 
1796df8bae1dSRodney W. Grimes 		/*
1797df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1798df8bae1dSRodney W. Grimes 		 *
179924a1cce3SDavid Greenman 		 * If we are wiring in the kernel map or a submap of it,
180024a1cce3SDavid Greenman 		 * unlock the map to avoid deadlocks.  We trust that the
180124a1cce3SDavid Greenman 		 * kernel is well-behaved, and therefore will not do
180224a1cce3SDavid Greenman 		 * anything destructive to this region of the map while
180324a1cce3SDavid Greenman 		 * we have it unlocked.  We cannot trust user processes
180424a1cce3SDavid Greenman 		 * to do the same.
1805df8bae1dSRodney W. Grimes 		 *
1806df8bae1dSRodney W. Grimes 		 * HACK HACK HACK HACK
1807df8bae1dSRodney W. Grimes 		 */
1808df8bae1dSRodney W. Grimes 		if (vm_map_pmap(map) == kernel_pmap) {
1809df8bae1dSRodney W. Grimes 			vm_map_unlock(map);	/* trust me ... */
18100d94caffSDavid Greenman 		} else {
181103e9c6c1SJohn Dyson 			vm_map_lock_downgrade(map);
1812df8bae1dSRodney W. Grimes 		}
1813df8bae1dSRodney W. Grimes 
1814df8bae1dSRodney W. Grimes 		rv = 0;
1815df8bae1dSRodney W. Grimes 		entry = start_entry;
1816df8bae1dSRodney W. Grimes 		while (entry != &map->header && entry->start < end) {
1817df8bae1dSRodney W. Grimes 			/*
18180d94caffSDavid Greenman 			 * If vm_fault_wire fails for any page we need to undo
18190d94caffSDavid Greenman 			 * what has been done.  We decrement the wiring count
18200d94caffSDavid Greenman 			 * for those pages which have not yet been wired (now)
18210d94caffSDavid Greenman 			 * and unwire those that have (later).
1822df8bae1dSRodney W. Grimes 			 *
1823df8bae1dSRodney W. Grimes 			 * XXX this violates the locking protocol on the map,
1824df8bae1dSRodney W. Grimes 			 * needs to be fixed.
1825df8bae1dSRodney W. Grimes 			 */
1826df8bae1dSRodney W. Grimes 			if (rv)
1827df8bae1dSRodney W. Grimes 				entry->wired_count--;
1828df8bae1dSRodney W. Grimes 			else if (entry->wired_count == 1) {
1829df8bae1dSRodney W. Grimes 				rv = vm_fault_wire(map, entry->start, entry->end);
1830df8bae1dSRodney W. Grimes 				if (rv) {
1831df8bae1dSRodney W. Grimes 					failed = entry->start;
1832df8bae1dSRodney W. Grimes 					entry->wired_count--;
1833df8bae1dSRodney W. Grimes 				}
1834df8bae1dSRodney W. Grimes 			}
1835df8bae1dSRodney W. Grimes 			entry = entry->next;
1836df8bae1dSRodney W. Grimes 		}
1837df8bae1dSRodney W. Grimes 
183825adb370SBrian Feldman 		if (vm_map_pmap(map) == kernel_pmap) {
183925adb370SBrian Feldman 			vm_map_lock(map);
184025adb370SBrian Feldman 		}
1841df8bae1dSRodney W. Grimes 		if (rv) {
184225adb370SBrian Feldman 			vm_map_unlock(map);
1843df8bae1dSRodney W. Grimes 			(void) vm_map_pageable(map, start, failed, TRUE);
1844df8bae1dSRodney W. Grimes 			return (rv);
1845df8bae1dSRodney W. Grimes 		}
1846e7673b84STor Egge 		/*
1847e7673b84STor Egge 		 * An exclusive lock on the map is needed in order to call
1848e7673b84STor Egge 		 * vm_map_simplify_entry().  If the current lock on the map
1849e7673b84STor Egge 		 * is only a shared lock, an upgrade is needed.
1850e7673b84STor Egge 		 */
1851e7673b84STor Egge 		if (vm_map_pmap(map) != kernel_pmap &&
1852e7673b84STor Egge 		    vm_map_lock_upgrade(map)) {
1853e7673b84STor Egge 			vm_map_lock(map);
1854e7673b84STor Egge 			if (vm_map_lookup_entry(map, start, &start_entry) ==
1855e7673b84STor Egge 			    FALSE) {
1856e7673b84STor Egge 				vm_map_unlock(map);
1857e7673b84STor Egge 				return KERN_SUCCESS;
1858e7673b84STor Egge 			}
1859e7673b84STor Egge 		}
1860b7b2aac2SJohn Dyson 		vm_map_simplify_entry(map, start_entry);
1861df8bae1dSRodney W. Grimes 	}
1862df8bae1dSRodney W. Grimes 
1863df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1864df8bae1dSRodney W. Grimes 
1865df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1866df8bae1dSRodney W. Grimes }
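
/*
 * Usage sketch: a vslock()-style helper that wires a user buffer for
 * the duration of an I/O operation (the caller, addr and len are
 * assumed).
 *
 *	rv = vm_map_pageable(&curproc->p_vmspace->vm_map,
 *	    trunc_page((vm_offset_t)addr),
 *	    round_page((vm_offset_t)addr + len), FALSE);
 */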
1867df8bae1dSRodney W. Grimes 
1868df8bae1dSRodney W. Grimes /*
1869df8bae1dSRodney W. Grimes  * vm_map_clean
1870df8bae1dSRodney W. Grimes  *
1871df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
1872df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
1873df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
1874df8bae1dSRodney W. Grimes  *
1875df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
1876df8bae1dSRodney W. Grimes  */
1877df8bae1dSRodney W. Grimes int
18781b40f8c0SMatthew Dillon vm_map_clean(
18791b40f8c0SMatthew Dillon 	vm_map_t map,
18801b40f8c0SMatthew Dillon 	vm_offset_t start,
18811b40f8c0SMatthew Dillon 	vm_offset_t end,
18821b40f8c0SMatthew Dillon 	boolean_t syncio,
18831b40f8c0SMatthew Dillon 	boolean_t invalidate)
1884df8bae1dSRodney W. Grimes {
1885c0877f10SJohn Dyson 	vm_map_entry_t current;
1886df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1887df8bae1dSRodney W. Grimes 	vm_size_t size;
1888df8bae1dSRodney W. Grimes 	vm_object_t object;
1889a316d390SJohn Dyson 	vm_ooffset_t offset;
1890df8bae1dSRodney W. Grimes 
18910cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
18920cddd8f0SMatthew Dillon 
1893df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
1894df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1895df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &entry)) {
1896df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
1897df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
1898df8bae1dSRodney W. Grimes 	}
1899df8bae1dSRodney W. Grimes 	/*
1900df8bae1dSRodney W. Grimes 	 * Make a first pass to check for holes.
1901df8bae1dSRodney W. Grimes 	 */
1902df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1903afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1904df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1905df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1906df8bae1dSRodney W. Grimes 		}
1907df8bae1dSRodney W. Grimes 		if (end > current->end &&
1908df8bae1dSRodney W. Grimes 		    (current->next == &map->header ||
1909df8bae1dSRodney W. Grimes 			current->end != current->next->start)) {
1910df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
1911df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ADDRESS);
1912df8bae1dSRodney W. Grimes 		}
1913df8bae1dSRodney W. Grimes 	}
1914df8bae1dSRodney W. Grimes 
1915cf2819ccSJohn Dyson 	if (invalidate)
1916cf2819ccSJohn Dyson 		pmap_remove(vm_map_pmap(map), start, end);
1917df8bae1dSRodney W. Grimes 	/*
1918df8bae1dSRodney W. Grimes 	 * Make a second pass, cleaning/uncaching pages from the indicated
1919df8bae1dSRodney W. Grimes 	 * objects as we go.
1920df8bae1dSRodney W. Grimes 	 */
1921df8bae1dSRodney W. Grimes 	for (current = entry; current->start < end; current = current->next) {
1922df8bae1dSRodney W. Grimes 		offset = current->offset + (start - current->start);
1923df8bae1dSRodney W. Grimes 		size = (end <= current->end ? end : current->end) - start;
19249fdfe602SMatthew Dillon 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1925c0877f10SJohn Dyson 			vm_map_t smap;
1926df8bae1dSRodney W. Grimes 			vm_map_entry_t tentry;
1927df8bae1dSRodney W. Grimes 			vm_size_t tsize;
1928df8bae1dSRodney W. Grimes 
19299fdfe602SMatthew Dillon 			smap = current->object.sub_map;
1930df8bae1dSRodney W. Grimes 			vm_map_lock_read(smap);
1931df8bae1dSRodney W. Grimes 			(void) vm_map_lookup_entry(smap, offset, &tentry);
1932df8bae1dSRodney W. Grimes 			tsize = tentry->end - offset;
1933df8bae1dSRodney W. Grimes 			if (tsize < size)
1934df8bae1dSRodney W. Grimes 				size = tsize;
1935df8bae1dSRodney W. Grimes 			object = tentry->object.vm_object;
1936df8bae1dSRodney W. Grimes 			offset = tentry->offset + (offset - tentry->start);
1937df8bae1dSRodney W. Grimes 			vm_map_unlock_read(smap);
1938df8bae1dSRodney W. Grimes 		} else {
1939df8bae1dSRodney W. Grimes 			object = current->object.vm_object;
1940df8bae1dSRodney W. Grimes 		}
19418a02c104SJohn Dyson 		/*
19428a02c104SJohn Dyson 		 * Note that there is absolutely no sense in writing out
19438a02c104SJohn Dyson 		 * anonymous objects, so we track down the vnode object
19448a02c104SJohn Dyson 		 * to write out.
19458a02c104SJohn Dyson 		 * We invalidate (remove) all pages from the address space
19468a02c104SJohn Dyson 		 * anyway, for semantic correctness.
19478c5dffe8SMatthew Dillon 		 *
19488c5dffe8SMatthew Dillon 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
19498c5dffe8SMatthew Dillon 		 * may start out with a NULL object.
19508a02c104SJohn Dyson 		 */
19518c5dffe8SMatthew Dillon 		while (object && object->backing_object) {
19528a02c104SJohn Dyson 			object = object->backing_object;
19538a02c104SJohn Dyson 			offset += object->backing_object_offset;
19548a02c104SJohn Dyson 			if (object->size < OFF_TO_IDX(offset + size))
19558a02c104SJohn Dyson 				size = IDX_TO_OFF(object->size) - offset;
19568a02c104SJohn Dyson 		}
1957ff359f84SMatthew Dillon 		if (object && (object->type == OBJT_VNODE) &&
1958ff359f84SMatthew Dillon 		    (current->protection & VM_PROT_WRITE)) {
1959df8bae1dSRodney W. Grimes 			/*
1960ff359f84SMatthew Dillon 			 * Flush pages if writing is allowed, invalidate them
1961ff359f84SMatthew Dillon 			 * if invalidation requested.  Pages undergoing I/O
1962ff359f84SMatthew Dillon 			 * will be ignored by vm_object_page_remove().
1963f5cf85d4SDavid Greenman 			 *
1964ff359f84SMatthew Dillon 			 * We cannot lock the vnode and then wait for paging
1965ff359f84SMatthew Dillon 			 * to complete without deadlocking against vm_fault.
1966ff359f84SMatthew Dillon 			 * Instead we simply call vm_object_page_remove() and
1967ff359f84SMatthew Dillon 			 * allow it to block internally on a page-by-page
1968ff359f84SMatthew Dillon 			 * basis when it encounters pages undergoing async
1969ff359f84SMatthew Dillon 			 * I/O.
1970df8bae1dSRodney W. Grimes 			 */
19718f9110f6SJohn Dyson 			int flags;
1972ff359f84SMatthew Dillon 
1973ff359f84SMatthew Dillon 			vm_object_reference(object);
1974b40ce416SJulian Elischer 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
19758f9110f6SJohn Dyson 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
19768f9110f6SJohn Dyson 			flags |= invalidate ? OBJPC_INVAL : 0;
1977a316d390SJohn Dyson 			vm_object_page_clean(object,
1978a316d390SJohn Dyson 			    OFF_TO_IDX(offset),
19792be70f79SJohn Dyson 			    OFF_TO_IDX(offset + size + PAGE_MASK),
19808f9110f6SJohn Dyson 			    flags);
1981cf2819ccSJohn Dyson 			if (invalidate) {
1982ff359f84SMatthew Dillon 				/*vm_object_pip_wait(object, "objmcl");*/
1983a316d390SJohn Dyson 				vm_object_page_remove(object,
1984a316d390SJohn Dyson 				    OFF_TO_IDX(offset),
19852be70f79SJohn Dyson 				    OFF_TO_IDX(offset + size + PAGE_MASK),
1986a316d390SJohn Dyson 				    FALSE);
1987cf2819ccSJohn Dyson 			}
1988b40ce416SJulian Elischer 			VOP_UNLOCK(object->handle, 0, curthread);
1989ff359f84SMatthew Dillon 			vm_object_deallocate(object);
1990a02051c3SJohn Dyson 		}
1991df8bae1dSRodney W. Grimes 		start += size;
1992df8bae1dSRodney W. Grimes 	}
1993df8bae1dSRodney W. Grimes 
1994df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
1995df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1996df8bae1dSRodney W. Grimes }
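
/*
 * Usage sketch, modeled on the msync(2) path ("map", addr, size and
 * flags are assumed): the absence of MS_ASYNC selects syncio, and
 * MS_INVALIDATE selects invalidate.
 *
 *	rv = vm_map_clean(map, addr, addr + size,
 *	    (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0);
 */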
1997df8bae1dSRodney W. Grimes 
1998df8bae1dSRodney W. Grimes /*
1999df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
2000df8bae1dSRodney W. Grimes  *
2001df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
2002df8bae1dSRodney W. Grimes  *
2003df8bae1dSRodney W. Grimes  *	The map in question should be locked.
2004df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
2005df8bae1dSRodney W. Grimes  */
20060362d7d7SJohn Dyson static void
20071b40f8c0SMatthew Dillon vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2008df8bae1dSRodney W. Grimes {
2009df8bae1dSRodney W. Grimes 	vm_fault_unwire(map, entry->start, entry->end);
2010df8bae1dSRodney W. Grimes 	entry->wired_count = 0;
2011df8bae1dSRodney W. Grimes }
2012df8bae1dSRodney W. Grimes 
2013df8bae1dSRodney W. Grimes /*
2014df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
2015df8bae1dSRodney W. Grimes  *
2016df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
2017df8bae1dSRodney W. Grimes  */
20180362d7d7SJohn Dyson static void
20191b40f8c0SMatthew Dillon vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2020df8bae1dSRodney W. Grimes {
2021df8bae1dSRodney W. Grimes 	vm_map_entry_unlink(map, entry);
2022df8bae1dSRodney W. Grimes 	map->size -= entry->end - entry->start;
2023df8bae1dSRodney W. Grimes 
20249fdfe602SMatthew Dillon 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2025df8bae1dSRodney W. Grimes 		vm_object_deallocate(entry->object.vm_object);
2026b5b40fa6SJohn Dyson 	}
2027df8bae1dSRodney W. Grimes 
2028df8bae1dSRodney W. Grimes 	vm_map_entry_dispose(map, entry);
2029df8bae1dSRodney W. Grimes }
2030df8bae1dSRodney W. Grimes 
2031df8bae1dSRodney W. Grimes /*
2032df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
2033df8bae1dSRodney W. Grimes  *
2034df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
2035df8bae1dSRodney W. Grimes  *	map.
2036df8bae1dSRodney W. Grimes  */
2037df8bae1dSRodney W. Grimes int
20381b40f8c0SMatthew Dillon vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2039df8bae1dSRodney W. Grimes {
2040cbd8ec09SJohn Dyson 	vm_object_t object;
2041c0877f10SJohn Dyson 	vm_map_entry_t entry;
2042df8bae1dSRodney W. Grimes 	vm_map_entry_t first_entry;
2043df8bae1dSRodney W. Grimes 
20440cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
20450cddd8f0SMatthew Dillon 
2046df8bae1dSRodney W. Grimes 	/*
2047df8bae1dSRodney W. Grimes 	 * Find the start of the region, and clip it
2048df8bae1dSRodney W. Grimes 	 */
2049876318ecSAlan Cox 	if (!vm_map_lookup_entry(map, start, &first_entry))
2050df8bae1dSRodney W. Grimes 		entry = first_entry->next;
2051876318ecSAlan Cox 	else {
2052df8bae1dSRodney W. Grimes 		entry = first_entry;
2053df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
2054df8bae1dSRodney W. Grimes 	}
2055df8bae1dSRodney W. Grimes 
2056df8bae1dSRodney W. Grimes 	/*
2057df8bae1dSRodney W. Grimes 	 * Save the free space hint
2058df8bae1dSRodney W. Grimes 	 */
2059b18bfc3dSJohn Dyson 	if (entry == &map->header) {
2060b18bfc3dSJohn Dyson 		map->first_free = &map->header;
20612dbea5d2SJohn Dyson 	} else if (map->first_free->start >= start) {
2062df8bae1dSRodney W. Grimes 		map->first_free = entry->prev;
20632dbea5d2SJohn Dyson 	}
2064df8bae1dSRodney W. Grimes 
2065df8bae1dSRodney W. Grimes 	/*
2066df8bae1dSRodney W. Grimes 	 * Step through all entries in this region
2067df8bae1dSRodney W. Grimes 	 */
2068df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
2069df8bae1dSRodney W. Grimes 		vm_map_entry_t next;
2070b18bfc3dSJohn Dyson 		vm_offset_t s, e;
2071cbd8ec09SJohn Dyson 		vm_pindex_t offidxstart, offidxend, count;
2072df8bae1dSRodney W. Grimes 
2073df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
2074df8bae1dSRodney W. Grimes 
2075df8bae1dSRodney W. Grimes 		s = entry->start;
2076df8bae1dSRodney W. Grimes 		e = entry->end;
2077c0877f10SJohn Dyson 		next = entry->next;
2078df8bae1dSRodney W. Grimes 
2079cbd8ec09SJohn Dyson 		offidxstart = OFF_TO_IDX(entry->offset);
2080cbd8ec09SJohn Dyson 		count = OFF_TO_IDX(e - s);
2081cbd8ec09SJohn Dyson 		object = entry->object.vm_object;
20822dbea5d2SJohn Dyson 
2083df8bae1dSRodney W. Grimes 		/*
20840d94caffSDavid Greenman 		 * Unwire before removing addresses from the pmap; otherwise,
20850d94caffSDavid Greenman 		 * unwiring will put the entries back in the pmap.
2086df8bae1dSRodney W. Grimes 		 */
2087c0877f10SJohn Dyson 		if (entry->wired_count != 0) {
2088df8bae1dSRodney W. Grimes 			vm_map_entry_unwire(map, entry);
2089c0877f10SJohn Dyson 		}
2090df8bae1dSRodney W. Grimes 
2091cbd8ec09SJohn Dyson 		offidxend = offidxstart + count;
2092df8bae1dSRodney W. Grimes 
2093c0877f10SJohn Dyson 		if ((object == kernel_object) || (object == kmem_object)) {
20942dbea5d2SJohn Dyson 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2095b18bfc3dSJohn Dyson 		} else {
2096df8bae1dSRodney W. Grimes 			pmap_remove(map->pmap, s, e);
2097876318ecSAlan Cox 			if (object != NULL &&
2098876318ecSAlan Cox 			    object->ref_count != 1 &&
2099876318ecSAlan Cox 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2100876318ecSAlan Cox 			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
21012dbea5d2SJohn Dyson 				vm_object_collapse(object);
21022dbea5d2SJohn Dyson 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
21032dbea5d2SJohn Dyson 				if (object->type == OBJT_SWAP) {
2104cbd8ec09SJohn Dyson 					swap_pager_freespace(object, offidxstart, count);
21052dbea5d2SJohn Dyson 				}
2106876318ecSAlan Cox 				if (offidxend >= object->size &&
2107876318ecSAlan Cox 				    offidxstart < object->size) {
2108c0877f10SJohn Dyson 					object->size = offidxstart;
2109c0877f10SJohn Dyson 				}
21102dbea5d2SJohn Dyson 			}
2111b18bfc3dSJohn Dyson 		}
2112df8bae1dSRodney W. Grimes 
2113df8bae1dSRodney W. Grimes 		/*
21140d94caffSDavid Greenman 		 * Delete the entry (which may delete the object) only after
21150d94caffSDavid Greenman 		 * removing all pmap entries pointing to its pages.
21160d94caffSDavid Greenman 		 * (Otherwise, its page frames may be reallocated, and any
21170d94caffSDavid Greenman 		 * modify bits will be set in the wrong object!)
2118df8bae1dSRodney W. Grimes 		 */
2119df8bae1dSRodney W. Grimes 		vm_map_entry_delete(map, entry);
2120df8bae1dSRodney W. Grimes 		entry = next;
2121df8bae1dSRodney W. Grimes 	}
2122df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2123df8bae1dSRodney W. Grimes }
2124df8bae1dSRodney W. Grimes 
2125df8bae1dSRodney W. Grimes /*
2126df8bae1dSRodney W. Grimes  *	vm_map_remove:
2127df8bae1dSRodney W. Grimes  *
2128df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
2129df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
2130df8bae1dSRodney W. Grimes  */
2131df8bae1dSRodney W. Grimes int
21321b40f8c0SMatthew Dillon vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2133df8bae1dSRodney W. Grimes {
2134c0877f10SJohn Dyson 	int result, s = 0;
21358d6e8edeSDavid Greenman 
21360cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
21370cddd8f0SMatthew Dillon 
213808442f8aSBosko Milekic 	if (map == kmem_map)
2139b18bfc3dSJohn Dyson 		s = splvm();
2140df8bae1dSRodney W. Grimes 
2141df8bae1dSRodney W. Grimes 	vm_map_lock(map);
2142df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
2143df8bae1dSRodney W. Grimes 	result = vm_map_delete(map, start, end);
2144df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
2145df8bae1dSRodney W. Grimes 
214608442f8aSBosko Milekic 	if (map == kmem_map)
21478d6e8edeSDavid Greenman 		splx(s);
21488d6e8edeSDavid Greenman 
2149df8bae1dSRodney W. Grimes 	return (result);
2150df8bae1dSRodney W. Grimes }
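
/*
 * Usage sketch, modeled on the munmap(2) path ("p", addr and len are
 * assumed): the exported form supplies the locking and the range
 * check around vm_map_delete().
 *
 *	(void) vm_map_remove(&p->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len));
 */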
2151df8bae1dSRodney W. Grimes 
2152df8bae1dSRodney W. Grimes /*
2153df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
2154df8bae1dSRodney W. Grimes  *
2155df8bae1dSRodney W. Grimes  *	Assert that the target map allows the specified
2156df8bae1dSRodney W. Grimes  *	privilege on the entire address region given.
2157df8bae1dSRodney W. Grimes  *	The entire region must be allocated.
2158df8bae1dSRodney W. Grimes  */
21590d94caffSDavid Greenman boolean_t
2160b9dcd593SBruce Evans vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2161b9dcd593SBruce Evans 			vm_prot_t protection)
2162df8bae1dSRodney W. Grimes {
2163c0877f10SJohn Dyson 	vm_map_entry_t entry;
2164df8bae1dSRodney W. Grimes 	vm_map_entry_t tmp_entry;
2165df8bae1dSRodney W. Grimes 
21662f6c16e1SAlan Cox 	vm_map_lock_read(map);
2167df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
21682f6c16e1SAlan Cox 		vm_map_unlock_read(map);
2169df8bae1dSRodney W. Grimes 		return (FALSE);
2170df8bae1dSRodney W. Grimes 	}
2171df8bae1dSRodney W. Grimes 	entry = tmp_entry;
2172df8bae1dSRodney W. Grimes 
2173df8bae1dSRodney W. Grimes 	while (start < end) {
2174df8bae1dSRodney W. Grimes 		if (entry == &map->header) {
21752f6c16e1SAlan Cox 			vm_map_unlock_read(map);
2176df8bae1dSRodney W. Grimes 			return (FALSE);
2177df8bae1dSRodney W. Grimes 		}
2178df8bae1dSRodney W. Grimes 		/*
2179df8bae1dSRodney W. Grimes 		 * No holes allowed!
2180df8bae1dSRodney W. Grimes 		 */
2181df8bae1dSRodney W. Grimes 		if (start < entry->start) {
21822f6c16e1SAlan Cox 			vm_map_unlock_read(map);
2183df8bae1dSRodney W. Grimes 			return (FALSE);
2184df8bae1dSRodney W. Grimes 		}
2185df8bae1dSRodney W. Grimes 		/*
2186df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
2187df8bae1dSRodney W. Grimes 		 */
2188df8bae1dSRodney W. Grimes 		if ((entry->protection & protection) != protection) {
21892f6c16e1SAlan Cox 			vm_map_unlock_read(map);
2190df8bae1dSRodney W. Grimes 			return (FALSE);
2191df8bae1dSRodney W. Grimes 		}
2192df8bae1dSRodney W. Grimes 		/* go to next entry */
2193df8bae1dSRodney W. Grimes 		start = entry->end;
2194df8bae1dSRodney W. Grimes 		entry = entry->next;
2195df8bae1dSRodney W. Grimes 	}
21962f6c16e1SAlan Cox 	vm_map_unlock_read(map);
2197df8bae1dSRodney W. Grimes 	return (TRUE);
2198df8bae1dSRodney W. Grimes }
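/*
 * Illustrative sketch (hypothetical helper): vm_map_check_protection() is
 * typically used to validate an entire range before acting on it, e.g.
 * confirming read access prior to a copy.
 */
#if 0
static boolean_t
example_range_is_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return (vm_map_check_protection(map, trunc_page(start),
	    round_page(end), VM_PROT_READ));
}
#endif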
2199df8bae1dSRodney W. Grimes 
220086524867SJohn Dyson /*
220186524867SJohn Dyson  * Split the pages in a map entry into a new object.  This affords
220286524867SJohn Dyson  * easier removal of unused pages, and prevents object inheritance
220386524867SJohn Dyson  * from adversely impacting memory usage.
220486524867SJohn Dyson  */
2205c0877f10SJohn Dyson static void
22061b40f8c0SMatthew Dillon vm_map_split(vm_map_entry_t entry)
2207c0877f10SJohn Dyson {
220886524867SJohn Dyson 	vm_page_t m;
2209bd6be915SJohn Dyson 	vm_object_t orig_object, new_object, source;
2210c0877f10SJohn Dyson 	vm_offset_t s, e;
2211c0877f10SJohn Dyson 	vm_pindex_t offidxstart, offidxend, idx;
2212c0877f10SJohn Dyson 	vm_size_t size;
2213c0877f10SJohn Dyson 	vm_ooffset_t offset;
2214c0877f10SJohn Dyson 
22150cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
22160cddd8f0SMatthew Dillon 
2217c0877f10SJohn Dyson 	orig_object = entry->object.vm_object;
2218c0877f10SJohn Dyson 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2219c0877f10SJohn Dyson 		return;
2220c0877f10SJohn Dyson 	if (orig_object->ref_count <= 1)
2221c0877f10SJohn Dyson 		return;
2222c0877f10SJohn Dyson 
2223c0877f10SJohn Dyson 	offset = entry->offset;
2224c0877f10SJohn Dyson 	s = entry->start;
2225c0877f10SJohn Dyson 	e = entry->end;
2226c0877f10SJohn Dyson 
2227c0877f10SJohn Dyson 	offidxstart = OFF_TO_IDX(offset);
2228c0877f10SJohn Dyson 	offidxend = offidxstart + OFF_TO_IDX(e - s);
2229c0877f10SJohn Dyson 	size = offidxend - offidxstart;
2230c0877f10SJohn Dyson 
2231c0877f10SJohn Dyson 	new_object = vm_pager_allocate(orig_object->type,
22326cde7a16SDavid Greenman 		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2233c0877f10SJohn Dyson 	if (new_object == NULL)
2234c0877f10SJohn Dyson 		return;
2235c0877f10SJohn Dyson 
2236bd6be915SJohn Dyson 	source = orig_object->backing_object;
2237bd6be915SJohn Dyson 	if (source != NULL) {
2238bd6be915SJohn Dyson 		vm_object_reference(source);	/* Referenced by new_object */
2239bd6be915SJohn Dyson 		TAILQ_INSERT_TAIL(&source->shadow_head,
2240bd6be915SJohn Dyson 				  new_object, shadow_list);
2241069e9bc1SDoug Rabson 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
2242bd6be915SJohn Dyson 		new_object->backing_object_offset =
2243a0fce827SJohn Polstra 			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2244bd6be915SJohn Dyson 		new_object->backing_object = source;
2245bd6be915SJohn Dyson 		source->shadow_count++;
2246bd6be915SJohn Dyson 		source->generation++;
2247bd6be915SJohn Dyson 	}
2248bd6be915SJohn Dyson 
2249c0877f10SJohn Dyson 	for (idx = 0; idx < size; idx++) {
2250c0877f10SJohn Dyson 		vm_page_t m;
2251c0877f10SJohn Dyson 
2252c0877f10SJohn Dyson 	retry:
2253c0877f10SJohn Dyson 		m = vm_page_lookup(orig_object, offidxstart + idx);
2254c0877f10SJohn Dyson 		if (m == NULL)
2255c0877f10SJohn Dyson 			continue;
22561c7c3c6aSMatthew Dillon 
22571c7c3c6aSMatthew Dillon 		/*
22581c7c3c6aSMatthew Dillon 		 * We must wait for pending I/O to complete before we can
22591c7c3c6aSMatthew Dillon 		 * rename the page.
2260d1bf5d56SMatthew Dillon 		 *
2261d1bf5d56SMatthew Dillon 		 * We do not have to VM_PROT_NONE the page as mappings should
2262d1bf5d56SMatthew Dillon 		 * not be changed by this operation.
22631c7c3c6aSMatthew Dillon 		 */
22641c7c3c6aSMatthew Dillon 		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2265c0877f10SJohn Dyson 			goto retry;
2266c0877f10SJohn Dyson 
2267e69763a3SDoug Rabson 		vm_page_busy(m);
2268c0877f10SJohn Dyson 		vm_page_rename(m, new_object, idx);
22697dbf82dcSMatthew Dillon 		/* page automatically made dirty by rename and cache handled */
2270e69763a3SDoug Rabson 		vm_page_busy(m);	/* re-busy: rename unbusied the page; the wakeup loop below releases it */
2271c0877f10SJohn Dyson 	}
2272c0877f10SJohn Dyson 
2273c0877f10SJohn Dyson 	if (orig_object->type == OBJT_SWAP) {
2274d474eaaaSDoug Rabson 		vm_object_pip_add(orig_object, 1);
2275c0877f10SJohn Dyson 		/*
2276c0877f10SJohn Dyson 		 * copy orig_object pages into new_object
2277c0877f10SJohn Dyson 		 * and destroy unneeded pages in
2278c0877f10SJohn Dyson 		 * shadow object.
2279c0877f10SJohn Dyson 		 */
22801c7c3c6aSMatthew Dillon 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
2281c0877f10SJohn Dyson 		vm_object_pip_wakeup(orig_object);
2282c0877f10SJohn Dyson 	}
2283c0877f10SJohn Dyson 
228486524867SJohn Dyson 	for (idx = 0; idx < size; idx++) {
228586524867SJohn Dyson 		m = vm_page_lookup(new_object, idx);
228686524867SJohn Dyson 		if (m) {
2287e69763a3SDoug Rabson 			vm_page_wakeup(m);
228886524867SJohn Dyson 		}
228986524867SJohn Dyson 	}
229086524867SJohn Dyson 
2291c0877f10SJohn Dyson 	entry->object.vm_object = new_object;
2292c0877f10SJohn Dyson 	entry->offset = 0LL;
2293c0877f10SJohn Dyson 	vm_object_deallocate(orig_object);
2294c0877f10SJohn Dyson }
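/*
 * Worked example of the index arithmetic in vm_map_split(), assuming 4KB
 * pages: an entry with offset 0x3000 covering 0x5000 bytes gives
 * offidxstart = OFF_TO_IDX(0x3000) = 3, offidxend = 3 + OFF_TO_IDX(0x5000)
 * = 8, and size = 5.  Pages at pindices 3..7 in orig_object are thus
 * renamed to pindices 0..4 in new_object, and entry->offset becomes 0.
 */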
2295c0877f10SJohn Dyson 
2296df8bae1dSRodney W. Grimes /*
2297df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
2298df8bae1dSRodney W. Grimes  *
2299df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
2300df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
2301df8bae1dSRodney W. Grimes  */
2302f708ef1bSPoul-Henning Kamp static void
23031b40f8c0SMatthew Dillon vm_map_copy_entry(
23041b40f8c0SMatthew Dillon 	vm_map_t src_map,
23051b40f8c0SMatthew Dillon 	vm_map_t dst_map,
23061b40f8c0SMatthew Dillon 	vm_map_entry_t src_entry,
23071b40f8c0SMatthew Dillon 	vm_map_entry_t dst_entry)
2308df8bae1dSRodney W. Grimes {
2309c0877f10SJohn Dyson 	vm_object_t src_object;
2310c0877f10SJohn Dyson 
23119fdfe602SMatthew Dillon 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2312df8bae1dSRodney W. Grimes 		return;
2313df8bae1dSRodney W. Grimes 
2314df8bae1dSRodney W. Grimes 	if (src_entry->wired_count == 0) {
2315df8bae1dSRodney W. Grimes 
2316df8bae1dSRodney W. Grimes 		/*
23170d94caffSDavid Greenman 		 * If the source entry is marked needs_copy, it is already
23180d94caffSDavid Greenman 		 * write-protected.
2319df8bae1dSRodney W. Grimes 		 */
2320afa07f7eSJohn Dyson 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2321df8bae1dSRodney W. Grimes 			pmap_protect(src_map->pmap,
2322df8bae1dSRodney W. Grimes 			    src_entry->start,
2323df8bae1dSRodney W. Grimes 			    src_entry->end,
2324df8bae1dSRodney W. Grimes 			    src_entry->protection & ~VM_PROT_WRITE);
2325df8bae1dSRodney W. Grimes 		}
2326b18bfc3dSJohn Dyson 
2327df8bae1dSRodney W. Grimes 		/*
2328df8bae1dSRodney W. Grimes 		 * Make a copy of the object.
2329df8bae1dSRodney W. Grimes 		 */
23308aef1712SMatthew Dillon 		if ((src_object = src_entry->object.vm_object) != NULL) {
2331c0877f10SJohn Dyson 
2332c0877f10SJohn Dyson 			if ((src_object->handle == NULL) &&
2333c0877f10SJohn Dyson 				(src_object->type == OBJT_DEFAULT ||
2334c0877f10SJohn Dyson 				 src_object->type == OBJT_SWAP)) {
2335c0877f10SJohn Dyson 				vm_object_collapse(src_object);
233696fb8cf2SJohn Dyson 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2337c0877f10SJohn Dyson 					vm_map_split(src_entry);
2338c0877f10SJohn Dyson 					src_object = src_entry->object.vm_object;
2339c0877f10SJohn Dyson 				}
2340c0877f10SJohn Dyson 			}
2341c0877f10SJohn Dyson 
2342c0877f10SJohn Dyson 			vm_object_reference(src_object);
2343069e9bc1SDoug Rabson 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2344c0877f10SJohn Dyson 			dst_entry->object.vm_object = src_object;
2345afa07f7eSJohn Dyson 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2346afa07f7eSJohn Dyson 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2347b18bfc3dSJohn Dyson 			dst_entry->offset = src_entry->offset;
2348b18bfc3dSJohn Dyson 		} else {
2349b18bfc3dSJohn Dyson 			dst_entry->object.vm_object = NULL;
2350b18bfc3dSJohn Dyson 			dst_entry->offset = 0;
2351b18bfc3dSJohn Dyson 		}
2352df8bae1dSRodney W. Grimes 
2353df8bae1dSRodney W. Grimes 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2354df8bae1dSRodney W. Grimes 		    dst_entry->end - dst_entry->start, src_entry->start);
23550d94caffSDavid Greenman 	} else {
2356df8bae1dSRodney W. Grimes 		/*
2357df8bae1dSRodney W. Grimes 		 * Of course, wired down pages can't be set copy-on-write.
23580d94caffSDavid Greenman 		 * Cause wired pages to be copied into the new map by
23590d94caffSDavid Greenman 		 * simulating faults (the new pages are pageable)
2360df8bae1dSRodney W. Grimes 		 */
2361df8bae1dSRodney W. Grimes 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2362df8bae1dSRodney W. Grimes 	}
2363df8bae1dSRodney W. Grimes }
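/*
 * Note on the COW handshake above: after vm_map_copy_entry() returns for an
 * unwired entry, both entries reference the same object with MAP_ENTRY_COW
 * and MAP_ENTRY_NEEDS_COPY set, so the first write fault through either map
 * creates a shadow object (see the NEEDS_COPY handling in vm_map_lookup()
 * below) rather than copying pages eagerly here.
 */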
2364df8bae1dSRodney W. Grimes 
2365df8bae1dSRodney W. Grimes /*
2366df8bae1dSRodney W. Grimes  * vmspace_fork:
2367df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
2368df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
2369df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
2370df8bae1dSRodney W. Grimes  * values on the regions in that map.
2371df8bae1dSRodney W. Grimes  *
2372df8bae1dSRodney W. Grimes  * The source map must not be locked.
2373df8bae1dSRodney W. Grimes  */
2374df8bae1dSRodney W. Grimes struct vmspace *
23751b40f8c0SMatthew Dillon vmspace_fork(struct vmspace *vm1)
2376df8bae1dSRodney W. Grimes {
2377c0877f10SJohn Dyson 	struct vmspace *vm2;
2378df8bae1dSRodney W. Grimes 	vm_map_t old_map = &vm1->vm_map;
2379df8bae1dSRodney W. Grimes 	vm_map_t new_map;
2380df8bae1dSRodney W. Grimes 	vm_map_entry_t old_entry;
2381df8bae1dSRodney W. Grimes 	vm_map_entry_t new_entry;
2382de5f6a77SJohn Dyson 	vm_object_t object;
2383df8bae1dSRodney W. Grimes 
23840cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
23850cddd8f0SMatthew Dillon 
2386df8bae1dSRodney W. Grimes 	vm_map_lock(old_map);
2387b823bbd6SMatthew Dillon 	old_map->infork = 1;
2388df8bae1dSRodney W. Grimes 
23892d8acc0fSJohn Dyson 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2390df8bae1dSRodney W. Grimes 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2391582ec34cSAlfred Perlstein 	    (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
2392df8bae1dSRodney W. Grimes 	new_map = &vm2->vm_map;	/* XXX */
239347221757SJohn Dyson 	new_map->timestamp = 1;
2394df8bae1dSRodney W. Grimes 
2395df8bae1dSRodney W. Grimes 	old_entry = old_map->header.next;
2396df8bae1dSRodney W. Grimes 
2397df8bae1dSRodney W. Grimes 	while (old_entry != &old_map->header) {
2398afa07f7eSJohn Dyson 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2399df8bae1dSRodney W. Grimes 			panic("vm_map_fork: encountered a submap");
2400df8bae1dSRodney W. Grimes 
2401df8bae1dSRodney W. Grimes 		switch (old_entry->inheritance) {
2402df8bae1dSRodney W. Grimes 		case VM_INHERIT_NONE:
2403df8bae1dSRodney W. Grimes 			break;
2404df8bae1dSRodney W. Grimes 
2405df8bae1dSRodney W. Grimes 		case VM_INHERIT_SHARE:
2406df8bae1dSRodney W. Grimes 			/*
2407fed9a903SJohn Dyson 			 * Clone the entry, creating the shared object if necessary.
2408fed9a903SJohn Dyson 			 */
2409fed9a903SJohn Dyson 			object = old_entry->object.vm_object;
2410fed9a903SJohn Dyson 			if (object == NULL) {
2411fed9a903SJohn Dyson 				object = vm_object_allocate(OBJT_DEFAULT,
2412c2e11a03SJohn Dyson 					atop(old_entry->end - old_entry->start));
2413fed9a903SJohn Dyson 				old_entry->object.vm_object = object;
2414fed9a903SJohn Dyson 				old_entry->offset = (vm_offset_t) 0;
24159a2f6362SAlan Cox 			}
24169a2f6362SAlan Cox 
24179a2f6362SAlan Cox 			/*
24189a2f6362SAlan Cox 			 * Add the reference before calling vm_object_shadow
24199a2f6362SAlan Cox 			 * to ensure that a shadow object is created.
24209a2f6362SAlan Cox 			 */
24219a2f6362SAlan Cox 			vm_object_reference(object);
24229a2f6362SAlan Cox 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
24235069bf57SJohn Dyson 				vm_object_shadow(&old_entry->object.vm_object,
24245069bf57SJohn Dyson 					&old_entry->offset,
2425c2e11a03SJohn Dyson 					atop(old_entry->end - old_entry->start));
24265069bf57SJohn Dyson 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2427d30344bdSIan Dowse 				/* Transfer the second reference too. */
2428d30344bdSIan Dowse 				vm_object_reference(
2429d30344bdSIan Dowse 				    old_entry->object.vm_object);
2430d30344bdSIan Dowse 				vm_object_deallocate(object);
24315069bf57SJohn Dyson 				object = old_entry->object.vm_object;
2432fed9a903SJohn Dyson 			}
2433069e9bc1SDoug Rabson 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2434fed9a903SJohn Dyson 
2435fed9a903SJohn Dyson 			/*
2436ad5fca3bSAlan Cox 			 * Clone the entry, referencing the shared object.
2437df8bae1dSRodney W. Grimes 			 */
2438df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2439df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
2440028fe6ecSTor Egge 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2441df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
2442df8bae1dSRodney W. Grimes 
2443df8bae1dSRodney W. Grimes 			/*
24440d94caffSDavid Greenman 			 * Insert the entry into the new map -- we know we're
24450d94caffSDavid Greenman 			 * inserting at the end of the new map.
2446df8bae1dSRodney W. Grimes 			 */
2447df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2448df8bae1dSRodney W. Grimes 			    new_entry);
2449df8bae1dSRodney W. Grimes 
2450df8bae1dSRodney W. Grimes 			/*
2451df8bae1dSRodney W. Grimes 			 * Update the physical map
2452df8bae1dSRodney W. Grimes 			 */
2453df8bae1dSRodney W. Grimes 			pmap_copy(new_map->pmap, old_map->pmap,
2454df8bae1dSRodney W. Grimes 			    new_entry->start,
2455df8bae1dSRodney W. Grimes 			    (old_entry->end - old_entry->start),
2456df8bae1dSRodney W. Grimes 			    old_entry->start);
2457df8bae1dSRodney W. Grimes 			break;
2458df8bae1dSRodney W. Grimes 
2459df8bae1dSRodney W. Grimes 		case VM_INHERIT_COPY:
2460df8bae1dSRodney W. Grimes 			/*
2461df8bae1dSRodney W. Grimes 			 * Clone the entry and link into the map.
2462df8bae1dSRodney W. Grimes 			 */
2463df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
2464df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
2465028fe6ecSTor Egge 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2466df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
2467df8bae1dSRodney W. Grimes 			new_entry->object.vm_object = NULL;
2468df8bae1dSRodney W. Grimes 			vm_map_entry_link(new_map, new_map->header.prev,
2469df8bae1dSRodney W. Grimes 			    new_entry);
2470bd7e5f99SJohn Dyson 			vm_map_copy_entry(old_map, new_map, old_entry,
2471bd7e5f99SJohn Dyson 			    new_entry);
2472df8bae1dSRodney W. Grimes 			break;
2473df8bae1dSRodney W. Grimes 		}
2474df8bae1dSRodney W. Grimes 		old_entry = old_entry->next;
2475df8bae1dSRodney W. Grimes 	}
2476df8bae1dSRodney W. Grimes 
2477df8bae1dSRodney W. Grimes 	new_map->size = old_map->size;
2478b823bbd6SMatthew Dillon 	old_map->infork = 0;
2479df8bae1dSRodney W. Grimes 	vm_map_unlock(old_map);
2480df8bae1dSRodney W. Grimes 
2481df8bae1dSRodney W. Grimes 	return (vm2);
2482df8bae1dSRodney W. Grimes }
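/*
 * Illustrative sketch approximating the fork path (cf. vm_forkproc(); the
 * identifiers "flags", "p1", and "p2" are as in the fork code, and the
 * surrounding locking is omitted): when the child does not share the
 * parent's address space, it receives a COW duplicate of it.
 */
#if 0
	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		pmap_pinit2(vmspace_pmap(p2->p_vmspace));
	}
#endif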
2483df8bae1dSRodney W. Grimes 
248494f7e29aSAlan Cox int
248594f7e29aSAlan Cox vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
248694f7e29aSAlan Cox 	      vm_prot_t prot, vm_prot_t max, int cow)
248794f7e29aSAlan Cox {
248894f7e29aSAlan Cox 	vm_map_entry_t prev_entry;
248994f7e29aSAlan Cox 	vm_map_entry_t new_stack_entry;
249094f7e29aSAlan Cox 	vm_size_t      init_ssize;
249194f7e29aSAlan Cox 	int            rv;
249294f7e29aSAlan Cox 
24930cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
24940cddd8f0SMatthew Dillon 
249594f7e29aSAlan Cox 	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
249694f7e29aSAlan Cox 		return (KERN_NO_SPACE);
249794f7e29aSAlan Cox 
2498cbc89bfbSPaul Saab 	if (max_ssize < sgrowsiz)
249994f7e29aSAlan Cox 		init_ssize = max_ssize;
250094f7e29aSAlan Cox 	else
2501cbc89bfbSPaul Saab 		init_ssize = sgrowsiz;
250294f7e29aSAlan Cox 
250394f7e29aSAlan Cox 	vm_map_lock(map);
250494f7e29aSAlan Cox 
250594f7e29aSAlan Cox 	/* If addr is already mapped, no go */
250694f7e29aSAlan Cox 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
250794f7e29aSAlan Cox 		vm_map_unlock(map);
250894f7e29aSAlan Cox 		return (KERN_NO_SPACE);
250994f7e29aSAlan Cox 	}
251094f7e29aSAlan Cox 
251194f7e29aSAlan Cox 	/* If we can't accommodate max_ssize in the current mapping,
251294f7e29aSAlan Cox 	 * no go.  However, we need to be aware that subsequent user
251394f7e29aSAlan Cox 	 * mappings might map into the space we have reserved for
251494f7e29aSAlan Cox 	 * stack, and currently this space is not protected.
251594f7e29aSAlan Cox 	 *
251694f7e29aSAlan Cox 	 * Hopefully we will at least detect this condition
251794f7e29aSAlan Cox 	 * when we try to grow the stack.
251894f7e29aSAlan Cox 	 */
251994f7e29aSAlan Cox 	if ((prev_entry->next != &map->header) &&
252094f7e29aSAlan Cox 	    (prev_entry->next->start < addrbos + max_ssize)) {
252194f7e29aSAlan Cox 		vm_map_unlock(map);
252294f7e29aSAlan Cox 		return (KERN_NO_SPACE);
252394f7e29aSAlan Cox 	}
252494f7e29aSAlan Cox 
252594f7e29aSAlan Cox 	/* We initially map a stack of only init_ssize.  We will
252694f7e29aSAlan Cox 	 * grow as needed later.  Since this is to be a grow-down
252794f7e29aSAlan Cox 	 * stack, we map at the top of the range.
252894f7e29aSAlan Cox 	 *
252994f7e29aSAlan Cox 	 * Note: we would normally expect prot and max to be
253094f7e29aSAlan Cox 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
253194f7e29aSAlan Cox 	 * eliminate these as input parameters, and just
253294f7e29aSAlan Cox 	 * pass these values here in the insert call.
253394f7e29aSAlan Cox 	 */
253494f7e29aSAlan Cox 	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
253594f7e29aSAlan Cox 	                   addrbos + max_ssize, prot, max, cow);
253694f7e29aSAlan Cox 
253794f7e29aSAlan Cox 	/* Now set the avail_ssize amount */
253894f7e29aSAlan Cox 	if (rv == KERN_SUCCESS){
253929b45e9eSAlan Cox 		if (prev_entry != &map->header)
254029b45e9eSAlan Cox 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
254194f7e29aSAlan Cox 		new_stack_entry = prev_entry->next;
254294f7e29aSAlan Cox 		if (new_stack_entry->end   != addrbos + max_ssize ||
254394f7e29aSAlan Cox 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
254494f7e29aSAlan Cox 			panic ("Bad entry start/end for new stack entry");
254594f7e29aSAlan Cox 		else
254694f7e29aSAlan Cox 			new_stack_entry->avail_ssize = max_ssize - init_ssize;
254794f7e29aSAlan Cox 	}
254894f7e29aSAlan Cox 
254994f7e29aSAlan Cox 	vm_map_unlock(map);
255094f7e29aSAlan Cox 	return (rv);
255194f7e29aSAlan Cox }
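/*
 * Illustrative sketch approximating the exec path (USRSTACK and maxssiz
 * come from the machine vmparam settings and tunables; treat this as an
 * approximation): the main process stack is created by reserving maxssiz
 * bytes ending at USRSTACK; vm_map_stack() maps only the initial sgrowsiz
 * portion and records the remainder in avail_ssize for later growth.
 */
#if 0
	rv = vm_map_stack(&vmspace->vm_map, (vm_offset_t)(USRSTACK - maxssiz),
	    (vm_size_t)maxssiz, VM_PROT_ALL, VM_PROT_ALL, 0);
#endif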
255294f7e29aSAlan Cox 
255394f7e29aSAlan Cox /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
255494f7e29aSAlan Cox  * desired address is already mapped, or if we successfully grow
255594f7e29aSAlan Cox  * the stack.  Also returns KERN_SUCCESS if addr is outside the
255694f7e29aSAlan Cox  * stack range (this is strange, but preserves compatibility with
255794f7e29aSAlan Cox  * the grow function in vm_machdep.c).
255894f7e29aSAlan Cox  */
255994f7e29aSAlan Cox int
256094f7e29aSAlan Cox vm_map_growstack (struct proc *p, vm_offset_t addr)
256194f7e29aSAlan Cox {
256294f7e29aSAlan Cox 	vm_map_entry_t prev_entry;
256394f7e29aSAlan Cox 	vm_map_entry_t stack_entry;
256494f7e29aSAlan Cox 	vm_map_entry_t new_stack_entry;
256594f7e29aSAlan Cox 	struct vmspace *vm = p->p_vmspace;
256694f7e29aSAlan Cox 	vm_map_t map = &vm->vm_map;
256794f7e29aSAlan Cox 	vm_offset_t    end;
256894f7e29aSAlan Cox 	int      grow_amount;
256994f7e29aSAlan Cox 	int      rv;
257094f7e29aSAlan Cox 	int      is_procstack;
257123955314SAlfred Perlstein 
25720cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
257323955314SAlfred Perlstein 
257494f7e29aSAlan Cox Retry:
257594f7e29aSAlan Cox 	vm_map_lock_read(map);
257694f7e29aSAlan Cox 
257794f7e29aSAlan Cox 	/* If addr is already in the entry range, no need to grow. */
257894f7e29aSAlan Cox 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
257994f7e29aSAlan Cox 		vm_map_unlock_read(map);
25800cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
258194f7e29aSAlan Cox 	}
258294f7e29aSAlan Cox 
258394f7e29aSAlan Cox 	if ((stack_entry = prev_entry->next) == &map->header) {
258494f7e29aSAlan Cox 		vm_map_unlock_read(map);
25850cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
258694f7e29aSAlan Cox 	}
258794f7e29aSAlan Cox 	if (prev_entry == &map->header)
258894f7e29aSAlan Cox 		end = stack_entry->start - stack_entry->avail_ssize;
258994f7e29aSAlan Cox 	else
259094f7e29aSAlan Cox 		end = prev_entry->end;
259194f7e29aSAlan Cox 
259294f7e29aSAlan Cox 	/* This next test mimics the old grow function in vm_machdep.c.
259394f7e29aSAlan Cox 	 * It really doesn't quite make sense, but we do it anyway
259494f7e29aSAlan Cox 	 * for compatibility.
259594f7e29aSAlan Cox 	 *
259694f7e29aSAlan Cox 	 * If the stack is not growable, return success.  This signals
259794f7e29aSAlan Cox 	 * the caller to proceed as it normally would with ordinary VM.
259894f7e29aSAlan Cox 	 */
259994f7e29aSAlan Cox 	if (stack_entry->avail_ssize < 1 ||
260094f7e29aSAlan Cox 	    addr >= stack_entry->start ||
260194f7e29aSAlan Cox 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
260294f7e29aSAlan Cox 		vm_map_unlock_read(map);
26030cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
260494f7e29aSAlan Cox 	}
260594f7e29aSAlan Cox 
260694f7e29aSAlan Cox 	/* Find the minimum grow amount */
260794f7e29aSAlan Cox 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
260894f7e29aSAlan Cox 	if (grow_amount > stack_entry->avail_ssize) {
260994f7e29aSAlan Cox 		vm_map_unlock_read(map);
26100cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
261194f7e29aSAlan Cox 	}
261294f7e29aSAlan Cox 
261394f7e29aSAlan Cox 	/* If there is no longer enough space between the entries,
261494f7e29aSAlan Cox 	 * refuse to grow, and adjust the available space.  Note: this
261594f7e29aSAlan Cox 	 * should only happen if the user has mapped into the
261694f7e29aSAlan Cox 	 * stack area after the stack was created, and is
261794f7e29aSAlan Cox 	 * probably an error.
261894f7e29aSAlan Cox 	 *
261994f7e29aSAlan Cox 	 * This also effectively destroys any guard page the user
262094f7e29aSAlan Cox 	 * might have intended by limiting the stack size.
262194f7e29aSAlan Cox 	 */
262294f7e29aSAlan Cox 	if (grow_amount > stack_entry->start - end) {
262325adb370SBrian Feldman 		if (vm_map_lock_upgrade(map))
262494f7e29aSAlan Cox 			goto Retry;
262594f7e29aSAlan Cox 
262694f7e29aSAlan Cox 		stack_entry->avail_ssize = stack_entry->start - end;
262794f7e29aSAlan Cox 
262894f7e29aSAlan Cox 		vm_map_unlock(map);
26290cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
263094f7e29aSAlan Cox 	}
263194f7e29aSAlan Cox 
263294f7e29aSAlan Cox 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
263394f7e29aSAlan Cox 
263494f7e29aSAlan Cox 	/* If this is the main process stack, see if we're over the
263594f7e29aSAlan Cox 	 * stack limit.
263694f7e29aSAlan Cox 	 */
26376389da78SAlan Cox 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
263894f7e29aSAlan Cox 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
263994f7e29aSAlan Cox 		vm_map_unlock_read(map);
26400cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
264194f7e29aSAlan Cox 	}
264294f7e29aSAlan Cox 
264394f7e29aSAlan Cox 	/* Round up the grow amount to a multiple of sgrowsiz */
2644cbc89bfbSPaul Saab 	grow_amount = roundup (grow_amount, sgrowsiz);
264594f7e29aSAlan Cox 	if (grow_amount > stack_entry->avail_ssize) {
264694f7e29aSAlan Cox 		grow_amount = stack_entry->avail_ssize;
264794f7e29aSAlan Cox 	}
26486389da78SAlan Cox 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
264994f7e29aSAlan Cox 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
265094f7e29aSAlan Cox 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
26516389da78SAlan Cox 		              ctob(vm->vm_ssize);
265294f7e29aSAlan Cox 	}
265394f7e29aSAlan Cox 
265425adb370SBrian Feldman 	if (vm_map_lock_upgrade(map))
265594f7e29aSAlan Cox 		goto Retry;
265694f7e29aSAlan Cox 
265794f7e29aSAlan Cox 	/* Get the preliminary new entry start value */
265894f7e29aSAlan Cox 	addr = stack_entry->start - grow_amount;
265994f7e29aSAlan Cox 
266094f7e29aSAlan Cox 	/* If this puts us into the previous entry, cut back our growth
266194f7e29aSAlan Cox 	 * to the available space.  Also, see the note above.
266294f7e29aSAlan Cox 	 */
266394f7e29aSAlan Cox 	if (addr < end) {
266494f7e29aSAlan Cox 		stack_entry->avail_ssize = stack_entry->start - end;
266594f7e29aSAlan Cox 		addr = end;
266694f7e29aSAlan Cox 	}
266794f7e29aSAlan Cox 
266894f7e29aSAlan Cox 	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
266929b45e9eSAlan Cox 			   VM_PROT_ALL,
267029b45e9eSAlan Cox 			   VM_PROT_ALL,
267194f7e29aSAlan Cox 			   0);
267294f7e29aSAlan Cox 
267394f7e29aSAlan Cox 	/* Adjust the available stack space by the amount we grew. */
267494f7e29aSAlan Cox 	if (rv == KERN_SUCCESS) {
267529b45e9eSAlan Cox 		if (prev_entry != &map->header)
267629b45e9eSAlan Cox 			vm_map_clip_end(map, prev_entry, addr);
267794f7e29aSAlan Cox 		new_stack_entry = prev_entry->next;
267894f7e29aSAlan Cox 		if (new_stack_entry->end   != stack_entry->start  ||
267994f7e29aSAlan Cox 		    new_stack_entry->start != addr)
268094f7e29aSAlan Cox 			panic ("Bad stack grow start/end in new stack entry");
268194f7e29aSAlan Cox 		else {
268294f7e29aSAlan Cox 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
268394f7e29aSAlan Cox 							(new_stack_entry->end -
268494f7e29aSAlan Cox 							 new_stack_entry->start);
268594f7e29aSAlan Cox 			if (is_procstack)
26866389da78SAlan Cox 				vm->vm_ssize += btoc(new_stack_entry->end -
26876389da78SAlan Cox 						     new_stack_entry->start);
268894f7e29aSAlan Cox 		}
268994f7e29aSAlan Cox 	}
269094f7e29aSAlan Cox 
269194f7e29aSAlan Cox 	vm_map_unlock(map);
26920cddd8f0SMatthew Dillon 	return (rv);
269394f7e29aSAlan Cox }
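/*
 * Illustrative sketch of the page-fault path (simplified; "p", "va", and
 * "ftype" stand in for the trap code's locals): before handing a user-mode
 * fault to vm_fault(), the trap code gives the stack a chance to grow down
 * toward the faulting address.
 */
#if 0
	if (vm_map_growstack(p, va) != KERN_SUCCESS)
		return (KERN_FAILURE);
	rv = vm_fault(map, trunc_page(va), ftype, VM_FAULT_NORMAL);
#endif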
269494f7e29aSAlan Cox 
2695df8bae1dSRodney W. Grimes /*
26965856e12eSJohn Dyson  * Unshare the specified VM space for exec.  If other processes are
26975856e12eSJohn Dyson  * mapped to it, then create a new one.  The new vmspace starts out empty.
26985856e12eSJohn Dyson  */
26995856e12eSJohn Dyson void
27001b40f8c0SMatthew Dillon vmspace_exec(struct proc *p)
27011b40f8c0SMatthew Dillon {
27025856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
27035856e12eSJohn Dyson 	struct vmspace *newvmspace;
27045856e12eSJohn Dyson 	vm_map_t map = &p->p_vmspace->vm_map;
27055856e12eSJohn Dyson 
27060cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
27072d8acc0fSJohn Dyson 	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
27085856e12eSJohn Dyson 	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
27095856e12eSJohn Dyson 	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
27105856e12eSJohn Dyson 	/*
27115856e12eSJohn Dyson 	 * This code is written like this for prototype purposes.  The
27125856e12eSJohn Dyson 	 * goal is to avoid running down the vmspace here, but to let the
27135856e12eSJohn Dyson 	 * other processes that are still using the vmspace finally run
27145856e12eSJohn Dyson 	 * it down.  Even though there is little or no chance of blocking
27155856e12eSJohn Dyson 	 * here, it is a good idea to keep this form for future mods.
27165856e12eSJohn Dyson 	 */
27175856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
2718d4da2dbaSAlan Cox 	pmap_pinit2(vmspace_pmap(newvmspace));
271921c641b2SJohn Baldwin 	vmspace_free(oldvmspace);
2720b40ce416SJulian Elischer 	if (p == curthread->td_proc)		/* XXXKSE ? */
2721b40ce416SJulian Elischer 		pmap_activate(curthread);
27225856e12eSJohn Dyson }
27235856e12eSJohn Dyson 
27245856e12eSJohn Dyson /*
27255856e12eSJohn Dyson  * Unshare the specified VM space for forcing COW.  This
27265856e12eSJohn Dyson  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
27275856e12eSJohn Dyson  */
27285856e12eSJohn Dyson void
27291b40f8c0SMatthew Dillon vmspace_unshare(struct proc *p)
27301b40f8c0SMatthew Dillon {
27315856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
27325856e12eSJohn Dyson 	struct vmspace *newvmspace;
27335856e12eSJohn Dyson 
27340cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
27355856e12eSJohn Dyson 	if (oldvmspace->vm_refcnt == 1)
27365856e12eSJohn Dyson 		return;
27375856e12eSJohn Dyson 	newvmspace = vmspace_fork(oldvmspace);
27385856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
2739d4da2dbaSAlan Cox 	pmap_pinit2(vmspace_pmap(newvmspace));
274021c641b2SJohn Baldwin 	vmspace_free(oldvmspace);
2741b40ce416SJulian Elischer 	if (p == curthread->td_proc)		/* XXXKSE ? */
2742b40ce416SJulian Elischer 		pmap_activate(curthread);
27435856e12eSJohn Dyson }
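/*
 * Illustrative sketch (per the comment above; "flags" and "p" stand in for
 * the rfork code's locals): rfork(2) with neither RFPROC nor RFMEM set
 * forces a copy-on-write duplicate of the caller's own address space.
 */
#if 0
	if ((flags & (RFMEM | RFPROC)) == 0)
		vmspace_unshare(p);
#endif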
27445856e12eSJohn Dyson 
27455856e12eSJohn Dyson /*
2746df8bae1dSRodney W. Grimes  *	vm_map_lookup:
2747df8bae1dSRodney W. Grimes  *
2748df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
2749df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
2750df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
2751df8bae1dSRodney W. Grimes  *	type specified.
2752df8bae1dSRodney W. Grimes  *
2753df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
2754df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
2755df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
2756df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
2757df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
2758df8bae1dSRodney W. Grimes  *
2759df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
2760df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
2761df8bae1dSRodney W. Grimes  *
2762df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
2763df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
2764df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
2765df8bae1dSRodney W. Grimes  *	remain the same.
2766df8bae1dSRodney W. Grimes  */
2767df8bae1dSRodney W. Grimes int
2768b9dcd593SBruce Evans vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
2769b9dcd593SBruce Evans 	      vm_offset_t vaddr,
277047221757SJohn Dyson 	      vm_prot_t fault_typea,
2771b9dcd593SBruce Evans 	      vm_map_entry_t *out_entry,	/* OUT */
2772b9dcd593SBruce Evans 	      vm_object_t *object,		/* OUT */
2773b9dcd593SBruce Evans 	      vm_pindex_t *pindex,		/* OUT */
2774b9dcd593SBruce Evans 	      vm_prot_t *out_prot,		/* OUT */
27752d8acc0fSJohn Dyson 	      boolean_t *wired)			/* OUT */
2776df8bae1dSRodney W. Grimes {
2777c0877f10SJohn Dyson 	vm_map_entry_t entry;
2778c0877f10SJohn Dyson 	vm_map_t map = *var_map;
2779c0877f10SJohn Dyson 	vm_prot_t prot;
278047221757SJohn Dyson 	vm_prot_t fault_type = fault_typea;
2781df8bae1dSRodney W. Grimes 
2782df8bae1dSRodney W. Grimes RetryLookup:;
2783df8bae1dSRodney W. Grimes 	/*
2784df8bae1dSRodney W. Grimes 	 * Lookup the faulting address.
2785df8bae1dSRodney W. Grimes 	 */
2786df8bae1dSRodney W. Grimes 
2787df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
2788df8bae1dSRodney W. Grimes #define	RETURN(why) \
2789df8bae1dSRodney W. Grimes 		{ \
2790df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map); \
2791df8bae1dSRodney W. Grimes 		return (why); \
2792df8bae1dSRodney W. Grimes 		}
2793df8bae1dSRodney W. Grimes 
2794df8bae1dSRodney W. Grimes 	/*
27950d94caffSDavid Greenman 	 * If the map has an interesting hint, try it before calling the
27960d94caffSDavid Greenman 	 * full-blown lookup routine.
2797df8bae1dSRodney W. Grimes 	 */
27984e94f402SAlan Cox 	entry = map->root;
2799df8bae1dSRodney W. Grimes 	*out_entry = entry;
28004e94f402SAlan Cox 	if (entry == NULL ||
2801df8bae1dSRodney W. Grimes 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
2802df8bae1dSRodney W. Grimes 		/*
28030d94caffSDavid Greenman 		 * Entry was either not a valid hint, or the vaddr was not
28040d94caffSDavid Greenman 		 * contained in the entry, so do a full lookup.
2805df8bae1dSRodney W. Grimes 		 */
28064e94f402SAlan Cox 		if (!vm_map_lookup_entry(map, vaddr, out_entry))
2807df8bae1dSRodney W. Grimes 			RETURN(KERN_INVALID_ADDRESS);
2808df8bae1dSRodney W. Grimes 
28094e94f402SAlan Cox 		entry = *out_entry;
2810df8bae1dSRodney W. Grimes 	}
2811b7b2aac2SJohn Dyson 
2812df8bae1dSRodney W. Grimes 	/*
2813df8bae1dSRodney W. Grimes 	 * Handle submaps.
2814df8bae1dSRodney W. Grimes 	 */
2815afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2816df8bae1dSRodney W. Grimes 		vm_map_t old_map = map;
2817df8bae1dSRodney W. Grimes 
2818df8bae1dSRodney W. Grimes 		*var_map = map = entry->object.sub_map;
2819df8bae1dSRodney W. Grimes 		vm_map_unlock_read(old_map);
2820df8bae1dSRodney W. Grimes 		goto RetryLookup;
2821df8bae1dSRodney W. Grimes 	}
2822a04c970aSJohn Dyson 
2823df8bae1dSRodney W. Grimes 	/*
28240d94caffSDavid Greenman 	 * Check whether this task is allowed to have this page.
2825a04c970aSJohn Dyson 	 * Note the special case for MAP_ENTRY_COW
2826a04c970aSJohn Dyson 	 * pages with an override.  This is to implement a forced
2827a04c970aSJohn Dyson 	 * COW for debuggers.
2828df8bae1dSRodney W. Grimes 	 */
2829480ba2f5SJohn Dyson 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
2830480ba2f5SJohn Dyson 		prot = entry->max_protection;
2831480ba2f5SJohn Dyson 	else
2832df8bae1dSRodney W. Grimes 		prot = entry->protection;
283347221757SJohn Dyson 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
283447221757SJohn Dyson 	if ((fault_type & prot) != fault_type) {
283547221757SJohn Dyson 		RETURN(KERN_PROTECTION_FAILURE);
283647221757SJohn Dyson 	}
28372ed14a92SAlan Cox 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
283847221757SJohn Dyson 	    (entry->eflags & MAP_ENTRY_COW) &&
28392ed14a92SAlan Cox 	    (fault_type & VM_PROT_WRITE) &&
284047221757SJohn Dyson 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2841df8bae1dSRodney W. Grimes 		RETURN(KERN_PROTECTION_FAILURE);
2842a04c970aSJohn Dyson 	}
2843df8bae1dSRodney W. Grimes 
2844df8bae1dSRodney W. Grimes 	/*
28450d94caffSDavid Greenman 	 * If this page is not pageable, we have to get it for all possible
28460d94caffSDavid Greenman 	 * accesses.
2847df8bae1dSRodney W. Grimes 	 */
284805f0fdd2SPoul-Henning Kamp 	*wired = (entry->wired_count != 0);
284905f0fdd2SPoul-Henning Kamp 	if (*wired)
2850df8bae1dSRodney W. Grimes 		prot = fault_type = entry->protection;
2851df8bae1dSRodney W. Grimes 
2852df8bae1dSRodney W. Grimes 	/*
2853df8bae1dSRodney W. Grimes 	 * If the entry was copy-on-write, we either ...
2854df8bae1dSRodney W. Grimes 	 */
2855afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2856df8bae1dSRodney W. Grimes 		/*
28570d94caffSDavid Greenman 		 * If we want to write the page, we may as well handle that
2858ad5fca3bSAlan Cox 		 * now since we've got the map locked.
2859df8bae1dSRodney W. Grimes 		 *
28600d94caffSDavid Greenman 		 * If we don't need to write the page, we just demote the
28610d94caffSDavid Greenman 		 * permissions allowed.
2862df8bae1dSRodney W. Grimes 		 */
2863df8bae1dSRodney W. Grimes 		if (fault_type & VM_PROT_WRITE) {
2864df8bae1dSRodney W. Grimes 			/*
28650d94caffSDavid Greenman 			 * Make a new object, and place it in the object
28660d94caffSDavid Greenman 			 * chain.  Note that no new references have appeared
2867ad5fca3bSAlan Cox 			 * -- one just moved from the map to the new
28680d94caffSDavid Greenman 			 * object.
2869df8bae1dSRodney W. Grimes 			 */
287025adb370SBrian Feldman 			if (vm_map_lock_upgrade(map))
2871df8bae1dSRodney W. Grimes 				goto RetryLookup;
287215fdd586SAlan Cox 			mtx_lock(&Giant);
2873df8bae1dSRodney W. Grimes 			vm_object_shadow(
2874df8bae1dSRodney W. Grimes 			    &entry->object.vm_object,
2875df8bae1dSRodney W. Grimes 			    &entry->offset,
2876c2e11a03SJohn Dyson 			    atop(entry->end - entry->start));
287715fdd586SAlan Cox 			mtx_unlock(&Giant);
2878afa07f7eSJohn Dyson 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
28799b09b6c7SMatthew Dillon 			vm_map_lock_downgrade(map);
28800d94caffSDavid Greenman 		} else {
2881df8bae1dSRodney W. Grimes 			/*
28820d94caffSDavid Greenman 			 * We're attempting to read a copy-on-write page --
28830d94caffSDavid Greenman 			 * don't allow writes.
2884df8bae1dSRodney W. Grimes 			 */
28852d8acc0fSJohn Dyson 			prot &= ~VM_PROT_WRITE;
2886df8bae1dSRodney W. Grimes 		}
2887df8bae1dSRodney W. Grimes 	}
28882d8acc0fSJohn Dyson 
2889df8bae1dSRodney W. Grimes 	/*
2890df8bae1dSRodney W. Grimes 	 * Create an object if necessary.
2891df8bae1dSRodney W. Grimes 	 */
28924e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL &&
28934e71e795SMatthew Dillon 	    !map->system_map) {
289425adb370SBrian Feldman 		if (vm_map_lock_upgrade(map))
2895df8bae1dSRodney W. Grimes 			goto RetryLookup;
289624a1cce3SDavid Greenman 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2897c2e11a03SJohn Dyson 		    atop(entry->end - entry->start));
2898df8bae1dSRodney W. Grimes 		entry->offset = 0;
28999b09b6c7SMatthew Dillon 		vm_map_lock_downgrade(map);
2900df8bae1dSRodney W. Grimes 	}
2901b5b40fa6SJohn Dyson 
2902df8bae1dSRodney W. Grimes 	/*
29030d94caffSDavid Greenman 	 * Return the object/offset from this entry.  If the entry was
29040d94caffSDavid Greenman 	 * copy-on-write or empty, it has been fixed up.
2905df8bae1dSRodney W. Grimes 	 */
29069b09b6c7SMatthew Dillon 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
2907df8bae1dSRodney W. Grimes 	*object = entry->object.vm_object;
2908df8bae1dSRodney W. Grimes 
2909df8bae1dSRodney W. Grimes 	/*
2910df8bae1dSRodney W. Grimes 	 * Return the protection with which the access may proceed.
2911df8bae1dSRodney W. Grimes 	 */
2912df8bae1dSRodney W. Grimes 	*out_prot = prot;
2913df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
2914df8bae1dSRodney W. Grimes 
2915df8bae1dSRodney W. Grimes #undef	RETURN
2916df8bae1dSRodney W. Grimes }
2917df8bae1dSRodney W. Grimes 
2918df8bae1dSRodney W. Grimes /*
2919df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
2920df8bae1dSRodney W. Grimes  *
2921df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
2922df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
2923df8bae1dSRodney W. Grimes  */
29240d94caffSDavid Greenman void
29251b40f8c0SMatthew Dillon vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
2926df8bae1dSRodney W. Grimes {
2927df8bae1dSRodney W. Grimes 	/*
2928df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
2929df8bae1dSRodney W. Grimes 	 */
2930df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
2931df8bae1dSRodney W. Grimes }
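/*
 * Illustrative sketch of the lookup protocol (simplified from the
 * vm_fault() usage; locals mirror that function's): the object and pindex
 * returned by vm_map_lookup() are only guaranteed until the matching
 * vm_map_lookup_done() call, which drops the read lock taken by the lookup.
 */
#if 0
	result = vm_map_lookup(&map, vaddr, fault_type, &entry,
	    &first_object, &first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS)
		return (result);
	/* ... page in from first_object at first_pindex ... */
	vm_map_lookup_done(map, entry);
#endif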
2932df8bae1dSRodney W. Grimes 
2933c50fe92bSAlan Cox #ifdef ENABLE_VFS_IOOPT
29341efb74fbSJohn Dyson /*
2935c50fe92bSAlan Cox  * Experimental support for zero-copy I/O
2936c50fe92bSAlan Cox  *
29371efb74fbSJohn Dyson  * Implement uiomove with VM operations.  This, together with collateral
29381efb74fbSJohn Dyson  * changes, supports every combination of source object modification and
29391efb74fbSJohn Dyson  * COW-type operations.
29401efb74fbSJohn Dyson  */
29411efb74fbSJohn Dyson int
29421b40f8c0SMatthew Dillon vm_uiomove(
29431b40f8c0SMatthew Dillon 	vm_map_t mapa,
29441b40f8c0SMatthew Dillon 	vm_object_t srcobject,
29451b40f8c0SMatthew Dillon 	off_t cp,
29461b40f8c0SMatthew Dillon 	int cnta,
29471b40f8c0SMatthew Dillon 	vm_offset_t uaddra,
29481b40f8c0SMatthew Dillon 	int *npages)
29491efb74fbSJohn Dyson {
29501efb74fbSJohn Dyson 	vm_map_t map;
295147221757SJohn Dyson 	vm_object_t first_object, oldobject, object;
29522d8acc0fSJohn Dyson 	vm_map_entry_t entry;
29531efb74fbSJohn Dyson 	vm_prot_t prot;
29542d8acc0fSJohn Dyson 	boolean_t wired;
29551efb74fbSJohn Dyson 	int tcnt, rv;
29562d8acc0fSJohn Dyson 	vm_offset_t uaddr, start, end, tend;
29571efb74fbSJohn Dyson 	vm_pindex_t first_pindex, osize, oindex;
29581efb74fbSJohn Dyson 	off_t ooffset;
295947221757SJohn Dyson 	int cnt;
29601efb74fbSJohn Dyson 
29610cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
29620cddd8f0SMatthew Dillon 
296395e5e988SJohn Dyson 	if (npages)
296495e5e988SJohn Dyson 		*npages = 0;
296595e5e988SJohn Dyson 
296647221757SJohn Dyson 	cnt = cnta;
29672d8acc0fSJohn Dyson 	uaddr = uaddra;
29682d8acc0fSJohn Dyson 
29691efb74fbSJohn Dyson 	while (cnt > 0) {
29701efb74fbSJohn Dyson 		map = mapa;
29711efb74fbSJohn Dyson 
29721efb74fbSJohn Dyson 		if ((vm_map_lookup(&map, uaddr,
29732d8acc0fSJohn Dyson 			VM_PROT_READ, &entry, &first_object,
29742d8acc0fSJohn Dyson 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
29751efb74fbSJohn Dyson 			return EFAULT;
29761efb74fbSJohn Dyson 		}
29771efb74fbSJohn Dyson 
29782d8acc0fSJohn Dyson 		vm_map_clip_start(map, entry, uaddr);
29791efb74fbSJohn Dyson 
29801efb74fbSJohn Dyson 		tcnt = cnt;
29812d8acc0fSJohn Dyson 		tend = uaddr + tcnt;
29822d8acc0fSJohn Dyson 		if (tend > entry->end) {
29832d8acc0fSJohn Dyson 			tcnt = entry->end - uaddr;
29842d8acc0fSJohn Dyson 			tend = entry->end;
29852d8acc0fSJohn Dyson 		}
29861efb74fbSJohn Dyson 
29872d8acc0fSJohn Dyson 		vm_map_clip_end(map, entry, tend);
29881efb74fbSJohn Dyson 
29892d8acc0fSJohn Dyson 		start = entry->start;
29902d8acc0fSJohn Dyson 		end = entry->end;
29911efb74fbSJohn Dyson 
2992c2e11a03SJohn Dyson 		osize = atop(tcnt);
299395e5e988SJohn Dyson 
2994925a3a41SJohn Dyson 		oindex = OFF_TO_IDX(cp);
299595e5e988SJohn Dyson 		if (npages) {
2996925a3a41SJohn Dyson 			vm_pindex_t idx;
299795e5e988SJohn Dyson 			for (idx = 0; idx < osize; idx++) {
299895e5e988SJohn Dyson 				vm_page_t m;
2999925a3a41SJohn Dyson 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
30002d8acc0fSJohn Dyson 					vm_map_lookup_done(map, entry);
300195e5e988SJohn Dyson 					return 0;
300295e5e988SJohn Dyson 				}
30031c7c3c6aSMatthew Dillon 				/*
30041c7c3c6aSMatthew Dillon 				 * disallow busy or invalid pages, but allow
30051c7c3c6aSMatthew Dillon 				 * m->busy pages if they are entirely valid.
30061c7c3c6aSMatthew Dillon 				 */
3007925a3a41SJohn Dyson 				if ((m->flags & PG_BUSY) ||
300895e5e988SJohn Dyson 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
30092d8acc0fSJohn Dyson 					vm_map_lookup_done(map, entry);
301095e5e988SJohn Dyson 					return 0;
301195e5e988SJohn Dyson 				}
301295e5e988SJohn Dyson 			}
301395e5e988SJohn Dyson 		}
301495e5e988SJohn Dyson 
30151efb74fbSJohn Dyson /*
30161efb74fbSJohn Dyson  * If we are changing an existing map entry, just redirect
30171efb74fbSJohn Dyson  * the object, and change mappings.
30181efb74fbSJohn Dyson  */
30192d8acc0fSJohn Dyson 		if ((first_object->type == OBJT_VNODE) &&
30202d8acc0fSJohn Dyson 			((oldobject = entry->object.vm_object) == first_object)) {
30212d8acc0fSJohn Dyson 
30222d8acc0fSJohn Dyson 			if ((entry->offset != cp) || (oldobject != srcobject)) {
30232d8acc0fSJohn Dyson 				/*
30242d8acc0fSJohn Dyson 				 * Remove old window into the file
30252d8acc0fSJohn Dyson 				 */
30262d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
30272d8acc0fSJohn Dyson 
30282d8acc0fSJohn Dyson 				/*
30292d8acc0fSJohn Dyson 				 * Force copy-on-write for mmapped regions
30302d8acc0fSJohn Dyson 				 */
30312d8acc0fSJohn Dyson 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
30322d8acc0fSJohn Dyson 
30332d8acc0fSJohn Dyson 				/*
30342d8acc0fSJohn Dyson 				 * Point the object appropriately
30352d8acc0fSJohn Dyson 				 */
30362d8acc0fSJohn Dyson 				if (oldobject != srcobject) {
30372d8acc0fSJohn Dyson 
30382d8acc0fSJohn Dyson 				/*
30392d8acc0fSJohn Dyson 				 * Set the object optimization hint flag
30402d8acc0fSJohn Dyson 				 */
3041069e9bc1SDoug Rabson 					vm_object_set_flag(srcobject, OBJ_OPT);
30422d8acc0fSJohn Dyson 					vm_object_reference(srcobject);
30432d8acc0fSJohn Dyson 					entry->object.vm_object = srcobject;
30442d8acc0fSJohn Dyson 
30452d8acc0fSJohn Dyson 					if (oldobject) {
30462d8acc0fSJohn Dyson 						vm_object_deallocate(oldobject);
30472d8acc0fSJohn Dyson 					}
30482d8acc0fSJohn Dyson 				}
30492d8acc0fSJohn Dyson 
30502d8acc0fSJohn Dyson 				entry->offset = cp;
30512d8acc0fSJohn Dyson 				map->timestamp++;
30522d8acc0fSJohn Dyson 			} else {
30532d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
30542d8acc0fSJohn Dyson 			}
30552d8acc0fSJohn Dyson 
30562d8acc0fSJohn Dyson 		} else if ((first_object->ref_count == 1) &&
3057925a3a41SJohn Dyson 			(first_object->size == osize) &&
305847221757SJohn Dyson 			((first_object->type == OBJT_DEFAULT) ||
305947221757SJohn Dyson 				(first_object->type == OBJT_SWAP)) ) {
3060925a3a41SJohn Dyson 
3061925a3a41SJohn Dyson 			oldobject = first_object->backing_object;
3062925a3a41SJohn Dyson 
3063925a3a41SJohn Dyson 			if ((first_object->backing_object_offset != cp) ||
3064925a3a41SJohn Dyson 				(oldobject != srcobject)) {
3065925a3a41SJohn Dyson 				/*
3066925a3a41SJohn Dyson 				 * Remove old window into the file
3067925a3a41SJohn Dyson 				 */
30682d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
3069925a3a41SJohn Dyson 
3070925a3a41SJohn Dyson 				/*
307147221757SJohn Dyson 				 * Remove unneeded old pages
307247221757SJohn Dyson 				 */
307347221757SJohn Dyson 				vm_object_page_remove(first_object, 0, 0, 0);
307447221757SJohn Dyson 
307547221757SJohn Dyson 				/*
307647221757SJohn Dyson 				 * Invalidate swap space
307747221757SJohn Dyson 				 */
307847221757SJohn Dyson 				if (first_object->type == OBJT_SWAP) {
307947221757SJohn Dyson 					swap_pager_freespace(first_object,
30801c7c3c6aSMatthew Dillon 						0,
308147221757SJohn Dyson 						first_object->size);
308247221757SJohn Dyson 				}
308347221757SJohn Dyson 
308447221757SJohn Dyson 				/*
3085925a3a41SJohn Dyson 				 * Force copy-on-write for mmapped regions
3086925a3a41SJohn Dyson 				 */
308747221757SJohn Dyson 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
30881efb74fbSJohn Dyson 
30891efb74fbSJohn Dyson 				/*
30901efb74fbSJohn Dyson 				 * Point the object appropriately
30911efb74fbSJohn Dyson 				 */
3092925a3a41SJohn Dyson 				if (oldobject != srcobject) {
3093925a3a41SJohn Dyson 					/*
3094925a3a41SJohn Dyson 					 * Set the object optimization hint flag
3095925a3a41SJohn Dyson 					 */
3096069e9bc1SDoug Rabson 					vm_object_set_flag(srcobject, OBJ_OPT);
3097925a3a41SJohn Dyson 					vm_object_reference(srcobject);
3098925a3a41SJohn Dyson 
3099925a3a41SJohn Dyson 					if (oldobject) {
3100925a3a41SJohn Dyson 						TAILQ_REMOVE(&oldobject->shadow_head,
3101925a3a41SJohn Dyson 							first_object, shadow_list);
3102925a3a41SJohn Dyson 						oldobject->shadow_count--;
3103b4309055SMatthew Dillon 						/* XXX bump generation? */
3104925a3a41SJohn Dyson 						vm_object_deallocate(oldobject);
3105925a3a41SJohn Dyson 					}
3106925a3a41SJohn Dyson 
3107925a3a41SJohn Dyson 					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
3108925a3a41SJohn Dyson 						first_object, shadow_list);
3109925a3a41SJohn Dyson 					srcobject->shadow_count++;
3110b4309055SMatthew Dillon 					/* XXX bump generation? */
3111925a3a41SJohn Dyson 
3112925a3a41SJohn Dyson 					first_object->backing_object = srcobject;
3113925a3a41SJohn Dyson 				}
31141efb74fbSJohn Dyson 				first_object->backing_object_offset = cp;
31152d8acc0fSJohn Dyson 				map->timestamp++;
3116925a3a41SJohn Dyson 			} else {
31172d8acc0fSJohn Dyson 				pmap_remove (map->pmap, uaddr, tend);
3118925a3a41SJohn Dyson 			}
31191efb74fbSJohn Dyson /*
31201efb74fbSJohn Dyson  * Otherwise, we have to do a logical mmap.
31211efb74fbSJohn Dyson  */
31221efb74fbSJohn Dyson 		} else {
31231efb74fbSJohn Dyson 
3124069e9bc1SDoug Rabson 			vm_object_set_flag(srcobject, OBJ_OPT);
3125925a3a41SJohn Dyson 			vm_object_reference(srcobject);
31261efb74fbSJohn Dyson 
31272d8acc0fSJohn Dyson 			pmap_remove (map->pmap, uaddr, tend);
31281efb74fbSJohn Dyson 
312947221757SJohn Dyson 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
313025adb370SBrian Feldman 			vm_map_lock_upgrade(map);
31311efb74fbSJohn Dyson 
31322d8acc0fSJohn Dyson 			if (entry == &map->header) {
31331efb74fbSJohn Dyson 				map->first_free = &map->header;
31341efb74fbSJohn Dyson 			} else if (map->first_free->start >= start) {
31352d8acc0fSJohn Dyson 				map->first_free = entry->prev;
31361efb74fbSJohn Dyson 			}
31371efb74fbSJohn Dyson 
31382d8acc0fSJohn Dyson 			vm_map_entry_delete(map, entry);
31391efb74fbSJohn Dyson 
31402d8acc0fSJohn Dyson 			object = srcobject;
31412d8acc0fSJohn Dyson 			ooffset = cp;
31422d8acc0fSJohn Dyson 
31432d8acc0fSJohn Dyson 			rv = vm_map_insert(map, object, ooffset, start, tend,
3144e5f13bddSAlan Cox 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
31451efb74fbSJohn Dyson 
31461efb74fbSJohn Dyson 			if (rv != KERN_SUCCESS)
31471efb74fbSJohn Dyson 				panic("vm_uiomove: could not insert new entry: %d", rv);
31481efb74fbSJohn Dyson 		}
31491efb74fbSJohn Dyson 
31501efb74fbSJohn Dyson /*
31511efb74fbSJohn Dyson  * Map the window directly, if it is already in memory
31521efb74fbSJohn Dyson  */
31532d8acc0fSJohn Dyson 		pmap_object_init_pt(map->pmap, uaddr,
31542d8acc0fSJohn Dyson 			srcobject, oindex, tcnt, 0);
31551efb74fbSJohn Dyson 
315647221757SJohn Dyson 		map->timestamp++;
31571efb74fbSJohn Dyson 		vm_map_unlock(map);
31581efb74fbSJohn Dyson 
31591efb74fbSJohn Dyson 		cnt -= tcnt;
31602d8acc0fSJohn Dyson 		uaddr += tcnt;
31611efb74fbSJohn Dyson 		cp += tcnt;
316295e5e988SJohn Dyson 		if (npages)
316395e5e988SJohn Dyson 			*npages += osize;
31641efb74fbSJohn Dyson 	}
31651efb74fbSJohn Dyson 	return 0;
31661efb74fbSJohn Dyson }
3167c50fe92bSAlan Cox #endif
31681efb74fbSJohn Dyson 
3169c7c34a24SBruce Evans #include "opt_ddb.h"
3170c3cb3e12SDavid Greenman #ifdef DDB
3171c7c34a24SBruce Evans #include <sys/kernel.h>
3172c7c34a24SBruce Evans 
3173c7c34a24SBruce Evans #include <ddb/ddb.h>
3174c7c34a24SBruce Evans 
3175df8bae1dSRodney W. Grimes /*
3176df8bae1dSRodney W. Grimes  *	vm_map_print:	[ debug ]
3177df8bae1dSRodney W. Grimes  */
3178c7c34a24SBruce Evans DB_SHOW_COMMAND(map, vm_map_print)
3179df8bae1dSRodney W. Grimes {
318095e5e988SJohn Dyson 	static int nlines;
3181c7c34a24SBruce Evans 	/* XXX convert args. */
3182c0877f10SJohn Dyson 	vm_map_t map = (vm_map_t)addr;
3183c7c34a24SBruce Evans 	boolean_t full = have_addr;
3184df8bae1dSRodney W. Grimes 
3185c0877f10SJohn Dyson 	vm_map_entry_t entry;
3186c7c34a24SBruce Evans 
3187e5f251d2SAlan Cox 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3188e5f251d2SAlan Cox 	    (void *)map,
3189101eeb7fSBruce Evans 	    (void *)map->pmap, map->nentries, map->timestamp);
319095e5e988SJohn Dyson 	nlines++;
3191df8bae1dSRodney W. Grimes 
3192c7c34a24SBruce Evans 	if (!full && db_indent)
3193df8bae1dSRodney W. Grimes 		return;
3194df8bae1dSRodney W. Grimes 
3195c7c34a24SBruce Evans 	db_indent += 2;
3196df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
3197df8bae1dSRodney W. Grimes 	    entry = entry->next) {
3198fc62ef1fSBruce Evans 		db_iprintf("map entry %p: start=%p, end=%p\n",
3199fc62ef1fSBruce Evans 		    (void *)entry, (void *)entry->start, (void *)entry->end);
320095e5e988SJohn Dyson 		nlines++;
3201e5f251d2SAlan Cox 		{
3202df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
3203df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
32040d94caffSDavid Greenman 
320595e5e988SJohn Dyson 			db_iprintf(" prot=%x/%x/%s",
3206df8bae1dSRodney W. Grimes 			    entry->protection,
3207df8bae1dSRodney W. Grimes 			    entry->max_protection,
32088aef1712SMatthew Dillon 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3209df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
321095e5e988SJohn Dyson 				db_printf(", wired");
3211df8bae1dSRodney W. Grimes 		}
32129fdfe602SMatthew Dillon 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3213101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3214101eeb7fSBruce Evans 			db_printf(", share=%p, offset=0x%lx\n",
32159fdfe602SMatthew Dillon 			    (void *)entry->object.sub_map,
3216ecbb00a2SDoug Rabson 			    (long)entry->offset);
321795e5e988SJohn Dyson 			nlines++;
3218df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
32199fdfe602SMatthew Dillon 			    (entry->prev->object.sub_map !=
32209fdfe602SMatthew Dillon 				entry->object.sub_map)) {
3221c7c34a24SBruce Evans 				db_indent += 2;
3222101eeb7fSBruce Evans 				vm_map_print((db_expr_t)(intptr_t)
32239fdfe602SMatthew Dillon 					     entry->object.sub_map,
3224914181e7SBruce Evans 					     full, 0, (char *)0);
3225c7c34a24SBruce Evans 				db_indent -= 2;
3226df8bae1dSRodney W. Grimes 			}
32270d94caffSDavid Greenman 		} else {
3228101eeb7fSBruce Evans 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3229101eeb7fSBruce Evans 			db_printf(", object=%p, offset=0x%lx",
3230101eeb7fSBruce Evans 			    (void *)entry->object.vm_object,
3231ecbb00a2SDoug Rabson 			    (long)entry->offset);
3232afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
3233c7c34a24SBruce Evans 				db_printf(", copy (%s)",
3234afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3235c7c34a24SBruce Evans 			db_printf("\n");
323695e5e988SJohn Dyson 			nlines++;
3237df8bae1dSRodney W. Grimes 
3238df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
3239df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
3240df8bae1dSRodney W. Grimes 				entry->object.vm_object)) {
3241c7c34a24SBruce Evans 				db_indent += 2;
3242101eeb7fSBruce Evans 				vm_object_print((db_expr_t)(intptr_t)
3243101eeb7fSBruce Evans 						entry->object.vm_object,
3244914181e7SBruce Evans 						full, 0, (char *)0);
324595e5e988SJohn Dyson 				nlines += 4;
3246c7c34a24SBruce Evans 				db_indent -= 2;
3247df8bae1dSRodney W. Grimes 			}
3248df8bae1dSRodney W. Grimes 		}
3249df8bae1dSRodney W. Grimes 	}
3250c7c34a24SBruce Evans 	db_indent -= 2;
325195e5e988SJohn Dyson 	if (db_indent == 0)
325295e5e988SJohn Dyson 		nlines = 0;
3253df8bae1dSRodney W. Grimes }
325495e5e988SJohn Dyson 
325695e5e988SJohn Dyson DB_SHOW_COMMAND(procvm, procvm)
325795e5e988SJohn Dyson {
325895e5e988SJohn Dyson 	struct proc *p;
325995e5e988SJohn Dyson 
326095e5e988SJohn Dyson 	if (have_addr) {
326195e5e988SJohn Dyson 		p = (struct proc *) addr;
326295e5e988SJohn Dyson 	} else {
326395e5e988SJohn Dyson 		p = curproc;
326495e5e988SJohn Dyson 	}
326595e5e988SJohn Dyson 
3266ac1e407bSBruce Evans 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3267ac1e407bSBruce Evans 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3268b1028ad1SLuoqi Chen 	    (void *)vmspace_pmap(p->p_vmspace));
326995e5e988SJohn Dyson 
3270101eeb7fSBruce Evans 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
327195e5e988SJohn Dyson }
327295e5e988SJohn Dyson 
3273c7c34a24SBruce Evans #endif /* DDB */
3274