xref: /freebsd/sys/vm/vm_map.c (revision 3db161e07937ad130a89eb68afb1967c9ee5c7dd)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
175929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
18df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
19df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
20df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
21df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
22df8bae1dSRodney W. Grimes  *    without specific prior written permission.
23df8bae1dSRodney W. Grimes  *
24df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
35df8bae1dSRodney W. Grimes  *
363c4dd356SDavid Greenman  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  *
39df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40df8bae1dSRodney W. Grimes  * All rights reserved.
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43df8bae1dSRodney W. Grimes  *
44df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
45df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
46df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
47df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
48df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
55df8bae1dSRodney W. Grimes  *
56df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57df8bae1dSRodney W. Grimes  *  School of Computer Science
58df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
59df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
60df8bae1dSRodney W. Grimes  *
61df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
62df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
633c4dd356SDavid Greenman  *
64c3aac50fSPeter Wemm  * $FreeBSD$
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes /*
68df8bae1dSRodney W. Grimes  *	Virtual memory mapping module.
69df8bae1dSRodney W. Grimes  */
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes #include <sys/param.h>
72df8bae1dSRodney W. Grimes #include <sys/systm.h>
7361d80e90SJohn Baldwin #include <sys/ktr.h>
74fb919e4dSMark Murray #include <sys/lock.h>
75fb919e4dSMark Murray #include <sys/mutex.h>
76b5e8ce9fSBruce Evans #include <sys/proc.h>
77efeaf95aSDavid Greenman #include <sys/vmmeter.h>
78867a482dSJohn Dyson #include <sys/mman.h>
791efb74fbSJohn Dyson #include <sys/vnode.h>
802267af78SJulian Elischer #include <sys/resourcevar.h>
8105ba50f5SJake Burkholder #include <sys/sysent.h>
82cd034a5bSMaxime Henrion #include <sys/stdint.h>
833db161e0SMatthew Dillon #include <sys/shm.h>
84df8bae1dSRodney W. Grimes 
85df8bae1dSRodney W. Grimes #include <vm/vm.h>
86efeaf95aSDavid Greenman #include <vm/vm_param.h>
87efeaf95aSDavid Greenman #include <vm/pmap.h>
88efeaf95aSDavid Greenman #include <vm/vm_map.h>
89df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
90df8bae1dSRodney W. Grimes #include <vm/vm_object.h>
9147221757SJohn Dyson #include <vm/vm_pager.h>
9226f9a767SRodney W. Grimes #include <vm/vm_kern.h>
93efeaf95aSDavid Greenman #include <vm/vm_extern.h>
9421cd6e62SSeigo Tanimura #include <vm/swap_pager.h>
95670d17b5SJeff Roberson #include <vm/uma.h>
96df8bae1dSRodney W. Grimes 
97df8bae1dSRodney W. Grimes /*
98df8bae1dSRodney W. Grimes  *	Virtual memory maps provide for the mapping, protection,
99df8bae1dSRodney W. Grimes  *	and sharing of virtual memory objects.  In addition,
100df8bae1dSRodney W. Grimes  *	this module provides for an efficient virtual copy of
101df8bae1dSRodney W. Grimes  *	memory from one map to another.
102df8bae1dSRodney W. Grimes  *
103df8bae1dSRodney W. Grimes  *	Synchronization is required prior to most operations.
104df8bae1dSRodney W. Grimes  *
105df8bae1dSRodney W. Grimes  *	Maps consist of an ordered doubly-linked list of simple
106df8bae1dSRodney W. Grimes  *	entries; a single hint is used to speed up lookups.
107df8bae1dSRodney W. Grimes  *
108956f3135SPhilippe Charnier  *	Since portions of maps are specified by start/end addresses,
109df8bae1dSRodney W. Grimes  *	which may not align with existing map entries, all
110df8bae1dSRodney W. Grimes  *	routines merely "clip" entries to these start/end values.
111df8bae1dSRodney W. Grimes  *	[That is, an entry is split into two, bordering at a
112df8bae1dSRodney W. Grimes  *	start or end value.]  Note that these clippings may not
113df8bae1dSRodney W. Grimes  *	always be necessary (as the two resulting entries are then
114df8bae1dSRodney W. Grimes  *	not changed); however, the clipping is done for convenience.
115df8bae1dSRodney W. Grimes  *
116df8bae1dSRodney W. Grimes  *	As mentioned above, virtual copy operations are performed
117ad5fca3bSAlan Cox  *	by copying VM object references from one map to
118df8bae1dSRodney W. Grimes  *	another, and then marking both regions as copy-on-write.
119df8bae1dSRodney W. Grimes  */
120df8bae1dSRodney W. Grimes 
121df8bae1dSRodney W. Grimes /*
122df8bae1dSRodney W. Grimes  *	vm_map_startup:
123df8bae1dSRodney W. Grimes  *
124df8bae1dSRodney W. Grimes  *	Initialize the vm_map module.  Must be called before
125df8bae1dSRodney W. Grimes  *	any other vm_map routines.
126df8bae1dSRodney W. Grimes  *
127df8bae1dSRodney W. Grimes  *	Map and entry structures are allocated from the general
128df8bae1dSRodney W. Grimes  *	purpose memory pool with some exceptions:
129df8bae1dSRodney W. Grimes  *
130df8bae1dSRodney W. Grimes  *	- The kernel map and kmem submap are allocated statically.
131df8bae1dSRodney W. Grimes  *	- Kernel map entries are allocated out of a static pool.
132df8bae1dSRodney W. Grimes  *
133df8bae1dSRodney W. Grimes  *	These restrictions are necessary since malloc() uses the
134df8bae1dSRodney W. Grimes  *	maps and requires map entries.
135df8bae1dSRodney W. Grimes  */
136df8bae1dSRodney W. Grimes 
1373a92e5d5SAlan Cox static struct mtx map_sleep_mtx;
1388355f576SJeff Roberson static uma_zone_t mapentzone;
1398355f576SJeff Roberson static uma_zone_t kmapentzone;
1408355f576SJeff Roberson static uma_zone_t mapzone;
1418355f576SJeff Roberson static uma_zone_t vmspace_zone;
1428355f576SJeff Roberson static struct vm_object kmapentobj;
1438355f576SJeff Roberson static void vmspace_zinit(void *mem, int size);
1448355f576SJeff Roberson static void vmspace_zfini(void *mem, int size);
1458355f576SJeff Roberson static void vm_map_zinit(void *mem, int size);
1468355f576SJeff Roberson static void vm_map_zfini(void *mem, int size);
1478355f576SJeff Roberson static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
1481fc43fd1SAlan Cox 
1498355f576SJeff Roberson #ifdef INVARIANTS
1508355f576SJeff Roberson static void vm_map_zdtor(void *mem, int size, void *arg);
1518355f576SJeff Roberson static void vmspace_zdtor(void *mem, int size, void *arg);
1528355f576SJeff Roberson #endif
153b18bfc3dSJohn Dyson 
/*
 *	vm_map_startup:
 *
 *	Create the UMA zones for maps and map entries, plus the global
 *	sleep mutex used by vm_map_unlock_and_wait()/vm_map_wakeup().
 *	Must be called before any other vm_map routine.
 */
void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	/* Map structures are never returned to the VM (UMA_ZONE_NOFREE). */
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	/*
	 * Kernel map entries come from a dedicated zone (UMA_ZONE_VM,
	 * backed by kmapentobj — see vm_init2()) so that allocating a
	 * kernel map entry does not recurse into the kernel map itself.
	 */
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	uma_prealloc(kmapentzone, MAX_KMAPENT);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(mapentzone, MAX_MAPENT);
}
174df8bae1dSRodney W. Grimes 
1758355f576SJeff Roberson static void
1768355f576SJeff Roberson vmspace_zfini(void *mem, int size)
1778355f576SJeff Roberson {
1788355f576SJeff Roberson 	struct vmspace *vm;
1798355f576SJeff Roberson 
1808355f576SJeff Roberson 	vm = (struct vmspace *)mem;
1818355f576SJeff Roberson 
1828355f576SJeff Roberson 	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
1838355f576SJeff Roberson }
1848355f576SJeff Roberson 
1858355f576SJeff Roberson static void
1868355f576SJeff Roberson vmspace_zinit(void *mem, int size)
1878355f576SJeff Roberson {
1888355f576SJeff Roberson 	struct vmspace *vm;
1898355f576SJeff Roberson 
1908355f576SJeff Roberson 	vm = (struct vmspace *)mem;
1918355f576SJeff Roberson 
1928355f576SJeff Roberson 	vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
1938355f576SJeff Roberson }
1948355f576SJeff Roberson 
1958355f576SJeff Roberson static void
1968355f576SJeff Roberson vm_map_zfini(void *mem, int size)
1978355f576SJeff Roberson {
1988355f576SJeff Roberson 	vm_map_t map;
1998355f576SJeff Roberson 
2008355f576SJeff Roberson 	map = (vm_map_t)mem;
20136daaecdSAlan Cox 	mtx_destroy(&map->system_mtx);
2028355f576SJeff Roberson 	lockdestroy(&map->lock);
2038355f576SJeff Roberson }
2048355f576SJeff Roberson 
2058355f576SJeff Roberson static void
2068355f576SJeff Roberson vm_map_zinit(void *mem, int size)
2078355f576SJeff Roberson {
2088355f576SJeff Roberson 	vm_map_t map;
2098355f576SJeff Roberson 
2108355f576SJeff Roberson 	map = (vm_map_t)mem;
2118355f576SJeff Roberson 	map->nentries = 0;
2128355f576SJeff Roberson 	map->size = 0;
2138355f576SJeff Roberson 	map->infork = 0;
21436daaecdSAlan Cox 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF);
215515630b1SAlan Cox 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
2168355f576SJeff Roberson }
2178355f576SJeff Roberson 
#ifdef INVARIANTS
/*
 * Debug-only UMA destructor for the vmspace zone: delegates to
 * vm_map_zdtor() to verify the embedded map is empty on free.
 */
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
/*
 * Debug-only UMA destructor for the map zone: a map returned to the
 * zone must have no entries, zero mapped size, and no fork in progress.
 */
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
	KASSERT(map->infork == 0,
	    ("map %p infork == %d on free.",
	    map, map->infork));
}
#endif	/* INVARIANTS */
2458355f576SJeff Roberson 
246df8bae1dSRodney W. Grimes /*
247df8bae1dSRodney W. Grimes  * Allocate a vmspace structure, including a vm_map and pmap,
248df8bae1dSRodney W. Grimes  * and initialize those structures.  The refcnt is set to 1.
249df8bae1dSRodney W. Grimes  * The remaining fields must be initialized by the caller.
250df8bae1dSRodney W. Grimes  */
251df8bae1dSRodney W. Grimes struct vmspace *
2522d8acc0fSJohn Dyson vmspace_alloc(min, max)
253df8bae1dSRodney W. Grimes 	vm_offset_t min, max;
254df8bae1dSRodney W. Grimes {
255c0877f10SJohn Dyson 	struct vmspace *vm;
2560d94caffSDavid Greenman 
2570cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
2588355f576SJeff Roberson 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
25921c641b2SJohn Baldwin 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
2608355f576SJeff Roberson 	_vm_map_init(&vm->vm_map, min, max);
261b1028ad1SLuoqi Chen 	pmap_pinit(vmspace_pmap(vm));
262b1028ad1SLuoqi Chen 	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
263df8bae1dSRodney W. Grimes 	vm->vm_refcnt = 1;
2642d8acc0fSJohn Dyson 	vm->vm_shm = NULL;
265389d2b6eSMatthew Dillon 	vm->vm_exitingcnt = 0;
266df8bae1dSRodney W. Grimes 	return (vm);
267df8bae1dSRodney W. Grimes }
268df8bae1dSRodney W. Grimes 
/*
 * Second-stage VM startup: size the kernel map entry zone's backing
 * object and create the vmspace zone.  Runs after the page counts are
 * known (it reads cnt.v_page_count).
 */
void
vm_init2(void)
{
	/*
	 * Reserve backing for kernel map entries proportional to physical
	 * memory, capped by the size of the kernel address space.
	 */
	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
	    (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	pmap_init2();
}
2833075778bSJohn Dyson 
/*
 * Final teardown of a vmspace: release SysV shm, delete every mapping,
 * release the pmap, and return the structure to its zone.  Reached only
 * once both vm_refcnt and vm_exitingcnt have dropped to zero (see
 * vmspace_free() and vmspace_exitfree()).
 */
static __inline void
vmspace_dofree(struct vmspace *vm)
{
	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	vm_map_lock(&vm->vm_map);
	(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);
	vm_map_unlock(&vm->vm_map);

	pmap_release(vmspace_pmap(vm));
	uma_zfree(vmspace_zone, vm);
}
308582ec34cSAlfred Perlstein 
309582ec34cSAlfred Perlstein void
310582ec34cSAlfred Perlstein vmspace_free(struct vmspace *vm)
311582ec34cSAlfred Perlstein {
312582ec34cSAlfred Perlstein 	GIANT_REQUIRED;
313582ec34cSAlfred Perlstein 
314582ec34cSAlfred Perlstein 	if (vm->vm_refcnt == 0)
315582ec34cSAlfred Perlstein 		panic("vmspace_free: attempt to free already freed vmspace");
316582ec34cSAlfred Perlstein 
317389d2b6eSMatthew Dillon 	if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
318582ec34cSAlfred Perlstein 		vmspace_dofree(vm);
319582ec34cSAlfred Perlstein }
320582ec34cSAlfred Perlstein 
/*
 * Detach and drop the exiting reference on a dying process's vmspace;
 * frees the vmspace once no other references remain.
 */
void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	GIANT_REQUIRED;
	vm = p->p_vmspace;
	p->p_vmspace = NULL;

	/*
	 * cleanup by parent process wait()ing on exiting child.  vm_refcnt
	 * may not be 0 (e.g. fork() and child exits without exec()ing).
	 * exitingcnt may increment above 0 and drop back down to zero
	 * several times while vm_refcnt is held non-zero.  vm_refcnt
	 * may also increment above 0 and drop back down to zero several
	 * times while vm_exitingcnt is held non-zero.
	 *
	 * The last wait on the exiting child's vmspace will clean up
	 * the remainder of the vmspace.
	 */
	if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
		vmspace_dofree(vm);
}
344df8bae1dSRodney W. Grimes 
345df8bae1dSRodney W. Grimes /*
346ff2b5645SMatthew Dillon  * vmspace_swap_count() - count the approximate swap useage in pages for a
347ff2b5645SMatthew Dillon  *			  vmspace.
348ff2b5645SMatthew Dillon  *
349ff2b5645SMatthew Dillon  *	Swap useage is determined by taking the proportional swap used by
350ff2b5645SMatthew Dillon  *	VM objects backing the VM map.  To make up for fractional losses,
351ff2b5645SMatthew Dillon  *	if the VM object has any swap use at all the associated map entries
352ff2b5645SMatthew Dillon  *	count for at least 1 swap page.
353ff2b5645SMatthew Dillon  */
354ff2b5645SMatthew Dillon int
355ff2b5645SMatthew Dillon vmspace_swap_count(struct vmspace *vmspace)
356ff2b5645SMatthew Dillon {
357ff2b5645SMatthew Dillon 	vm_map_t map = &vmspace->vm_map;
358ff2b5645SMatthew Dillon 	vm_map_entry_t cur;
359ff2b5645SMatthew Dillon 	int count = 0;
360ff2b5645SMatthew Dillon 
361d974f03cSAlan Cox 	vm_map_lock_read(map);
362ff2b5645SMatthew Dillon 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
363ff2b5645SMatthew Dillon 		vm_object_t object;
364ff2b5645SMatthew Dillon 
365ff2b5645SMatthew Dillon 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
366ff2b5645SMatthew Dillon 		    (object = cur->object.vm_object) != NULL &&
367ff2b5645SMatthew Dillon 		    object->type == OBJT_SWAP
368ff2b5645SMatthew Dillon 		) {
369ff2b5645SMatthew Dillon 			int n = (cur->end - cur->start) / PAGE_SIZE;
370ff2b5645SMatthew Dillon 
371ff2b5645SMatthew Dillon 			if (object->un_pager.swp.swp_bcount) {
372ef6a93efSMatthew Dillon 				count += object->un_pager.swp.swp_bcount *
373ef6a93efSMatthew Dillon 				    SWAP_META_PAGES * n / object->size + 1;
374ff2b5645SMatthew Dillon 			}
375ff2b5645SMatthew Dillon 		}
376ff2b5645SMatthew Dillon 	}
377d974f03cSAlan Cox 	vm_map_unlock_read(map);
378ff2b5645SMatthew Dillon 	return (count);
379ff2b5645SMatthew Dillon }
380ff2b5645SMatthew Dillon 
/*
 * Acquire a map's lock exclusively.  System maps use a (non-sleepable)
 * mutex; user maps use a sleepable lockmgr lock.  The map timestamp is
 * bumped on every exclusive acquisition so callers can detect that the
 * map changed while they had it unlocked.
 */
void
_vm_map_lock(vm_map_t map, const char *file, int line)
{
	int error;

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else {
		error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
		KASSERT(error == 0, ("%s: failed to get lock", __func__));
	}
	map->timestamp++;
}
3941b40f8c0SMatthew Dillon 
3951b40f8c0SMatthew Dillon void
396780b1c09SAlan Cox _vm_map_unlock(vm_map_t map, const char *file, int line)
3970e0af8ecSBrian Feldman {
398bc91c510SAlan Cox 
39936daaecdSAlan Cox 	if (map->system_map)
40036daaecdSAlan Cox 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
40136daaecdSAlan Cox 	else
402bc91c510SAlan Cox 		lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
4030e0af8ecSBrian Feldman }
4040e0af8ecSBrian Feldman 
/*
 * "Read"-lock a map.  NOTE: despite the name, the user-map path takes
 * the lockmgr lock with LK_EXCLUSIVE — there is no shared-reader mode
 * here; the distinction from _vm_map_lock() is only that the timestamp
 * is not bumped.
 */
void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{
	int error;

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else {
		error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
		KASSERT(error == 0, ("%s: failed to get lock", __func__));
	}
}
4170e0af8ecSBrian Feldman 
4180e0af8ecSBrian Feldman void
419780b1c09SAlan Cox _vm_map_unlock_read(vm_map_t map, const char *file, int line)
4200e0af8ecSBrian Feldman {
421bc91c510SAlan Cox 
42236daaecdSAlan Cox 	if (map->system_map)
42336daaecdSAlan Cox 		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
42436daaecdSAlan Cox 	else
425bc91c510SAlan Cox 		lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
42625adb370SBrian Feldman }
42725adb370SBrian Feldman 
/*
 * Try to acquire a map's lock without blocking.  Returns nonzero on
 * success (with the timestamp bumped), zero if the lock was busy.
 */
int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	/*
	 * Both branches leave "error" zero on success: _mtx_trylock()
	 * returns nonzero when it obtains the mutex, hence the negation;
	 * lockmgr() returns 0 on success and an errno otherwise.
	 */
	error = map->system_map ?
	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
	    lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}
4400e0af8ecSBrian Feldman 
/*
 * "Upgrade" a read lock to a write lock.  Because read locks are taken
 * exclusively here (see _vm_map_lock_read()), nothing needs to change:
 * ownership is asserted, the timestamp is bumped, and 0 (success) is
 * always returned.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
#ifdef INVARIANTS
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
	} else
		KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
		    ("%s: lock not held", __func__));
	map->timestamp++;
	return (0);
}
4550e0af8ecSBrian Feldman 
/*
 * "Downgrade" a write lock to a read lock.  As with upgrade, this is a
 * no-op beyond asserting that the exclusive lock is held, since read
 * locks are exclusive in this implementation.
 */
void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
#ifdef INVARIANTS
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
	} else
		KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
		    ("%s: lock not held", __func__));
}
46825adb370SBrian Feldman 
469acd9a301SAlan Cox /*
470acd9a301SAlan Cox  *	vm_map_unlock_and_wait:
471acd9a301SAlan Cox  */
/*
 * Atomically release the map lock and sleep on &map->root until
 * vm_map_wakeup() is called.  Taking map_sleep_mtx before the unlock
 * closes the window in which a wakeup could be lost; PDROP releases
 * the mutex once the thread is asleep.  Returns the msleep() result.
 * (user_wait is currently unused.)
 */
int
vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
{

	mtx_lock(&map_sleep_mtx);
	vm_map_unlock(map);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0));
}
480acd9a301SAlan Cox 
481acd9a301SAlan Cox /*
482acd9a301SAlan Cox  *	vm_map_wakeup:
483acd9a301SAlan Cox  */
/*
 * Wake up any threads sleeping on the map in vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the vm_map_unlock()
	 * and the msleep() in vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}
497acd9a301SAlan Cox 
/*
 * Return the resident page count of the vmspace's pmap.
 */
long
vmspace_resident_count(struct vmspace *vmspace)
{
	return (pmap_resident_count(vmspace_pmap(vmspace)));
}
5031b40f8c0SMatthew Dillon 
504ff2b5645SMatthew Dillon /*
505df8bae1dSRodney W. Grimes  *	vm_map_create:
506df8bae1dSRodney W. Grimes  *
507df8bae1dSRodney W. Grimes  *	Creates and returns a new empty VM map with
508df8bae1dSRodney W. Grimes  *	the given physical map structure, and having
509df8bae1dSRodney W. Grimes  *	the given lower and upper address bounds.
510df8bae1dSRodney W. Grimes  */
5110d94caffSDavid Greenman vm_map_t
5121b40f8c0SMatthew Dillon vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
513df8bae1dSRodney W. Grimes {
514c0877f10SJohn Dyson 	vm_map_t result;
515df8bae1dSRodney W. Grimes 
5168355f576SJeff Roberson 	result = uma_zalloc(mapzone, M_WAITOK);
51721c641b2SJohn Baldwin 	CTR1(KTR_VM, "vm_map_create: %p", result);
5188355f576SJeff Roberson 	_vm_map_init(result, min, max);
519df8bae1dSRodney W. Grimes 	result->pmap = pmap;
520df8bae1dSRodney W. Grimes 	return (result);
521df8bae1dSRodney W. Grimes }
522df8bae1dSRodney W. Grimes 
523df8bae1dSRodney W. Grimes /*
524df8bae1dSRodney W. Grimes  * Initialize an existing vm_map structure
525df8bae1dSRodney W. Grimes  * such as that in the vmspace structure.
526df8bae1dSRodney W. Grimes  * The pmap is set elsewhere.
527df8bae1dSRodney W. Grimes  */
5288355f576SJeff Roberson static void
5298355f576SJeff Roberson _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
530df8bae1dSRodney W. Grimes {
53121c641b2SJohn Baldwin 
532df8bae1dSRodney W. Grimes 	map->header.next = map->header.prev = &map->header;
5339688f931SAlan Cox 	map->needs_wakeup = FALSE;
5343075778bSJohn Dyson 	map->system_map = 0;
535df8bae1dSRodney W. Grimes 	map->min_offset = min;
536df8bae1dSRodney W. Grimes 	map->max_offset = max;
537df8bae1dSRodney W. Grimes 	map->first_free = &map->header;
5384e94f402SAlan Cox 	map->root = NULL;
539df8bae1dSRodney W. Grimes 	map->timestamp = 0;
540df8bae1dSRodney W. Grimes }
541df8bae1dSRodney W. Grimes 
/*
 * Initialize a statically allocated vm_map (one not obtained from
 * mapzone), including its locks; the zone path gets the equivalent
 * lock setup from vm_map_zinit().
 */
void
vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{
	_vm_map_init(map, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF);
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}
549a18b1f1dSJason Evans 
550df8bae1dSRodney W. Grimes /*
551b18bfc3dSJohn Dyson  *	vm_map_entry_dispose:	[ internal use only ]
552b18bfc3dSJohn Dyson  *
553b18bfc3dSJohn Dyson  *	Inverse of vm_map_entry_create.
554b18bfc3dSJohn Dyson  */
55562487bb4SJohn Dyson static void
5561b40f8c0SMatthew Dillon vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
557b18bfc3dSJohn Dyson {
5582b4a2c27SAlan Cox 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
559b18bfc3dSJohn Dyson }
560b18bfc3dSJohn Dyson 
561b18bfc3dSJohn Dyson /*
562df8bae1dSRodney W. Grimes  *	vm_map_entry_create:	[ internal use only ]
563df8bae1dSRodney W. Grimes  *
564df8bae1dSRodney W. Grimes  *	Allocates a VM map entry for insertion.
565b28cb1caSAlfred Perlstein  *	No entry fields are filled in.
566df8bae1dSRodney W. Grimes  */
567f708ef1bSPoul-Henning Kamp static vm_map_entry_t
5681b40f8c0SMatthew Dillon vm_map_entry_create(vm_map_t map)
569df8bae1dSRodney W. Grimes {
5701f6889a1SMatthew Dillon 	vm_map_entry_t new_entry;
5711f6889a1SMatthew Dillon 
5722b4a2c27SAlan Cox 	if (map->system_map)
5732b4a2c27SAlan Cox 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
5742b4a2c27SAlan Cox 	else
5752b4a2c27SAlan Cox 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
5761f6889a1SMatthew Dillon 	if (new_entry == NULL)
5771f6889a1SMatthew Dillon 		panic("vm_map_entry_create: kernel resources exhausted");
5781f6889a1SMatthew Dillon 	return (new_entry);
579df8bae1dSRodney W. Grimes }
580df8bae1dSRodney W. Grimes 
581df8bae1dSRodney W. Grimes /*
582794316a8SAlan Cox  *	vm_map_entry_set_behavior:
583794316a8SAlan Cox  *
584794316a8SAlan Cox  *	Set the expected access behavior, either normal, random, or
585794316a8SAlan Cox  *	sequential.
586794316a8SAlan Cox  */
587794316a8SAlan Cox static __inline void
588794316a8SAlan Cox vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
589794316a8SAlan Cox {
590794316a8SAlan Cox 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
591794316a8SAlan Cox 	    (behavior & MAP_ENTRY_BEHAV_MASK);
592794316a8SAlan Cox }
593794316a8SAlan Cox 
594794316a8SAlan Cox /*
5954e94f402SAlan Cox  *	vm_map_entry_splay:
5964e94f402SAlan Cox  *
5974e94f402SAlan Cox  *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
5984e94f402SAlan Cox  *	the vm_map_entry containing the given address.  If, however, that
5994e94f402SAlan Cox  *	address is not found in the vm_map, returns a vm_map_entry that is
6004e94f402SAlan Cox  *	adjacent to the address, coming before or after it.
6014e94f402SAlan Cox  */
/*
 * Top-down splay (Sleator & Tarjan).  "dummy" is a stack-local stub
 * whose right/left fields collect the left and right subtrees that are
 * broken off as the search descends; lefttreemax/righttreemin track
 * the attachment points for the next broken-off piece.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
{
	struct vm_map_entry dummy;
	vm_map_entry_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (address < root->start) {
			if ((y = root->left) == NULL)
				break;
			if (address < y->start) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (address >= root->end) {
			if ((y = root->right) == NULL)
				break;
			if (address >= y->end) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;	/* address falls within [start, end) */
	}
	/*
	 * Assemble the new root: hang the remaining subtrees off the
	 * final node and reattach the collected left/right trees.
	 */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}
6504e94f402SAlan Cox 
6514e94f402SAlan Cox /*
652df8bae1dSRodney W. Grimes  *	vm_map_entry_{un,}link:
653df8bae1dSRodney W. Grimes  *
654df8bae1dSRodney W. Grimes  *	Insert/remove entries from maps.
655df8bae1dSRodney W. Grimes  */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	/* Insert into the doubly-linked entry list after "after_where". */
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	/*
	 * Insert into the splay tree as well, making "entry" the new root.
	 * Splaying after_where's start address first brings after_where to
	 * the top of the tree (per vm_map_entry_splay's contract), so the
	 * new entry can take over after_where's right subtree.
	 */
	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
	} else {
		/* Inserting at the list head: old root becomes right child. */
		entry->right = map->root;
		entry->left = NULL;
	}
	map->root = entry;
}
68399c81ca9SAlan Cox 
static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	/*
	 * Splay the entry to the root of the tree so that its left and
	 * right subtrees can be rejoined without it.
	 */
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		/*
		 * Splay within the left subtree: every node there precedes
		 * entry->start, so the splay brings the largest of them to
		 * the top with an empty right child, where the right
		 * subtree can be attached.
		 */
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
	}
	map->root = root;

	/* Remove from the doubly-linked entry list. */
	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}
708df8bae1dSRodney W. Grimes 
709df8bae1dSRodney W. Grimes /*
710df8bae1dSRodney W. Grimes  *	vm_map_lookup_entry:	[ internal use only ]
711df8bae1dSRodney W. Grimes  *
712df8bae1dSRodney W. Grimes  *	Finds the map entry containing (or
713df8bae1dSRodney W. Grimes  *	immediately preceding) the specified address
714df8bae1dSRodney W. Grimes  *	in the given map; the entry is returned
715df8bae1dSRodney W. Grimes  *	in the "entry" parameter.  The boolean
716df8bae1dSRodney W. Grimes  *	result indicates whether the address is
717df8bae1dSRodney W. Grimes  *	actually contained in the map.
718df8bae1dSRodney W. Grimes  */
7190d94caffSDavid Greenman boolean_t
7201b40f8c0SMatthew Dillon vm_map_lookup_entry(
7211b40f8c0SMatthew Dillon 	vm_map_t map,
7221b40f8c0SMatthew Dillon 	vm_offset_t address,
7231b40f8c0SMatthew Dillon 	vm_map_entry_t *entry)	/* OUT */
724df8bae1dSRodney W. Grimes {
725c0877f10SJohn Dyson 	vm_map_entry_t cur;
726df8bae1dSRodney W. Grimes 
7274e94f402SAlan Cox 	cur = vm_map_entry_splay(address, map->root);
7284e94f402SAlan Cox 	if (cur == NULL)
7294e94f402SAlan Cox 		*entry = &map->header;
7304e94f402SAlan Cox 	else {
7314e94f402SAlan Cox 		map->root = cur;
732df8bae1dSRodney W. Grimes 
733df8bae1dSRodney W. Grimes 		if (address >= cur->start) {
734df8bae1dSRodney W. Grimes 			*entry = cur;
7354e94f402SAlan Cox 			if (cur->end > address)
736df8bae1dSRodney W. Grimes 				return (TRUE);
7374e94f402SAlan Cox 		} else
738df8bae1dSRodney W. Grimes 			*entry = cur->prev;
7394e94f402SAlan Cox 	}
740df8bae1dSRodney W. Grimes 	return (FALSE);
741df8bae1dSRodney W. Grimes }
742df8bae1dSRodney W. Grimes 
743df8bae1dSRodney W. Grimes /*
74430dcfc09SJohn Dyson  *	vm_map_insert:
74530dcfc09SJohn Dyson  *
74630dcfc09SJohn Dyson  *	Inserts the given whole VM object into the target
74730dcfc09SJohn Dyson  *	map at the specified address range.  The object's
74830dcfc09SJohn Dyson  *	size should match that of the address range.
74930dcfc09SJohn Dyson  *
75030dcfc09SJohn Dyson  *	Requires that the map be locked, and leaves it so.
7512aaeadf8SMatthew Dillon  *
7522aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
7532aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
75430dcfc09SJohn Dyson  */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	/* Translate the caller's MAP_* cow flags into entry eflags. */
	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		/* A NOFAULT mapping must not carry a backing object. */
		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;

	if (object) {
		/*
		 * When object is non-NULL, it could be shared with another
		 * process.  We have to set or clear OBJ_ONEMAPPING
		 * appropriately.
		 */
		vm_object_lock(object);
		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		}
		vm_object_unlock(object);
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 ((prev_entry->object.vm_object == NULL) ||
		  vm_object_coalesce(prev_entry->object.vm_object,
				     OFF_TO_IDX(prev_entry->offset),
				     (vm_size_t)(prev_entry->end - prev_entry->start),
				     (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		vm_object_reference(object);
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start)) {
		map->first_free = new_entry;
	}

#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry);
#endif

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		/*
		 * NOTE(review): Giant is held around the prefault --
		 * presumably required by pmap_object_init_pt; confirm.
		 */
		mtx_lock(&Giant);
		pmap_object_init_pt(map->pmap, start,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
		mtx_unlock(&Giant);
	}

	return (KERN_SUCCESS);
}
91030dcfc09SJohn Dyson 
91130dcfc09SJohn Dyson /*
912df8bae1dSRodney W. Grimes  * Find sufficient space for `length' bytes in the given map, starting at
913df8bae1dSRodney W. Grimes  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
914df8bae1dSRodney W. Grimes  */
int
vm_map_findspace(
	vm_map_t map,
	vm_offset_t start,
	vm_size_t length,
	vm_offset_t *addr)
{
	vm_map_entry_t entry, next;
	vm_offset_t end;

	/* Clamp the requested start to the map's valid range. */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		/* first_free is a hint: the entry preceding the first gap. */
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	*addr = start;
	if (map == kernel_map) {
		/* Grow the kernel page tables to cover the chosen range. */
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			mtx_lock(&Giant);
			pmap_growkernel(ksize);
			mtx_unlock(&Giant);
		}
	}
	return (0);
}
975df8bae1dSRodney W. Grimes 
976df8bae1dSRodney W. Grimes /*
977df8bae1dSRodney W. Grimes  *	vm_map_find finds an unallocated region in the target address
978df8bae1dSRodney W. Grimes  *	map with the given length.  The search is defined to be
979df8bae1dSRodney W. Grimes  *	first-fit from the specified address; the region found is
980df8bae1dSRodney W. Grimes  *	returned in the same parameter.
981df8bae1dSRodney W. Grimes  *
9822aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
9832aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
984df8bae1dSRodney W. Grimes  */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result, s = 0;

	start = *addr;

	/*
	 * NOTE(review): kmem_map operations are bracketed with splvm()/
	 * splx() -- presumably to block interrupt-time allocators; confirm.
	 */
	if (map == kmem_map)
		s = splvm();

	vm_map_lock(map);
	if (find_space) {
		/* Let vm_map_findspace pick an address; it updates *addr. */
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
		start, start + length, prot, max, cow);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}
1018df8bae1dSRodney W. Grimes 
1019df8bae1dSRodney W. Grimes /*
1020b7b2aac2SJohn Dyson  *	vm_map_simplify_entry:
102167bf6868SJohn Dyson  *
10224e71e795SMatthew Dillon  *	Simplify the given map entry by merging with either neighbor.  This
10234e71e795SMatthew Dillon  *	routine also has the ability to merge with both neighbors.
10244e71e795SMatthew Dillon  *
10254e71e795SMatthew Dillon  *	The map must be locked.
10264e71e795SMatthew Dillon  *
 *	This routine guarantees that the passed entry remains valid (though
10284e71e795SMatthew Dillon  *	possibly extended).  When merging, this routine may delete one or
10294e71e795SMatthew Dillon  *	both neighbors.
1030df8bae1dSRodney W. Grimes  */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	/* Entries in transition or backed by submaps are never merged. */
	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
		return;

	/*
	 * Try to absorb the previous entry: it must abut this one, share
	 * the same object at a contiguous offset, and agree on every
	 * attribute (eflags, protection, inheritance, wiring).
	 */
	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			/* Drop the object reference the merged entry held. */
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev);
		}
	}

	/* Try to absorb the next entry under the same conditions. */
	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next);
	        }
	}
}
1085df8bae1dSRodney W. Grimes /*
1086df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
1087df8bae1dSRodney W. Grimes  *
1088df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
1089df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
1090df8bae1dSRodney W. Grimes  *	it splits the entry into two.
1091df8bae1dSRodney W. Grimes  */
/*
 * Parenthesize the macro arguments so that expression arguments expand
 * safely, matching the hygiene of vm_map_clip_end below.  Note that
 * "startaddr" and "entry" are evaluated more than once.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if ((startaddr) > (entry)->start) \
		_vm_map_clip_start((map), (entry), (startaddr)); \
}
1097df8bae1dSRodney W. Grimes 
1098df8bae1dSRodney W. Grimes /*
1099df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
1100df8bae1dSRodney W. Grimes  *	the entry must be split.
1101df8bae1dSRodney W. Grimes  */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/* Clone the entry; the copy becomes the front [old start, start). */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		/* Both halves now map the object: take an extra reference. */
		vm_object_reference(new_entry->object.vm_object);
	}
}
1142df8bae1dSRodney W. Grimes 
1143df8bae1dSRodney W. Grimes /*
1144df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
1145df8bae1dSRodney W. Grimes  *
1146df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
1147df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
1148df8bae1dSRodney W. Grimes  *	it splits the entry into two.
1149df8bae1dSRodney W. Grimes  */
/* NOTE: evaluates "endaddr" and "entry" more than once. */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}
1155df8bae1dSRodney W. Grimes 
1156df8bae1dSRodney W. Grimes /*
1157df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
1158df8bae1dSRodney W. Grimes  *	the entry must be split.
1159df8bae1dSRodney W. Grimes  */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	/* The copy becomes the tail [end, old end); entry keeps the front. */
	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		/* Both halves now map the object: take an extra reference. */
		vm_object_reference(new_entry->object.vm_object);
	}
}
1195df8bae1dSRodney W. Grimes 
1196df8bae1dSRodney W. Grimes /*
1197df8bae1dSRodney W. Grimes  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1198df8bae1dSRodney W. Grimes  *
1199df8bae1dSRodney W. Grimes  *	Asserts that the starting and ending region
1200df8bae1dSRodney W. Grimes  *	addresses fall within the valid range of the map.
1201df8bae1dSRodney W. Grimes  */
/*
 * NOTE: "start" and "end" must be modifiable lvalues; the macro clamps
 * them in place and evaluates both arguments more than once.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}
1211df8bae1dSRodney W. Grimes 
1212df8bae1dSRodney W. Grimes /*
1213df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
1214df8bae1dSRodney W. Grimes  *
1215df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
1216df8bae1dSRodney W. Grimes  *
1217df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
1218df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
1219df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
1220df8bae1dSRodney W. Grimes  *
1221df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
1223df8bae1dSRodney W. Grimes  *		vm_fault
1224df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
1225df8bae1dSRodney W. Grimes  *
1226df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
1227df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
1228df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
1229df8bae1dSRodney W. Grimes  */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/* Clip so that an entry begins exactly at "start" and ends at "end". */
	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	/*
	 * The range must be covered by a single entry that carries no
	 * backing object and is not copy-on-write; otherwise the initial
	 * KERN_INVALID_ARGUMENT is returned.
	 */
	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}
1262df8bae1dSRodney W. Grimes 
1263df8bae1dSRodney W. Grimes /*
1264df8bae1dSRodney W. Grimes  *	vm_map_protect:
1265df8bae1dSRodney W. Grimes  *
1266df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
1267df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
1268df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
1269df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
 *
 *	Returns KERN_SUCCESS on success, KERN_INVALID_ARGUMENT if the
 *	range overlaps a submap entry, or KERN_PROTECTION_FAILURE if
 *	new_prot exceeds some entry's maximum protection.  On failure
 *	no protections are changed (validation happens in a first pass
 *	before any entry is modified).
1270df8bae1dSRodney W. Grimes  */
1271df8bae1dSRodney W. Grimes int
1272b9dcd593SBruce Evans vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1273b9dcd593SBruce Evans 	       vm_prot_t new_prot, boolean_t set_max)
1274df8bae1dSRodney W. Grimes {
1275c0877f10SJohn Dyson 	vm_map_entry_t current;
1276df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
1277df8bae1dSRodney W. Grimes 
1278df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1279df8bae1dSRodney W. Grimes 
1280df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
1281df8bae1dSRodney W. Grimes 
1282df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &entry)) {
1283df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
1284b7b2aac2SJohn Dyson 	} else {
1285df8bae1dSRodney W. Grimes 		entry = entry->next;
1286b7b2aac2SJohn Dyson 	}
1287df8bae1dSRodney W. Grimes 
1288df8bae1dSRodney W. Grimes 	/*
12890d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
	 * Nothing is modified until the entire range validates.
1290df8bae1dSRodney W. Grimes 	 */
1291df8bae1dSRodney W. Grimes 	current = entry;
1292df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1293afa07f7eSJohn Dyson 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1294a1f6d91cSDavid Greenman 			vm_map_unlock(map);
1295df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
1296a1f6d91cSDavid Greenman 		}
1297df8bae1dSRodney W. Grimes 		if ((new_prot & current->max_protection) != new_prot) {
1298df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
1299df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
1300df8bae1dSRodney W. Grimes 		}
1301df8bae1dSRodney W. Grimes 		current = current->next;
1302df8bae1dSRodney W. Grimes 	}
1303df8bae1dSRodney W. Grimes 
1304df8bae1dSRodney W. Grimes 	/*
13050d94caffSDavid Greenman 	 * Go back and fix up protections. [Note that clipping is not
13060d94caffSDavid Greenman 	 * necessary the second time.]
1307df8bae1dSRodney W. Grimes 	 */
1308df8bae1dSRodney W. Grimes 	current = entry;
1309df8bae1dSRodney W. Grimes 	while ((current != &map->header) && (current->start < end)) {
1310df8bae1dSRodney W. Grimes 		vm_prot_t old_prot;
1311df8bae1dSRodney W. Grimes 
1312df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, current, end);
1313df8bae1dSRodney W. Grimes 
1314df8bae1dSRodney W. Grimes 		old_prot = current->protection;
1315df8bae1dSRodney W. Grimes 		if (set_max)
1316df8bae1dSRodney W. Grimes 			current->protection =
1317df8bae1dSRodney W. Grimes 			    (current->max_protection = new_prot) &
1318df8bae1dSRodney W. Grimes 			    old_prot;
1319df8bae1dSRodney W. Grimes 		else
1320df8bae1dSRodney W. Grimes 			current->protection = new_prot;
1321df8bae1dSRodney W. Grimes 
1322df8bae1dSRodney W. Grimes 		/*
13230d94caffSDavid Greenman 		 * Update physical map if necessary. Worry about copy-on-write
13240d94caffSDavid Greenman 		 * here -- CHECK THIS XXX
1325df8bae1dSRodney W. Grimes 		 */
1326df8bae1dSRodney W. Grimes 		if (current->protection != old_prot) {
132747c3ccc4SAlan Cox 			mtx_lock(&Giant);
132885e03a7eSAlan Cox 			vm_page_lock_queues();
			/*
			 * For copy-on-write entries, MASK strips
			 * VM_PROT_WRITE from what is entered into the
			 * pmap, so the first write still faults and
			 * triggers the copy.
			 */
1329afa07f7eSJohn Dyson #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1330df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
1331df8bae1dSRodney W. Grimes 			pmap_protect(map->pmap, current->start,
1332df8bae1dSRodney W. Grimes 			    current->end,
13331c85e3dfSAlan Cox 			    current->protection & MASK(current));
1334df8bae1dSRodney W. Grimes #undef	MASK
133585e03a7eSAlan Cox 			vm_page_unlock_queues();
133647c3ccc4SAlan Cox 			mtx_unlock(&Giant);
1337df8bae1dSRodney W. Grimes 		}
13387d78abc9SJohn Dyson 		vm_map_simplify_entry(map, current);
1339df8bae1dSRodney W. Grimes 		current = current->next;
1340df8bae1dSRodney W. Grimes 	}
1341df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1342df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1343df8bae1dSRodney W. Grimes }
1344df8bae1dSRodney W. Grimes 
1345df8bae1dSRodney W. Grimes /*
1346867a482dSJohn Dyson  *	vm_map_madvise:
1347867a482dSJohn Dyson  *
1348867a482dSJohn Dyson  * 	This routine traverses a process's map, handling the madvise
1349f7fc307aSAlan Cox  *	system call.  Advisories are classified as either those affecting
1350f7fc307aSAlan Cox  *	the vm_map_entry structure, or those affecting the underlying
1351f7fc307aSAlan Cox  *	objects.
 *
 *	Returns 0 on success, or KERN_INVALID_ARGUMENT for an
 *	unrecognized behavior value.  Submap entries in the range are
 *	silently skipped.
1352867a482dSJohn Dyson  */
1353b4309055SMatthew Dillon int
13541b40f8c0SMatthew Dillon vm_map_madvise(
13551b40f8c0SMatthew Dillon 	vm_map_t map,
13561b40f8c0SMatthew Dillon 	vm_offset_t start,
13571b40f8c0SMatthew Dillon 	vm_offset_t end,
13581b40f8c0SMatthew Dillon 	int behav)
1359867a482dSJohn Dyson {
1360f7fc307aSAlan Cox 	vm_map_entry_t current, entry;
1361b4309055SMatthew Dillon 	int modify_map = 0;
1362867a482dSJohn Dyson 
1363b4309055SMatthew Dillon 	/*
1364b4309055SMatthew Dillon 	 * Some madvise calls directly modify the vm_map_entry, in which case
1365b4309055SMatthew Dillon 	 * we need to use an exclusive lock on the map and we need to perform
1366b4309055SMatthew Dillon 	 * various clipping operations.  Otherwise we only need a read-lock
1367b4309055SMatthew Dillon 	 * on the map.
1368b4309055SMatthew Dillon 	 */
1369b4309055SMatthew Dillon 	switch(behav) {
1370b4309055SMatthew Dillon 	case MADV_NORMAL:
1371b4309055SMatthew Dillon 	case MADV_SEQUENTIAL:
1372b4309055SMatthew Dillon 	case MADV_RANDOM:
13734f79d873SMatthew Dillon 	case MADV_NOSYNC:
13744f79d873SMatthew Dillon 	case MADV_AUTOSYNC:
13759730a5daSPaul Saab 	case MADV_NOCORE:
13769730a5daSPaul Saab 	case MADV_CORE:
1377b4309055SMatthew Dillon 		modify_map = 1;
1378867a482dSJohn Dyson 		vm_map_lock(map);
1379b4309055SMatthew Dillon 		break;
1380b4309055SMatthew Dillon 	case MADV_WILLNEED:
1381b4309055SMatthew Dillon 	case MADV_DONTNEED:
1382b4309055SMatthew Dillon 	case MADV_FREE:
1383f7fc307aSAlan Cox 		vm_map_lock_read(map);
1384b4309055SMatthew Dillon 		break;
1385b4309055SMatthew Dillon 	default:
1386b4309055SMatthew Dillon 		return (KERN_INVALID_ARGUMENT);
1387b4309055SMatthew Dillon 	}
1388b4309055SMatthew Dillon 
1389b4309055SMatthew Dillon 	/*
1390b4309055SMatthew Dillon 	 * Locate starting entry and clip if necessary.
1391b4309055SMatthew Dillon 	 */
1392867a482dSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
1393867a482dSJohn Dyson 
1394867a482dSJohn Dyson 	if (vm_map_lookup_entry(map, start, &entry)) {
1395f7fc307aSAlan Cox 		if (modify_map)
1396867a482dSJohn Dyson 			vm_map_clip_start(map, entry, start);
1397b4309055SMatthew Dillon 	} else {
1398867a482dSJohn Dyson 		entry = entry->next;
1399b4309055SMatthew Dillon 	}
1400867a482dSJohn Dyson 
1401f7fc307aSAlan Cox 	if (modify_map) {
1402f7fc307aSAlan Cox 		/*
1403f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the vm_map_entry.
1404f7fc307aSAlan Cox 		 *
1405f7fc307aSAlan Cox 		 * We clip the vm_map_entry so that behavioral changes are
1406f7fc307aSAlan Cox 		 * limited to the specified address range.
1407f7fc307aSAlan Cox 		 */
1408867a482dSJohn Dyson 		for (current = entry;
1409867a482dSJohn Dyson 		     (current != &map->header) && (current->start < end);
1410b4309055SMatthew Dillon 		     current = current->next
1411b4309055SMatthew Dillon 		) {
1412f7fc307aSAlan Cox 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1413867a482dSJohn Dyson 				continue;
1414fed9a903SJohn Dyson 
141547221757SJohn Dyson 			vm_map_clip_end(map, current, end);
1416fed9a903SJohn Dyson 
1417f7fc307aSAlan Cox 			switch (behav) {
1418867a482dSJohn Dyson 			case MADV_NORMAL:
14197f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1420867a482dSJohn Dyson 				break;
1421867a482dSJohn Dyson 			case MADV_SEQUENTIAL:
14227f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1423867a482dSJohn Dyson 				break;
1424867a482dSJohn Dyson 			case MADV_RANDOM:
14257f866e4bSAlan Cox 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1426867a482dSJohn Dyson 				break;
14274f79d873SMatthew Dillon 			case MADV_NOSYNC:
14284f79d873SMatthew Dillon 				current->eflags |= MAP_ENTRY_NOSYNC;
14294f79d873SMatthew Dillon 				break;
14304f79d873SMatthew Dillon 			case MADV_AUTOSYNC:
14314f79d873SMatthew Dillon 				current->eflags &= ~MAP_ENTRY_NOSYNC;
14324f79d873SMatthew Dillon 				break;
14339730a5daSPaul Saab 			case MADV_NOCORE:
14349730a5daSPaul Saab 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
14359730a5daSPaul Saab 				break;
14369730a5daSPaul Saab 			case MADV_CORE:
14379730a5daSPaul Saab 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
14389730a5daSPaul Saab 				break;
1439867a482dSJohn Dyson 			default:
1440867a482dSJohn Dyson 				break;
1441867a482dSJohn Dyson 			}
1442f7fc307aSAlan Cox 			vm_map_simplify_entry(map, current);
1443867a482dSJohn Dyson 		}
1444867a482dSJohn Dyson 		vm_map_unlock(map);
1445b4309055SMatthew Dillon 	} else {
1446f7fc307aSAlan Cox 		vm_pindex_t pindex;
1447f7fc307aSAlan Cox 		int count;
1448f7fc307aSAlan Cox 
1449f7fc307aSAlan Cox 		/*
1450f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the underlying
1451f7fc307aSAlan Cox 		 * vm_object.
1452f7fc307aSAlan Cox 		 *
1453f7fc307aSAlan Cox 		 * Since we don't clip the vm_map_entry, we have to clip
1454f7fc307aSAlan Cox 		 * the vm_object pindex and count.
1455f7fc307aSAlan Cox 		 */
1456f7fc307aSAlan Cox 		for (current = entry;
1457f7fc307aSAlan Cox 		     (current != &map->header) && (current->start < end);
1458b4309055SMatthew Dillon 		     current = current->next
1459b4309055SMatthew Dillon 		) {
14605f99b57cSMatthew Dillon 			vm_offset_t useStart;
14615f99b57cSMatthew Dillon 
1462f7fc307aSAlan Cox 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1463f7fc307aSAlan Cox 				continue;
1464f7fc307aSAlan Cox 
			/*
			 * Trim the object-page range to the overlap of
			 * this entry with [start, end).
			 */
1465f7fc307aSAlan Cox 			pindex = OFF_TO_IDX(current->offset);
1466f7fc307aSAlan Cox 			count = atop(current->end - current->start);
14675f99b57cSMatthew Dillon 			useStart = current->start;
1468f7fc307aSAlan Cox 
1469f7fc307aSAlan Cox 			if (current->start < start) {
1470f7fc307aSAlan Cox 				pindex += atop(start - current->start);
1471f7fc307aSAlan Cox 				count -= atop(start - current->start);
14725f99b57cSMatthew Dillon 				useStart = start;
1473f7fc307aSAlan Cox 			}
1474f7fc307aSAlan Cox 			if (current->end > end)
1475f7fc307aSAlan Cox 				count -= atop(current->end - end);
1476f7fc307aSAlan Cox 
1477f7fc307aSAlan Cox 			if (count <= 0)
1478f7fc307aSAlan Cox 				continue;
1479f7fc307aSAlan Cox 
1480f7fc307aSAlan Cox 			vm_object_madvise(current->object.vm_object,
1481f7fc307aSAlan Cox 					  pindex, count, behav);
1482b4309055SMatthew Dillon 			if (behav == MADV_WILLNEED) {
1483094f6d26SAlan Cox 				mtx_lock(&Giant);
1484b4309055SMatthew Dillon 				pmap_object_init_pt(
1485b4309055SMatthew Dillon 				    map->pmap,
14865f99b57cSMatthew Dillon 				    useStart,
1487f7fc307aSAlan Cox 				    current->object.vm_object,
1488b4309055SMatthew Dillon 				    pindex,
1489b4309055SMatthew Dillon 				    (count << PAGE_SHIFT),
1490e3026983SMatthew Dillon 				    MAP_PREFAULT_MADVISE
1491b4309055SMatthew Dillon 				);
1492094f6d26SAlan Cox 				mtx_unlock(&Giant);
1493f7fc307aSAlan Cox 			}
1494f7fc307aSAlan Cox 		}
1495f7fc307aSAlan Cox 		vm_map_unlock_read(map);
1496f7fc307aSAlan Cox 	}
1497b4309055SMatthew Dillon 	return (0);
1498867a482dSJohn Dyson }
1499867a482dSJohn Dyson 
1500867a482dSJohn Dyson 
1501867a482dSJohn Dyson /*
1502df8bae1dSRodney W. Grimes  *	vm_map_inherit:
1503df8bae1dSRodney W. Grimes  *
1504df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
1505df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
1506df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
1507df8bae1dSRodney W. Grimes  *	child maps at the time of vm_map_fork.
 *
 *	Returns KERN_SUCCESS, or KERN_INVALID_ARGUMENT for an
 *	inheritance value other than NONE/COPY/SHARE (validated before
 *	the map is locked).
1508df8bae1dSRodney W. Grimes  */
1509df8bae1dSRodney W. Grimes int
1510b9dcd593SBruce Evans vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1511b9dcd593SBruce Evans 	       vm_inherit_t new_inheritance)
1512df8bae1dSRodney W. Grimes {
1513c0877f10SJohn Dyson 	vm_map_entry_t entry;
1514df8bae1dSRodney W. Grimes 	vm_map_entry_t temp_entry;
1515df8bae1dSRodney W. Grimes 
1516df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
1517df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
1518df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
1519df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
1520df8bae1dSRodney W. Grimes 		break;
1521df8bae1dSRodney W. Grimes 	default:
1522df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
1523df8bae1dSRodney W. Grimes 	}
1524df8bae1dSRodney W. Grimes 	vm_map_lock(map);
1525df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
	/*
	 * Clip at "start" (or begin at the next entry), then walk the
	 * range, clipping each entry at "end" and stamping the new
	 * inheritance.  Simplify as we go to re-merge clipped entries.
	 */
1526df8bae1dSRodney W. Grimes 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1527df8bae1dSRodney W. Grimes 		entry = temp_entry;
1528df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
15290d94caffSDavid Greenman 	} else
1530df8bae1dSRodney W. Grimes 		entry = temp_entry->next;
1531df8bae1dSRodney W. Grimes 	while ((entry != &map->header) && (entry->start < end)) {
1532df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
1533df8bae1dSRodney W. Grimes 		entry->inheritance = new_inheritance;
153444428f62SAlan Cox 		vm_map_simplify_entry(map, entry);
1535df8bae1dSRodney W. Grimes 		entry = entry->next;
1536df8bae1dSRodney W. Grimes 	}
1537df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
1538df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
1539df8bae1dSRodney W. Grimes }
1540df8bae1dSRodney W. Grimes 
1541df8bae1dSRodney W. Grimes /*
1542acd9a301SAlan Cox  *	vm_map_unwire:
1543acd9a301SAlan Cox  *
1544e27e17b7SAlan Cox  *	Implements both kernel and user unwiring.
 *
 *	If "user_unwire" is TRUE, every entry in the range must carry
 *	MAP_ENTRY_USER_WIRED, and that flag is cleared on success.
 *	Returns KERN_SUCCESS; KERN_INVALID_ADDRESS if the range starts
 *	outside any entry, contains a hole, or an entry vanishes while
 *	the map is unlocked; KERN_INVALID_ARGUMENT if an entry is not
 *	wired (or not user-wired when user_unwire).  On error, "end" is
 *	pulled back so the cleanup pass only touches entries already
 *	marked in-transition.
1545acd9a301SAlan Cox  */
1546acd9a301SAlan Cox int
1547acd9a301SAlan Cox vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1548acd9a301SAlan Cox 	boolean_t user_unwire)
1549acd9a301SAlan Cox {
1550acd9a301SAlan Cox 	vm_map_entry_t entry, first_entry, tmp_entry;
1551acd9a301SAlan Cox 	vm_offset_t saved_start;
1552acd9a301SAlan Cox 	unsigned int last_timestamp;
1553acd9a301SAlan Cox 	int rv;
1554acd9a301SAlan Cox 	boolean_t need_wakeup, result;
1555acd9a301SAlan Cox 
1556acd9a301SAlan Cox 	vm_map_lock(map);
1557acd9a301SAlan Cox 	VM_MAP_RANGE_CHECK(map, start, end);
1558acd9a301SAlan Cox 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
1559acd9a301SAlan Cox 		vm_map_unlock(map);
1560acd9a301SAlan Cox 		return (KERN_INVALID_ADDRESS);
1561acd9a301SAlan Cox 	}
1562acd9a301SAlan Cox 	last_timestamp = map->timestamp;
1563acd9a301SAlan Cox 	entry = first_entry;
	/*
	 * First pass: clip and validate each entry in the range,
	 * marking it MAP_ENTRY_IN_TRANSITION.  If another thread has an
	 * entry in transition, sleep and re-look it up afterwards.
	 */
1564acd9a301SAlan Cox 	while (entry != &map->header && entry->start < end) {
1565acd9a301SAlan Cox 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1566acd9a301SAlan Cox 			/*
1567acd9a301SAlan Cox 			 * We have not yet clipped the entry.
1568acd9a301SAlan Cox 			 */
1569acd9a301SAlan Cox 			saved_start = (start >= entry->start) ? start :
1570acd9a301SAlan Cox 			    entry->start;
1571acd9a301SAlan Cox 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1572acd9a301SAlan Cox 			if (vm_map_unlock_and_wait(map, user_unwire)) {
1573acd9a301SAlan Cox 				/*
1574acd9a301SAlan Cox 				 * Allow interruption of user unwiring?
1575acd9a301SAlan Cox 				 */
1576acd9a301SAlan Cox 			}
1577acd9a301SAlan Cox 			vm_map_lock(map);
1578acd9a301SAlan Cox 			if (last_timestamp+1 != map->timestamp) {
1579acd9a301SAlan Cox 				/*
1580acd9a301SAlan Cox 				 * Look again for the entry because the map was
1581acd9a301SAlan Cox 				 * modified while it was unlocked.
1582acd9a301SAlan Cox 				 * Specifically, the entry may have been
1583acd9a301SAlan Cox 				 * clipped, merged, or deleted.
1584acd9a301SAlan Cox 				 */
1585acd9a301SAlan Cox 				if (!vm_map_lookup_entry(map, saved_start,
1586acd9a301SAlan Cox 				    &tmp_entry)) {
1587acd9a301SAlan Cox 					if (saved_start == start) {
1588acd9a301SAlan Cox 						/*
1589acd9a301SAlan Cox 						 * First_entry has been deleted.
1590acd9a301SAlan Cox 						 */
1591acd9a301SAlan Cox 						vm_map_unlock(map);
1592acd9a301SAlan Cox 						return (KERN_INVALID_ADDRESS);
1593acd9a301SAlan Cox 					}
1594acd9a301SAlan Cox 					end = saved_start;
1595acd9a301SAlan Cox 					rv = KERN_INVALID_ADDRESS;
1596acd9a301SAlan Cox 					goto done;
1597acd9a301SAlan Cox 				}
1598acd9a301SAlan Cox 				if (entry == first_entry)
1599acd9a301SAlan Cox 					first_entry = tmp_entry;
1600acd9a301SAlan Cox 				else
1601acd9a301SAlan Cox 					first_entry = NULL;
1602acd9a301SAlan Cox 				entry = tmp_entry;
1603acd9a301SAlan Cox 			}
1604acd9a301SAlan Cox 			last_timestamp = map->timestamp;
1605acd9a301SAlan Cox 			continue;
1606acd9a301SAlan Cox 		}
1607acd9a301SAlan Cox 		vm_map_clip_start(map, entry, start);
1608acd9a301SAlan Cox 		vm_map_clip_end(map, entry, end);
1609acd9a301SAlan Cox 		/*
1610acd9a301SAlan Cox 		 * Mark the entry in case the map lock is released.  (See
1611acd9a301SAlan Cox 		 * above.)
1612acd9a301SAlan Cox 		 */
1613acd9a301SAlan Cox 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1614acd9a301SAlan Cox 		/*
1615acd9a301SAlan Cox 		 * Check the map for holes in the specified region.
1616acd9a301SAlan Cox 		 */
1617acd9a301SAlan Cox 		if (entry->end < end && (entry->next == &map->header ||
1618acd9a301SAlan Cox 		    entry->next->start > entry->end)) {
1619acd9a301SAlan Cox 			end = entry->end;
1620acd9a301SAlan Cox 			rv = KERN_INVALID_ADDRESS;
1621acd9a301SAlan Cox 			goto done;
1622acd9a301SAlan Cox 		}
1623acd9a301SAlan Cox 		/*
1624acd9a301SAlan Cox 		 * Require that the entry is wired.
1625acd9a301SAlan Cox 		 */
1626acd9a301SAlan Cox 		if (entry->wired_count == 0 || (user_unwire &&
1627acd9a301SAlan Cox 		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) {
1628acd9a301SAlan Cox 			end = entry->end;
1629acd9a301SAlan Cox 			rv = KERN_INVALID_ARGUMENT;
1630acd9a301SAlan Cox 			goto done;
1631acd9a301SAlan Cox 		}
1632acd9a301SAlan Cox 		entry = entry->next;
1633acd9a301SAlan Cox 	}
1634acd9a301SAlan Cox 	rv = KERN_SUCCESS;
1635acd9a301SAlan Cox done:
	/*
	 * Second pass: on success, drop each entry's wire count and
	 * unwire the pages when it reaches zero.  In all cases clear
	 * the in-transition marks set above and wake any waiters.
	 */
1636e27e17b7SAlan Cox 	need_wakeup = FALSE;
1637acd9a301SAlan Cox 	if (first_entry == NULL) {
1638acd9a301SAlan Cox 		result = vm_map_lookup_entry(map, start, &first_entry);
1639acd9a301SAlan Cox 		KASSERT(result, ("vm_map_unwire: lookup failed"));
1640acd9a301SAlan Cox 	}
1641acd9a301SAlan Cox 	entry = first_entry;
1642acd9a301SAlan Cox 	while (entry != &map->header && entry->start < end) {
1643b2f3846aSAlan Cox 		if (rv == KERN_SUCCESS) {
1644b2f3846aSAlan Cox 			if (user_unwire)
1645b2f3846aSAlan Cox 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1646b2f3846aSAlan Cox 			entry->wired_count--;
1647b2f3846aSAlan Cox 			if (entry->wired_count == 0) {
1648b2f3846aSAlan Cox 				/*
1649b2f3846aSAlan Cox 				 * Retain the map lock.
1650b2f3846aSAlan Cox 				 */
1651b2f3846aSAlan Cox 				vm_fault_unwire(map, entry->start, entry->end);
1652b2f3846aSAlan Cox 			}
1653b2f3846aSAlan Cox 		}
1654acd9a301SAlan Cox 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1655acd9a301SAlan Cox 			("vm_map_unwire: in-transition flag missing"));
1656acd9a301SAlan Cox 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1657acd9a301SAlan Cox 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1658acd9a301SAlan Cox 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1659acd9a301SAlan Cox 			need_wakeup = TRUE;
1660acd9a301SAlan Cox 		}
1661acd9a301SAlan Cox 		vm_map_simplify_entry(map, entry);
1662acd9a301SAlan Cox 		entry = entry->next;
1663acd9a301SAlan Cox 	}
1664acd9a301SAlan Cox 	vm_map_unlock(map);
1665acd9a301SAlan Cox 	if (need_wakeup)
1666acd9a301SAlan Cox 		vm_map_wakeup(map);
1667acd9a301SAlan Cox 	return (rv);
1668acd9a301SAlan Cox }
1669acd9a301SAlan Cox 
1670acd9a301SAlan Cox /*
1671e27e17b7SAlan Cox  *	vm_map_wire:
1672e27e17b7SAlan Cox  *
1673e27e17b7SAlan Cox  *	Implements both kernel and user wiring.
 *
 *	If "user_wire" is TRUE, successfully wired entries are marked
 *	MAP_ENTRY_USER_WIRED.  Returns KERN_SUCCESS;
 *	KERN_INVALID_ADDRESS if the range starts outside any entry,
 *	contains a hole, or an entry vanishes while the map is
 *	unlocked; otherwise the error from vm_fault_wire().  On error,
 *	"end" is pulled back and the cleanup pass undoes any wiring
 *	performed on entries already visited.
1674e27e17b7SAlan Cox  */
1675e27e17b7SAlan Cox int
1676e27e17b7SAlan Cox vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1677e27e17b7SAlan Cox 	boolean_t user_wire)
1678e27e17b7SAlan Cox {
167912d7cc84SAlan Cox 	vm_map_entry_t entry, first_entry, tmp_entry;
168012d7cc84SAlan Cox 	vm_offset_t saved_end, saved_start;
168112d7cc84SAlan Cox 	unsigned int last_timestamp;
168212d7cc84SAlan Cox 	int rv;
168312d7cc84SAlan Cox 	boolean_t need_wakeup, result;
1684e27e17b7SAlan Cox 
168512d7cc84SAlan Cox 	vm_map_lock(map);
168612d7cc84SAlan Cox 	VM_MAP_RANGE_CHECK(map, start, end);
168712d7cc84SAlan Cox 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
168812d7cc84SAlan Cox 		vm_map_unlock(map);
168912d7cc84SAlan Cox 		return (KERN_INVALID_ADDRESS);
169012d7cc84SAlan Cox 	}
169112d7cc84SAlan Cox 	last_timestamp = map->timestamp;
169212d7cc84SAlan Cox 	entry = first_entry;
	/*
	 * First pass: clip each entry in the range, mark it
	 * MAP_ENTRY_IN_TRANSITION, and fault its pages wired.  Sleep
	 * and re-look up if another thread has the entry in transition.
	 */
169312d7cc84SAlan Cox 	while (entry != &map->header && entry->start < end) {
169412d7cc84SAlan Cox 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
169512d7cc84SAlan Cox 			/*
169612d7cc84SAlan Cox 			 * We have not yet clipped the entry.
169712d7cc84SAlan Cox 			 */
169812d7cc84SAlan Cox 			saved_start = (start >= entry->start) ? start :
169912d7cc84SAlan Cox 			    entry->start;
170012d7cc84SAlan Cox 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
170112d7cc84SAlan Cox 			if (vm_map_unlock_and_wait(map, user_wire)) {
170212d7cc84SAlan Cox 				/*
170312d7cc84SAlan Cox 				 * Allow interruption of user wiring?
170412d7cc84SAlan Cox 				 */
170512d7cc84SAlan Cox 			}
170612d7cc84SAlan Cox 			vm_map_lock(map);
170712d7cc84SAlan Cox 			if (last_timestamp + 1 != map->timestamp) {
170812d7cc84SAlan Cox 				/*
170912d7cc84SAlan Cox 				 * Look again for the entry because the map was
171012d7cc84SAlan Cox 				 * modified while it was unlocked.
171112d7cc84SAlan Cox 				 * Specifically, the entry may have been
171212d7cc84SAlan Cox 				 * clipped, merged, or deleted.
171312d7cc84SAlan Cox 				 */
171412d7cc84SAlan Cox 				if (!vm_map_lookup_entry(map, saved_start,
171512d7cc84SAlan Cox 				    &tmp_entry)) {
171612d7cc84SAlan Cox 					if (saved_start == start) {
171712d7cc84SAlan Cox 						/*
171812d7cc84SAlan Cox 						 * first_entry has been deleted.
171912d7cc84SAlan Cox 						 */
172012d7cc84SAlan Cox 						vm_map_unlock(map);
172112d7cc84SAlan Cox 						return (KERN_INVALID_ADDRESS);
172212d7cc84SAlan Cox 					}
172312d7cc84SAlan Cox 					end = saved_start;
172412d7cc84SAlan Cox 					rv = KERN_INVALID_ADDRESS;
172512d7cc84SAlan Cox 					goto done;
172612d7cc84SAlan Cox 				}
172712d7cc84SAlan Cox 				if (entry == first_entry)
172812d7cc84SAlan Cox 					first_entry = tmp_entry;
172912d7cc84SAlan Cox 				else
173012d7cc84SAlan Cox 					first_entry = NULL;
173112d7cc84SAlan Cox 				entry = tmp_entry;
173212d7cc84SAlan Cox 			}
173312d7cc84SAlan Cox 			last_timestamp = map->timestamp;
173412d7cc84SAlan Cox 			continue;
173512d7cc84SAlan Cox 		}
173612d7cc84SAlan Cox 		vm_map_clip_start(map, entry, start);
173712d7cc84SAlan Cox 		vm_map_clip_end(map, entry, end);
173812d7cc84SAlan Cox 		/*
173912d7cc84SAlan Cox 		 * Mark the entry in case the map lock is released.  (See
174012d7cc84SAlan Cox 		 * above.)
174112d7cc84SAlan Cox 		 */
174212d7cc84SAlan Cox 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
174312d7cc84SAlan Cox 		/*
174412d7cc84SAlan Cox 		 * If the entry is not yet wired, fault its pages in now,
		 * dropping the map lock around vm_fault_wire() and relying
		 * on the in-transition mark set above to keep the entry
		 * from disappearing.
174512d7cc84SAlan Cox 		 */
174612d7cc84SAlan Cox 		if (entry->wired_count == 0) {
174712d7cc84SAlan Cox 			entry->wired_count++;
174812d7cc84SAlan Cox 			saved_start = entry->start;
174912d7cc84SAlan Cox 			saved_end = entry->end;
175012d7cc84SAlan Cox 			/*
175112d7cc84SAlan Cox 			 * Release the map lock, relying on the in-transition
175212d7cc84SAlan Cox 			 * mark.
175312d7cc84SAlan Cox 			 */
175412d7cc84SAlan Cox 			vm_map_unlock(map);
1755ef594d31SAlan Cox 			rv = vm_fault_wire(map, saved_start, saved_end,
1756ef594d31SAlan Cox 			    user_wire);
175712d7cc84SAlan Cox 			vm_map_lock(map);
175812d7cc84SAlan Cox 			if (last_timestamp + 1 != map->timestamp) {
175912d7cc84SAlan Cox 				/*
176012d7cc84SAlan Cox 				 * Look again for the entry because the map was
176112d7cc84SAlan Cox 				 * modified while it was unlocked.  The entry
176212d7cc84SAlan Cox 				 * may have been clipped, but NOT merged or
176312d7cc84SAlan Cox 				 * deleted.
176412d7cc84SAlan Cox 				 */
176512d7cc84SAlan Cox 				result = vm_map_lookup_entry(map, saved_start,
176612d7cc84SAlan Cox 				    &tmp_entry);
176712d7cc84SAlan Cox 				KASSERT(result, ("vm_map_wire: lookup failed"));
176812d7cc84SAlan Cox 				if (entry == first_entry)
176912d7cc84SAlan Cox 					first_entry = tmp_entry;
177012d7cc84SAlan Cox 				else
177112d7cc84SAlan Cox 					first_entry = NULL;
177212d7cc84SAlan Cox 				entry = tmp_entry;
177328c58286SAlan Cox 				while (entry->end < saved_end) {
177428c58286SAlan Cox 					if (rv != KERN_SUCCESS) {
177528c58286SAlan Cox 						KASSERT(entry->wired_count == 1,
177628c58286SAlan Cox 						    ("vm_map_wire: bad count"));
177728c58286SAlan Cox 						entry->wired_count = -1;
177828c58286SAlan Cox 					}
177912d7cc84SAlan Cox 					entry = entry->next;
178012d7cc84SAlan Cox 				}
178128c58286SAlan Cox 			}
178212d7cc84SAlan Cox 			last_timestamp = map->timestamp;
178312d7cc84SAlan Cox 			if (rv != KERN_SUCCESS) {
178428c58286SAlan Cox 				KASSERT(entry->wired_count == 1,
178528c58286SAlan Cox 				    ("vm_map_wire: bad count"));
178612d7cc84SAlan Cox 				/*
178728c58286SAlan Cox 				 * Assign an out-of-range value to represent
178828c58286SAlan Cox 				 * the failure to wire this entry.
178912d7cc84SAlan Cox 				 */
179028c58286SAlan Cox 				entry->wired_count = -1;
179112d7cc84SAlan Cox 				end = entry->end;
179212d7cc84SAlan Cox 				goto done;
179312d7cc84SAlan Cox 			}
179412d7cc84SAlan Cox 		} else if (!user_wire ||
179512d7cc84SAlan Cox 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
179612d7cc84SAlan Cox 			entry->wired_count++;
179712d7cc84SAlan Cox 		}
179812d7cc84SAlan Cox 		/*
179912d7cc84SAlan Cox 		 * Check the map for holes in the specified region.
180012d7cc84SAlan Cox 		 */
180112d7cc84SAlan Cox 		if (entry->end < end && (entry->next == &map->header ||
180212d7cc84SAlan Cox 		    entry->next->start > entry->end)) {
180312d7cc84SAlan Cox 			end = entry->end;
180412d7cc84SAlan Cox 			rv = KERN_INVALID_ADDRESS;
180512d7cc84SAlan Cox 			goto done;
180612d7cc84SAlan Cox 		}
180712d7cc84SAlan Cox 		entry = entry->next;
180812d7cc84SAlan Cox 	}
180912d7cc84SAlan Cox 	rv = KERN_SUCCESS;
181012d7cc84SAlan Cox done:
	/*
	 * Second pass: on success, set the user-wired mark; on failure,
	 * roll back wire counts (entries marked -1 failed to wire and
	 * need no unwiring).  Always clear the in-transition marks set
	 * above and wake any waiters.
	 */
181112d7cc84SAlan Cox 	need_wakeup = FALSE;
181212d7cc84SAlan Cox 	if (first_entry == NULL) {
181312d7cc84SAlan Cox 		result = vm_map_lookup_entry(map, start, &first_entry);
181412d7cc84SAlan Cox 		KASSERT(result, ("vm_map_wire: lookup failed"));
181512d7cc84SAlan Cox 	}
181612d7cc84SAlan Cox 	entry = first_entry;
181712d7cc84SAlan Cox 	while (entry != &map->header && entry->start < end) {
181812d7cc84SAlan Cox 		if (rv == KERN_SUCCESS) {
181912d7cc84SAlan Cox 			if (user_wire)
182012d7cc84SAlan Cox 				entry->eflags |= MAP_ENTRY_USER_WIRED;
182128c58286SAlan Cox 		} else if (entry->wired_count == -1) {
182228c58286SAlan Cox 			/*
182328c58286SAlan Cox 			 * Wiring failed on this entry.  Thus, unwiring is
182428c58286SAlan Cox 			 * unnecessary.
182528c58286SAlan Cox 			 */
182628c58286SAlan Cox 			entry->wired_count = 0;
182712d7cc84SAlan Cox 		} else {
1828f6116791SAlan Cox 			if (!user_wire ||
1829f6116791SAlan Cox 			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
183012d7cc84SAlan Cox 				entry->wired_count--;
183112d7cc84SAlan Cox 			if (entry->wired_count == 0) {
183212d7cc84SAlan Cox 				/*
183312d7cc84SAlan Cox 				 * Retain the map lock.
183412d7cc84SAlan Cox 				 */
183512d7cc84SAlan Cox 				vm_fault_unwire(map, entry->start, entry->end);
183612d7cc84SAlan Cox 			}
183712d7cc84SAlan Cox 		}
183812d7cc84SAlan Cox 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
183912d7cc84SAlan Cox 			("vm_map_wire: in-transition flag missing"));
184012d7cc84SAlan Cox 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
184112d7cc84SAlan Cox 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
184212d7cc84SAlan Cox 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
184312d7cc84SAlan Cox 			need_wakeup = TRUE;
184412d7cc84SAlan Cox 		}
184512d7cc84SAlan Cox 		vm_map_simplify_entry(map, entry);
184612d7cc84SAlan Cox 		entry = entry->next;
184712d7cc84SAlan Cox 	}
184812d7cc84SAlan Cox 	vm_map_unlock(map);
184912d7cc84SAlan Cox 	if (need_wakeup)
185012d7cc84SAlan Cox 		vm_map_wakeup(map);
185112d7cc84SAlan Cox 	return (rv);
1852e27e17b7SAlan Cox }
1853e27e17b7SAlan Cox 
1854e27e17b7SAlan Cox /*
1855df8bae1dSRodney W. Grimes  * vm_map_clean
1856df8bae1dSRodney W. Grimes  *
1857df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
1858df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
1859df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
1860df8bae1dSRodney W. Grimes  *
1861df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
1862df8bae1dSRodney W. Grimes  */
int
vm_map_clean(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t syncio,
	boolean_t invalidate)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	GIANT_REQUIRED;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}
	/*
	 * Make a first pass to check for holes.  The entire range must be
	 * mapped by contiguous entries, and top-level submap entries are
	 * rejected outright.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
			current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	/*
	 * Remove all pmap mappings up front when invalidating, so that the
	 * pages dropped below are no longer reachable through this map.
	 */
	if (invalidate) {
		vm_page_lock_queues();
		pmap_remove(map->pmap, start, end);
		vm_page_unlock_queues();
	}
	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			/*
			 * Translate (offset, size) through the submap to
			 * find the underlying object.  Only the first
			 * matching submap entry is considered here.
			 */
			smap = current->object.sub_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		/*
		 * Note that there is absolutely no sense in writing out
		 * anonymous objects, so we track down the vnode object
		 * to write out.
		 * We invalidate (remove) all pages from the address space
		 * anyway, for semantic correctness.
		 *
		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
		 * may start out with a NULL object.
		 */
		while (object && object->backing_object) {
			object = object->backing_object;
			offset += object->backing_object_offset;
			/* Clamp the range to the backing object's size. */
			if (object->size < OFF_TO_IDX(offset + size))
				size = IDX_TO_OFF(object->size) - offset;
		}
		if (object && (object->type == OBJT_VNODE) &&
		    (current->protection & VM_PROT_WRITE)) {
			/*
			 * Flush pages if writing is allowed, invalidate them
			 * if invalidation requested.  Pages undergoing I/O
			 * will be ignored by vm_object_page_remove().
			 *
			 * We cannot lock the vnode and then wait for paging
			 * to complete without deadlocking against vm_fault.
			 * Instead we simply call vm_object_page_remove() and
			 * allow it to block internally on a page-by-page
			 * basis when it encounters pages undergoing async
			 * I/O.
			 */
			int flags;

			/* Hold a reference across the vnode-locked clean. */
			vm_object_reference(object);
			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
			/* syncio or invalidate both force synchronous I/O. */
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? OBJPC_INVAL : 0;
			vm_object_page_clean(object,
			    OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + size + PAGE_MASK),
			    flags);
			VOP_UNLOCK(object->handle, 0, curthread);
			vm_object_deallocate(object);
		}
		if (object && invalidate &&
		    ((object->type == OBJT_VNODE) ||
		     (object->type == OBJT_DEVICE))) {
			/* Free the cached pages themselves, not just flush. */
			vm_object_reference(object);
			vm_object_lock(object);
			vm_object_page_remove(object,
			    OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + size + PAGE_MASK),
			    FALSE);
			vm_object_unlock(object);
			vm_object_deallocate(object);
                }
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}
1991df8bae1dSRodney W. Grimes 
1992df8bae1dSRodney W. Grimes /*
1993df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
1994df8bae1dSRodney W. Grimes  *
1995df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
1996df8bae1dSRodney W. Grimes  *
1997df8bae1dSRodney W. Grimes  *	The map in question should be locked.
1998df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
1999df8bae1dSRodney W. Grimes  */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
	/* Drop the fault-level wiring on the entry's whole range ... */
	vm_fault_unwire(map, entry->start, entry->end);
	/* ... and record that the entry is no longer wired at all. */
	entry->wired_count = 0;
}
2006df8bae1dSRodney W. Grimes 
2007df8bae1dSRodney W. Grimes /*
2008df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
2009df8bae1dSRodney W. Grimes  *
2010df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
2011df8bae1dSRodney W. Grimes  */
static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
	/* Detach the entry from the map and shrink the map's size. */
	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		/*
		 * Release the entry's reference on its backing object;
		 * submap entries do not hold an object reference.
		 */
		vm_object_deallocate(entry->object.vm_object);
	}

	/* Return the entry structure itself to the map's free pool. */
	vm_map_entry_dispose(map, entry);
}
2024df8bae1dSRodney W. Grimes 
2025df8bae1dSRodney W. Grimes /*
2026df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
2027df8bae1dSRodney W. Grimes  *
2028df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
2029df8bae1dSRodney W. Grimes  *	map.
2030df8bae1dSRodney W. Grimes  */
int
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_object_t object;
	vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it.
	 * The caller is expected to hold the map lock.
	 */
	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);
	}

	/*
	 * Save the free space hint
	 */
	if (entry == &map->header) {
		map->first_free = &map->header;
	} else if (map->first_free->start >= start) {
		map->first_free = entry->prev;
	}

	/*
	 * Step through all entries in this region
	 */
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		vm_offset_t s, e;
		vm_pindex_t offidxstart, offidxend, count;

		/*
		 * Wait for wiring or unwiring of an entry to complete.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
			unsigned int last_timestamp;
			vm_offset_t saved_start;
			vm_map_entry_t tmp_entry;

			saved_start = entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			last_timestamp = map->timestamp;
			(void) vm_map_unlock_and_wait(map, FALSE);
			vm_map_lock(map);
			/*
			 * timestamp advanced exactly once for our own
			 * relock; any larger delta means someone else
			 * changed the map while we slept.
			 */
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
							 &tmp_entry))
					entry = tmp_entry->next;
				else {
					entry = tmp_entry;
					vm_map_clip_start(map, entry,
							  saved_start);
				}
			}
			continue;
		}
		vm_map_clip_end(map, entry, end);

		s = entry->start;
		e = entry->end;
		next = entry->next;

		offidxstart = OFF_TO_IDX(entry->offset);
		count = OFF_TO_IDX(e - s);
		object = entry->object.vm_object;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}

		offidxend = offidxstart + count;

		if ((object == kernel_object) || (object == kmem_object)) {
			vm_object_lock(object);
			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
			vm_object_unlock(object);
		} else {
			/*
			 * NOTE(review): object may be NULL on this path;
			 * vm_object_lock() appears to tolerate a NULL
			 * object in this tree -- confirm.
			 */
			vm_object_lock(object);
			vm_page_lock_queues();
			pmap_remove(map->pmap, s, e);
			vm_page_unlock_queues();
			/*
			 * For anonymous objects with only this mapping
			 * remaining, free the backing pages and swap now
			 * rather than waiting for the last reference.
			 */
			if (object != NULL &&
			    object->ref_count != 1 &&
			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
				vm_object_collapse(object);
				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
				if (object->type == OBJT_SWAP) {
					swap_pager_freespace(object, offidxstart, count);
				}
				/* Trim the object if we removed its tail. */
				if (offidxend >= object->size &&
				    offidxstart < object->size) {
					object->size = offidxstart;
				}
			}
			vm_object_unlock(object);
		}

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}
2153df8bae1dSRodney W. Grimes 
2154df8bae1dSRodney W. Grimes /*
2155df8bae1dSRodney W. Grimes  *	vm_map_remove:
2156df8bae1dSRodney W. Grimes  *
2157df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
2158df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
2159df8bae1dSRodney W. Grimes  */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int result, s = 0;

	/*
	 * NOTE(review): kmem_map is raised to splvm() around the locked
	 * section, presumably to keep interrupt-time kmem activity out
	 * while the map is locked -- confirm.
	 */
	if (map == kmem_map)
		s = splvm();

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}
2178df8bae1dSRodney W. Grimes 
2179df8bae1dSRodney W. Grimes /*
2180df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
2181df8bae1dSRodney W. Grimes  *
2182df8bae1dSRodney W. Grimes  *	Assert that the target map allows the specified
2183df8bae1dSRodney W. Grimes  *	privilege on the entire address region given.
2184df8bae1dSRodney W. Grimes  *	The entire region must be allocated.
2185df8bae1dSRodney W. Grimes  */
21860d94caffSDavid Greenman boolean_t
2187b9dcd593SBruce Evans vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2188b9dcd593SBruce Evans 			vm_prot_t protection)
2189df8bae1dSRodney W. Grimes {
2190c0877f10SJohn Dyson 	vm_map_entry_t entry;
2191df8bae1dSRodney W. Grimes 	vm_map_entry_t tmp_entry;
2192df8bae1dSRodney W. Grimes 
21932f6c16e1SAlan Cox 	vm_map_lock_read(map);
2194df8bae1dSRodney W. Grimes 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
21952f6c16e1SAlan Cox 		vm_map_unlock_read(map);
2196df8bae1dSRodney W. Grimes 		return (FALSE);
2197df8bae1dSRodney W. Grimes 	}
2198df8bae1dSRodney W. Grimes 	entry = tmp_entry;
2199df8bae1dSRodney W. Grimes 
2200df8bae1dSRodney W. Grimes 	while (start < end) {
2201df8bae1dSRodney W. Grimes 		if (entry == &map->header) {
22022f6c16e1SAlan Cox 			vm_map_unlock_read(map);
2203df8bae1dSRodney W. Grimes 			return (FALSE);
2204df8bae1dSRodney W. Grimes 		}
2205df8bae1dSRodney W. Grimes 		/*
2206df8bae1dSRodney W. Grimes 		 * No holes allowed!
2207df8bae1dSRodney W. Grimes 		 */
2208df8bae1dSRodney W. Grimes 		if (start < entry->start) {
22092f6c16e1SAlan Cox 			vm_map_unlock_read(map);
2210df8bae1dSRodney W. Grimes 			return (FALSE);
2211df8bae1dSRodney W. Grimes 		}
2212df8bae1dSRodney W. Grimes 		/*
2213df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
2214df8bae1dSRodney W. Grimes 		 */
2215df8bae1dSRodney W. Grimes 		if ((entry->protection & protection) != protection) {
22162f6c16e1SAlan Cox 			vm_map_unlock_read(map);
2217df8bae1dSRodney W. Grimes 			return (FALSE);
2218df8bae1dSRodney W. Grimes 		}
2219df8bae1dSRodney W. Grimes 		/* go to next entry */
2220df8bae1dSRodney W. Grimes 		start = entry->end;
2221df8bae1dSRodney W. Grimes 		entry = entry->next;
2222df8bae1dSRodney W. Grimes 	}
22232f6c16e1SAlan Cox 	vm_map_unlock_read(map);
2224df8bae1dSRodney W. Grimes 	return (TRUE);
2225df8bae1dSRodney W. Grimes }
2226df8bae1dSRodney W. Grimes 
222786524867SJohn Dyson /*
2228df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
2229df8bae1dSRodney W. Grimes  *
2230df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
2231df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
2232df8bae1dSRodney W. Grimes  */
static void
vm_map_copy_entry(
	vm_map_t src_map,
	vm_map_t dst_map,
	vm_map_entry_t src_entry,
	vm_map_entry_t dst_entry)
{
	vm_object_t src_object;

	/* Submap entries are not copied. */
	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
			/* Write-protect the source range to force COW faults. */
			vm_page_lock_queues();
			pmap_protect(src_map->pmap,
			    src_entry->start,
			    src_entry->end,
			    src_entry->protection & ~VM_PROT_WRITE);
			vm_page_unlock_queues();
		}

		/*
		 * Make a copy of the object.
		 */
		if ((src_object = src_entry->object.vm_object) != NULL) {

			if ((src_object->handle == NULL) &&
				(src_object->type == OBJT_DEFAULT ||
				 src_object->type == OBJT_SWAP)) {
				vm_object_collapse(src_object);
				/*
				 * Split off the portion this entry maps when
				 * the object has no other mappings, so the
				 * two address spaces don't share one large
				 * anonymous object unnecessarily.
				 */
				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
					vm_object_split(src_entry);
					src_object = src_entry->object.vm_object;
				}
			}

			/* Both entries now share the object copy-on-write. */
			vm_object_reference(src_object);
			vm_object_lock(src_object);
			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
			vm_object_unlock(src_object);
			dst_entry->object.vm_object = src_object;
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->offset = src_entry->offset;
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}
2299df8bae1dSRodney W. Grimes 
2300df8bae1dSRodney W. Grimes /*
2301df8bae1dSRodney W. Grimes  * vmspace_fork:
2302df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
2303df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
2304df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
2305df8bae1dSRodney W. Grimes  * values on the regions in that map.
2306df8bae1dSRodney W. Grimes  *
2307df8bae1dSRodney W. Grimes  * The source map must not be locked.
2308df8bae1dSRodney W. Grimes  */
struct vmspace *
vmspace_fork(struct vmspace *vm1)
{
	struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	vm_object_t object;

	GIANT_REQUIRED;

	vm_map_lock(old_map);
	/* Flag the map as mid-fork while it is locked. */
	old_map->infork = 1;

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
	/* Copy the statistics fields between the startcopy/endcopy markers. */
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
	new_map = &vm2->vm_map;	/* XXX */
	new_map->timestamp = 1;

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			/* Not inherited: simply skip the entry. */
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
					atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = (vm_offset_t) 0;
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				/*
				 * Resolve the pending COW now so parent and
				 * child genuinely share the resulting object.
				 */
				vm_object_shadow(&old_entry->object.vm_object,
					&old_entry->offset,
					atop(old_entry->end - old_entry->start));
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			vm_object_lock(object);
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
			vm_object_unlock(object);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);

			/*
			 * Update the physical map
			 */
			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 * The child gets a COW copy via vm_map_copy_entry().
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	old_map->infork = 0;
	vm_map_unlock(old_map);

	return (vm2);
}
2420df8bae1dSRodney W. Grimes 
/*
 * Reserve max_ssize bytes at addrbos for a grow-down stack, initially
 * mapping only init_ssize bytes at the top of the range.  Returns a
 * KERN_* status.
 */
int
vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
	      vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t new_stack_entry;
	vm_size_t      init_ssize;
	int            rv;

	if (addrbos < vm_map_min(map))
		return (KERN_NO_SPACE);

	/* Initial size is the smaller of max_ssize and sgrowsiz. */
	if (max_ssize < sgrowsiz)
		init_ssize = max_ssize;
	else
		init_ssize = sgrowsiz;

	vm_map_lock(map);

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + init_ssize >
	    curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* If we can't accommodate max_ssize in the current mapping,
	 * no go.  However, we need to be aware that subsequent user
	 * mappings might map into the space we have reserved for
	 * stack, and currently this space is not protected.
	 *
	 * Hopefully we will at least detect this condition
	 * when we try to grow the stack.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < addrbos + max_ssize)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* We initially map a stack of only init_ssize.  We will
	 * grow as needed later.  Since this is to be a grow
	 * down stack, we map at the top of the range.
	 *
	 * Note: we would normally expect prot and max to be
	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
	 * eliminate these as input parameters, and just
	 * pass these values here in the insert call.
	 */
	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
	                   addrbos + max_ssize, prot, max, cow);

	/* Now set the avail_ssize amount */
	if (rv == KERN_SUCCESS){
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
		new_stack_entry = prev_entry->next;
		/* Sanity-check that the insert produced the expected entry. */
		if (new_stack_entry->end   != addrbos + max_ssize ||
		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
			panic ("Bad entry start/end for new stack entry");
		else
			new_stack_entry->avail_ssize = max_ssize - init_ssize;
	}

	vm_map_unlock(map);
	return (rv);
}
249494f7e29aSAlan Cox 
249594f7e29aSAlan Cox /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
249694f7e29aSAlan Cox  * desired address is already mapped, or if we successfully grow
249794f7e29aSAlan Cox  * the stack.  Also returns KERN_SUCCESS if addr is outside the
249894f7e29aSAlan Cox  * stack range (this is strange, but preserves compatibility with
249994f7e29aSAlan Cox  * the grow function in vm_machdep.c).
250094f7e29aSAlan Cox  */
int
vm_map_growstack (struct proc *p, vm_offset_t addr)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t stack_entry;	/* entry whose reserved space we grow into */
	vm_map_entry_t new_stack_entry;
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_offset_t    end;		/* lowest address the stack may grow down to */
	int      grow_amount;
	int      rv;
	int      is_procstack;

	GIANT_REQUIRED;

Retry:
	/* We start with only a read lock and upgrade to a write lock just
	 * before modifying the map.  If an upgrade fails, the lock was
	 * lost entirely, so we must restart the lookup from here.
	 */
	vm_map_lock_read(map);

	/* If addr is already in the entry range, no need to grow.*/
	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
		vm_map_unlock_read(map);
		return (KERN_SUCCESS);
	}

	/* The candidate stack entry is the one just above the faulting
	 * address; if there is none, there is nothing to grow.
	 */
	if ((stack_entry = prev_entry->next) == &map->header) {
		vm_map_unlock_read(map);
		return (KERN_SUCCESS);
	}
	if (prev_entry == &map->header)
		end = stack_entry->start - stack_entry->avail_ssize;
	else
		end = prev_entry->end;

	/* This next test mimics the old grow function in vm_machdep.c.
	 * It really doesn't quite make sense, but we do it anyway
	 * for compatibility.
	 *
	 * If not growable stack, return success.  This signals the
	 * caller to proceed as he would normally with normal vm.
	 */
	if (stack_entry->avail_ssize < 1 ||
	    addr >= stack_entry->start ||
	    addr <  stack_entry->start - stack_entry->avail_ssize) {
		vm_map_unlock_read(map);
		return (KERN_SUCCESS);
	}

	/* Find the minimum grow amount */
	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
	if (grow_amount > stack_entry->avail_ssize) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}

	/* If there is no longer enough space between the entries
	 * nogo, and adjust the available space.  Note: this
	 * should only happen if the user has mapped into the
	 * stack area after the stack was created, and is
	 * probably an error.
	 *
	 * This also effectively destroys any guard page the user
	 * might have intended by limiting the stack size.
	 */
	if (grow_amount > stack_entry->start - end) {
		if (vm_map_lock_upgrade(map))
			goto Retry;

		stack_entry->avail_ssize = stack_entry->start - end;

		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* Addresses at or above vm_maxsaddr belong to the main process stack. */
	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;

	/* If this is the main process stack, see if we're over the
	 * stack limit.
	 */
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}

	/* Round up the grow amount modulo SGROWSIZ */
	grow_amount = roundup (grow_amount, sgrowsiz);
	if (grow_amount > stack_entry->avail_ssize) {
		grow_amount = stack_entry->avail_ssize;
	}
	/* Clamp the rounded-up growth so the process stack stays within
	 * its RLIMIT_STACK resource limit.
	 */
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
		              ctob(vm->vm_ssize);
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + grow_amount >
	    curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}

	/* All checks passed; take the write lock for the actual insert. */
	if (vm_map_lock_upgrade(map))
		goto Retry;

	/* Get the preliminary new entry start value */
	addr = stack_entry->start - grow_amount;

	/* If this puts us into the previous entry, cut back our growth
	 * to the available space.  Also, see the note above.
	 */
	if (addr < end) {
		stack_entry->avail_ssize = stack_entry->start - end;
		addr = end;
	}

	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
	    p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);

	/* Adjust the available stack space by the amount we grew. */
	if (rv == KERN_SUCCESS) {
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, addr);
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end   != stack_entry->start  ||
		    new_stack_entry->start != addr)
			panic ("Bad stack grow start/end in new stack entry");
		else {
			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
							(new_stack_entry->end -
							 new_stack_entry->start);
			/* Account the growth against the process stack size. */
			if (is_procstack)
				vm->vm_ssize += btoc(new_stack_entry->end -
						     new_stack_entry->start);
		}
	}

	vm_map_unlock(map);
	return (rv);
}
264194f7e29aSAlan Cox 
2642df8bae1dSRodney W. Grimes /*
26435856e12eSJohn Dyson  * Unshare the specified VM space for exec.  If other processes are
26445856e12eSJohn Dyson  * mapped to it, then create a new one.  The new vmspace is null.
26455856e12eSJohn Dyson  */
void
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	GIANT_REQUIRED;
	newvmspace = vmspace_alloc(minuser, maxuser);
	/* Carry over every field from vm_startcopy to the end of the
	 * structure into the new vmspace.
	 */
	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but to let
	 * the other processes that are still using the vmspace finally
	 * run it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future mods.
	 */
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	/* Drop our reference; the last user of the old vmspace reclaims it. */
	vmspace_free(oldvmspace);
	if (p == curthread->td_proc)		/* XXXKSE ? */
		pmap_activate(curthread);
}
26695856e12eSJohn Dyson 
26705856e12eSJohn Dyson /*
26715856e12eSJohn Dyson  * Unshare the specified VM space for forcing COW.  This
26725856e12eSJohn Dyson  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
26735856e12eSJohn Dyson  */
void
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	GIANT_REQUIRED;
	/* Already unshared: nothing to do. */
	if (oldvmspace->vm_refcnt == 1)
		return;
	/* Fork a private copy and switch the process over to it. */
	newvmspace = vmspace_fork(oldvmspace);
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	/* Drop our reference to the old, still-shared vmspace. */
	vmspace_free(oldvmspace);
	if (p == curthread->td_proc)		/* XXXKSE ? */
		pmap_activate(curthread);
}
26905856e12eSJohn Dyson 
26915856e12eSJohn Dyson /*
2692df8bae1dSRodney W. Grimes  *	vm_map_lookup:
2693df8bae1dSRodney W. Grimes  *
2694df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
2695df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
2696df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
2697df8bae1dSRodney W. Grimes  *	type specified.
2698df8bae1dSRodney W. Grimes  *
2699df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
2700df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
2701df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
2702df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
2703df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
2704df8bae1dSRodney W. Grimes  *
2705df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
2706df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
2707df8bae1dSRodney W. Grimes  *
2708df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
2709df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
2710df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
2711df8bae1dSRodney W. Grimes  *	remain the same.
2712df8bae1dSRodney W. Grimes  */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;

RetryLookup:;
	/*
	 * Lookup the faulting address.
	 */

	vm_map_lock_read(map);
	/* RETURN drops the read lock on the way out; it is only valid
	 * while the map is read-locked.
	 */
#define	RETURN(why) \
		{ \
		vm_map_unlock_read(map); \
		return (why); \
		}

	/*
	 * If the map has an interesting hint, try it before calling full
	 * blown lookup routine.
	 */
	entry = map->root;
	*out_entry = entry;
	if (entry == NULL ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, out_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = *out_entry;
	}

	/*
	 * Handle submaps.  Descend into the submap and restart the
	 * lookup there, releasing the parent map's lock first.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 * Note the special case for MAP_ENTRY_COW
	 * pages with an override.  This is to implement a forced
	 * COW for debuggers.
	 */
	if (fault_type & VM_PROT_OVERRIDE_WRITE)
		prot = entry->max_protection;
	else
		prot = entry->protection;
	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type) {
			RETURN(KERN_PROTECTION_FAILURE);
	}
	/* Disallow write faults on user-wired COW entries unless the
	 * caller explicitly asked for the COW override.
	 */
	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE) &&
	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
		RETURN(KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If the entry was copy-on-write, we either ...
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */
			/* Upgrade failure means the lock was lost; retry. */
			if (vm_map_lock_upgrade(map))
				goto RetryLookup;

			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    atop(entry->end - entry->start));
			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

			vm_map_lock_downgrade(map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (vm_map_lock_upgrade(map))
			goto RetryLookup;
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->offset = 0;
		vm_map_lock_downgrade(map);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */
	*out_prot = prot;
	return (KERN_SUCCESS);

#undef	RETURN
}
2863df8bae1dSRodney W. Grimes 
2864df8bae1dSRodney W. Grimes /*
2865df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
2866df8bae1dSRodney W. Grimes  *
2867df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
2868df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
2869df8bae1dSRodney W. Grimes  */
28700d94caffSDavid Greenman void
28711b40f8c0SMatthew Dillon vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
2872df8bae1dSRodney W. Grimes {
2873df8bae1dSRodney W. Grimes 	/*
2874df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
2875df8bae1dSRodney W. Grimes 	 */
2876df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
2877df8bae1dSRodney W. Grimes }
2878df8bae1dSRodney W. Grimes 
2879c50fe92bSAlan Cox #ifdef ENABLE_VFS_IOOPT
28801efb74fbSJohn Dyson /*
2881c50fe92bSAlan Cox  * Experimental support for zero-copy I/O
2882c50fe92bSAlan Cox  *
28831efb74fbSJohn Dyson  * Implement uiomove with VM operations.  This handles (and collateral changes)
28841efb74fbSJohn Dyson  * support every combination of source object modification, and COW type
28851efb74fbSJohn Dyson  * operations.
28861efb74fbSJohn Dyson  */
int
vm_uiomove(
	vm_map_t mapa,
	vm_object_t srcobject,
	off_t cp,		/* source object offset */
	int cnta,		/* byte count to move */
	vm_offset_t uaddra,	/* destination user address */
	int *npages)		/* out: pages moved, or NULL */
{
	vm_map_t map;
	vm_object_t first_object, oldobject, object;
	vm_map_entry_t entry;
	vm_prot_t prot;
	boolean_t wired;
	int tcnt, rv;
	vm_offset_t uaddr, start, end, tend;
	vm_pindex_t first_pindex, oindex;
	vm_size_t osize;
	off_t ooffset;
	int cnt;

	GIANT_REQUIRED;

	if (npages)
		*npages = 0;

	cnt = cnta;
	uaddr = uaddra;

	/* Process the transfer one map entry's worth at a time. */
	while (cnt > 0) {
		map = mapa;

		/* On success the map is left read-locked; it is released
		 * via vm_map_lookup_done() or vm_map_unlock() below.
		 */
		if ((vm_map_lookup(&map, uaddr,
			VM_PROT_READ, &entry, &first_object,
			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
			return EFAULT;
		}

		/* NOTE(review): the map is only read-locked here, yet the
		 * entry is clipped below; confirm the locking protocol
		 * permits this in the ENABLE_VFS_IOOPT configuration.
		 */
		vm_map_clip_start(map, entry, uaddr);

		/* Limit this pass to the portion inside the entry. */
		tcnt = cnt;
		tend = uaddr + tcnt;
		if (tend > entry->end) {
			tcnt = entry->end - uaddr;
			tend = entry->end;
		}

		vm_map_clip_end(map, entry, tend);

		start = entry->start;
		end = entry->end;

		osize = atop(tcnt);

		oindex = OFF_TO_IDX(cp);
		/* If the caller wants a page count, bail out (return 0)
		 * unless every source page is resident, unbusied, and
		 * fully valid.
		 */
		if (npages) {
			vm_size_t idx;
			for (idx = 0; idx < osize; idx++) {
				vm_page_t m;
				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
				/*
				 * disallow busy or invalid pages, but allow
				 * m->busy pages if they are entirely valid.
				 */
				if ((m->flags & PG_BUSY) ||
					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
			}
		}

/*
 * If we are changing an existing map entry, just redirect
 * the object, and change mappings.
 */
		if ((first_object->type == OBJT_VNODE) &&
			((oldobject = entry->object.vm_object) == first_object)) {

			if ((entry->offset != cp) || (oldobject != srcobject)) {
				/*
   				* Remove old window into the file
   				*/
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();

				/*
   				* Force copy on write for mmaped regions
   				*/
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
   				* Point the object appropriately
   				*/
				if (oldobject != srcobject) {

				/*
   				* Set the object optimization hint flag
   				*/
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);
					entry->object.vm_object = srcobject;

					if (oldobject) {
						vm_object_deallocate(oldobject);
					}
				}

				entry->offset = cp;
				map->timestamp++;
			} else {
				/* Window unchanged; just invalidate mappings. */
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();
			}

/*
 * If the entry is a small, private anonymous object with a single
 * reference, retarget its backing object at the source instead.
 */
		} else if ((first_object->ref_count == 1) &&
			(first_object->size == osize) &&
			((first_object->type == OBJT_DEFAULT) ||
				(first_object->type == OBJT_SWAP)) ) {

			oldobject = first_object->backing_object;

			if ((first_object->backing_object_offset != cp) ||
				(oldobject != srcobject)) {
				/*
   				* Remove old window into the file
   				*/
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();

				/*
				 * Remove unneeded old pages
				 */
				vm_object_lock(first_object);
				vm_object_page_remove(first_object, 0, 0, 0);
				vm_object_unlock(first_object);

				/*
				 * Invalidate swap space
				 */
				if (first_object->type == OBJT_SWAP) {
					swap_pager_freespace(first_object,
						0,
						first_object->size);
				}

				/*
   				 * Force copy on write for mmaped regions
   				 */
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
   				 * Point the object appropriately
   				 */
				if (oldobject != srcobject) {
					/*
   					 * Set the object optimization hint flag
   					 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);

					/* Move first_object from the old backing
					 * object's shadow list to the source's.
					 */
					if (oldobject) {
						TAILQ_REMOVE(&oldobject->shadow_head,
							first_object, shadow_list);
						oldobject->shadow_count--;
						/* XXX bump generation? */
						vm_object_deallocate(oldobject);
					}

					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
						first_object, shadow_list);
					srcobject->shadow_count++;
					/* XXX bump generation? */

					first_object->backing_object = srcobject;
				}
				first_object->backing_object_offset = cp;
				map->timestamp++;
			} else {
				/* Window unchanged; just invalidate mappings. */
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();
			}
/*
 * Otherwise, we have to do a logical mmap.
 */
		} else {

			vm_object_set_flag(srcobject, OBJ_OPT);
			vm_object_reference(srcobject);

			vm_page_lock_queues();
			pmap_remove(map->pmap, uaddr, tend);
			vm_page_unlock_queues();

			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
			/* Take the write lock; the old entry is replaced by a
			 * fresh COW mapping of the source object.
			 */
			vm_map_lock_upgrade(map);

			/* Keep the first_free hint valid across the deletion. */
			if (entry == &map->header) {
				map->first_free = &map->header;
			} else if (map->first_free->start >= start) {
				map->first_free = entry->prev;
			}

			vm_map_entry_delete(map, entry);

			object = srcobject;
			ooffset = cp;

			rv = vm_map_insert(map, object, ooffset, start, tend,
				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);

			if (rv != KERN_SUCCESS)
				panic("vm_uiomove: could not insert new entry: %d", rv);
		}

/*
 * Map the window directly, if it is already in memory
 */
		pmap_object_init_pt(map->pmap, uaddr,
			srcobject, oindex, tcnt, 0);

		map->timestamp++;
		vm_map_unlock(map);

		/* Advance to the next chunk of the transfer. */
		cnt -= tcnt;
		uaddr += tcnt;
		cp += tcnt;
		if (npages)
			*npages += osize;
	}
	return 0;
}
3126c50fe92bSAlan Cox #endif
31271efb74fbSJohn Dyson 
3128c7c34a24SBruce Evans #include "opt_ddb.h"
3129c3cb3e12SDavid Greenman #ifdef DDB
3130c7c34a24SBruce Evans #include <sys/kernel.h>
3131c7c34a24SBruce Evans 
3132c7c34a24SBruce Evans #include <ddb/ddb.h>
3133c7c34a24SBruce Evans 
3134df8bae1dSRodney W. Grimes /*
3135df8bae1dSRodney W. Grimes  *	vm_map_print:	[ debug ]
3136df8bae1dSRodney W. Grimes  */
3137c7c34a24SBruce Evans DB_SHOW_COMMAND(map, vm_map_print)
3138df8bae1dSRodney W. Grimes {
313995e5e988SJohn Dyson 	static int nlines;
3140c7c34a24SBruce Evans 	/* XXX convert args. */
3141c0877f10SJohn Dyson 	vm_map_t map = (vm_map_t)addr;
3142c7c34a24SBruce Evans 	boolean_t full = have_addr;
3143df8bae1dSRodney W. Grimes 
3144c0877f10SJohn Dyson 	vm_map_entry_t entry;
3145c7c34a24SBruce Evans 
3146e5f251d2SAlan Cox 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3147e5f251d2SAlan Cox 	    (void *)map,
3148101eeb7fSBruce Evans 	    (void *)map->pmap, map->nentries, map->timestamp);
314995e5e988SJohn Dyson 	nlines++;
3150df8bae1dSRodney W. Grimes 
3151c7c34a24SBruce Evans 	if (!full && db_indent)
3152df8bae1dSRodney W. Grimes 		return;
3153df8bae1dSRodney W. Grimes 
3154c7c34a24SBruce Evans 	db_indent += 2;
3155df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
3156df8bae1dSRodney W. Grimes 	    entry = entry->next) {
3157fc62ef1fSBruce Evans 		db_iprintf("map entry %p: start=%p, end=%p\n",
3158fc62ef1fSBruce Evans 		    (void *)entry, (void *)entry->start, (void *)entry->end);
315995e5e988SJohn Dyson 		nlines++;
3160e5f251d2SAlan Cox 		{
3161df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
3162df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
31630d94caffSDavid Greenman 
316495e5e988SJohn Dyson 			db_iprintf(" prot=%x/%x/%s",
3165df8bae1dSRodney W. Grimes 			    entry->protection,
3166df8bae1dSRodney W. Grimes 			    entry->max_protection,
31678aef1712SMatthew Dillon 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3168df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
316995e5e988SJohn Dyson 				db_printf(", wired");
3170df8bae1dSRodney W. Grimes 		}
31719fdfe602SMatthew Dillon 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3172cd034a5bSMaxime Henrion 			db_printf(", share=%p, offset=0x%jx\n",
31739fdfe602SMatthew Dillon 			    (void *)entry->object.sub_map,
3174cd034a5bSMaxime Henrion 			    (uintmax_t)entry->offset);
317595e5e988SJohn Dyson 			nlines++;
3176df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
31779fdfe602SMatthew Dillon 			    (entry->prev->object.sub_map !=
31789fdfe602SMatthew Dillon 				entry->object.sub_map)) {
3179c7c34a24SBruce Evans 				db_indent += 2;
3180101eeb7fSBruce Evans 				vm_map_print((db_expr_t)(intptr_t)
31819fdfe602SMatthew Dillon 					     entry->object.sub_map,
3182914181e7SBruce Evans 					     full, 0, (char *)0);
3183c7c34a24SBruce Evans 				db_indent -= 2;
3184df8bae1dSRodney W. Grimes 			}
31850d94caffSDavid Greenman 		} else {
3186cd034a5bSMaxime Henrion 			db_printf(", object=%p, offset=0x%jx",
3187101eeb7fSBruce Evans 			    (void *)entry->object.vm_object,
3188cd034a5bSMaxime Henrion 			    (uintmax_t)entry->offset);
3189afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
3190c7c34a24SBruce Evans 				db_printf(", copy (%s)",
3191afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3192c7c34a24SBruce Evans 			db_printf("\n");
319395e5e988SJohn Dyson 			nlines++;
3194df8bae1dSRodney W. Grimes 
3195df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
3196df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
3197df8bae1dSRodney W. Grimes 				entry->object.vm_object)) {
3198c7c34a24SBruce Evans 				db_indent += 2;
3199101eeb7fSBruce Evans 				vm_object_print((db_expr_t)(intptr_t)
3200101eeb7fSBruce Evans 						entry->object.vm_object,
3201914181e7SBruce Evans 						full, 0, (char *)0);
320295e5e988SJohn Dyson 				nlines += 4;
3203c7c34a24SBruce Evans 				db_indent -= 2;
3204df8bae1dSRodney W. Grimes 			}
3205df8bae1dSRodney W. Grimes 		}
3206df8bae1dSRodney W. Grimes 	}
3207c7c34a24SBruce Evans 	db_indent -= 2;
320895e5e988SJohn Dyson 	if (db_indent == 0)
320995e5e988SJohn Dyson 		nlines = 0;
3210df8bae1dSRodney W. Grimes }
321195e5e988SJohn Dyson 
321295e5e988SJohn Dyson 
321395e5e988SJohn Dyson DB_SHOW_COMMAND(procvm, procvm)
321495e5e988SJohn Dyson {
321595e5e988SJohn Dyson 	struct proc *p;
321695e5e988SJohn Dyson 
321795e5e988SJohn Dyson 	if (have_addr) {
321895e5e988SJohn Dyson 		p = (struct proc *) addr;
321995e5e988SJohn Dyson 	} else {
322095e5e988SJohn Dyson 		p = curproc;
322195e5e988SJohn Dyson 	}
322295e5e988SJohn Dyson 
3223ac1e407bSBruce Evans 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3224ac1e407bSBruce Evans 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3225b1028ad1SLuoqi Chen 	    (void *)vmspace_pmap(p->p_vmspace));
322695e5e988SJohn Dyson 
3227101eeb7fSBruce Evans 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
322895e5e988SJohn Dyson }
322995e5e988SJohn Dyson 
3230c7c34a24SBruce Evans #endif /* DDB */
3231