xref: /freebsd/sys/vm/vm_map.c (revision e6bd3a812d8f905291fa3965d4532a03e84f55a5)
160727d8bSWarner Losh /*-
2796df753SPedro F. Giffuni  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
351369649SPedro F. Giffuni  *
4df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
5df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
6df8bae1dSRodney W. Grimes  *
7df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
8df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18fbbd9655SWarner Losh  * 3. Neither the name of the University nor the names of its contributors
19df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
20df8bae1dSRodney W. Grimes  *    without specific prior written permission.
21df8bae1dSRodney W. Grimes  *
22df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
33df8bae1dSRodney W. Grimes  *
343c4dd356SDavid Greenman  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
35df8bae1dSRodney W. Grimes  *
36df8bae1dSRodney W. Grimes  *
37df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38df8bae1dSRodney W. Grimes  * All rights reserved.
39df8bae1dSRodney W. Grimes  *
40df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
43df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
44df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
45df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
46df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55df8bae1dSRodney W. Grimes  *  School of Computer Science
56df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
57df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
58df8bae1dSRodney W. Grimes  *
59df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
60df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
61df8bae1dSRodney W. Grimes  */
62df8bae1dSRodney W. Grimes 
63df8bae1dSRodney W. Grimes /*
64df8bae1dSRodney W. Grimes  *	Virtual memory mapping module.
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67874651b1SDavid E. O'Brien #include <sys/cdefs.h>
68874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
69874651b1SDavid E. O'Brien 
70df8bae1dSRodney W. Grimes #include <sys/param.h>
71df8bae1dSRodney W. Grimes #include <sys/systm.h>
72156e8654SKonstantin Belousov #include <sys/elf.h>
739a6d144fSKonstantin Belousov #include <sys/kernel.h>
7461d80e90SJohn Baldwin #include <sys/ktr.h>
75fb919e4dSMark Murray #include <sys/lock.h>
76fb919e4dSMark Murray #include <sys/mutex.h>
77b5e8ce9fSBruce Evans #include <sys/proc.h>
78efeaf95aSDavid Greenman #include <sys/vmmeter.h>
79867a482dSJohn Dyson #include <sys/mman.h>
801efb74fbSJohn Dyson #include <sys/vnode.h>
811ba5ad42SEdward Tomasz Napierala #include <sys/racct.h>
822267af78SJulian Elischer #include <sys/resourcevar.h>
8389f6b863SAttilio Rao #include <sys/rwlock.h>
843fde38dfSMike Silbersack #include <sys/file.h>
859a6d144fSKonstantin Belousov #include <sys/sysctl.h>
8605ba50f5SJake Burkholder #include <sys/sysent.h>
873db161e0SMatthew Dillon #include <sys/shm.h>
88df8bae1dSRodney W. Grimes 
89df8bae1dSRodney W. Grimes #include <vm/vm.h>
90efeaf95aSDavid Greenman #include <vm/vm_param.h>
91efeaf95aSDavid Greenman #include <vm/pmap.h>
92efeaf95aSDavid Greenman #include <vm/vm_map.h>
93df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
9454a3a114SMark Johnston #include <vm/vm_pageout.h>
95df8bae1dSRodney W. Grimes #include <vm/vm_object.h>
9647221757SJohn Dyson #include <vm/vm_pager.h>
9726f9a767SRodney W. Grimes #include <vm/vm_kern.h>
98efeaf95aSDavid Greenman #include <vm/vm_extern.h>
9984110e7eSKonstantin Belousov #include <vm/vnode_pager.h>
10021cd6e62SSeigo Tanimura #include <vm/swap_pager.h>
101670d17b5SJeff Roberson #include <vm/uma.h>
102df8bae1dSRodney W. Grimes 
103df8bae1dSRodney W. Grimes /*
104df8bae1dSRodney W. Grimes  *	Virtual memory maps provide for the mapping, protection,
105df8bae1dSRodney W. Grimes  *	and sharing of virtual memory objects.  In addition,
106df8bae1dSRodney W. Grimes  *	this module provides for an efficient virtual copy of
107df8bae1dSRodney W. Grimes  *	memory from one map to another.
108df8bae1dSRodney W. Grimes  *
109df8bae1dSRodney W. Grimes  *	Synchronization is required prior to most operations.
110df8bae1dSRodney W. Grimes  *
111df8bae1dSRodney W. Grimes  *	Maps consist of an ordered doubly-linked list of simple
112e2abaaaaSAlan Cox  *	entries; a self-adjusting binary search tree of these
113e2abaaaaSAlan Cox  *	entries is used to speed up lookups.
114df8bae1dSRodney W. Grimes  *
115956f3135SPhilippe Charnier  *	Since portions of maps are specified by start/end addresses,
116df8bae1dSRodney W. Grimes  *	which may not align with existing map entries, all
117df8bae1dSRodney W. Grimes  *	routines merely "clip" entries to these start/end values.
118df8bae1dSRodney W. Grimes  *	[That is, an entry is split into two, bordering at a
119df8bae1dSRodney W. Grimes  *	start or end value.]  Note that these clippings may not
120df8bae1dSRodney W. Grimes  *	always be necessary (as the two resulting entries are then
121df8bae1dSRodney W. Grimes  *	not changed); however, the clipping is done for convenience.
122df8bae1dSRodney W. Grimes  *
123df8bae1dSRodney W. Grimes  *	As mentioned above, virtual copy operations are performed
124ad5fca3bSAlan Cox  *	by copying VM object references from one map to
125df8bae1dSRodney W. Grimes  *	another, and then marking both regions as copy-on-write.
126df8bae1dSRodney W. Grimes  */
127df8bae1dSRodney W. Grimes 
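/*
 * Illustrative sketch of the "clipping" described above (hypothetical
 * helper, not part of the original file): splitting one [start, end)
 * entry into two entries that border at a given address.
 */
#if 0
static void
demo_clip_entry(vm_map_entry_t entry, vm_map_entry_t new_entry,
    vm_offset_t addr)
{

	/* entry keeps [start, addr); new_entry takes [addr, end). */
	new_entry->start = addr;
	new_entry->end = entry->end;
	entry->end = addr;
}
#endif
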
1283a92e5d5SAlan Cox static struct mtx map_sleep_mtx;
1298355f576SJeff Roberson static uma_zone_t mapentzone;
1308355f576SJeff Roberson static uma_zone_t kmapentzone;
1318355f576SJeff Roberson static uma_zone_t mapzone;
1328355f576SJeff Roberson static uma_zone_t vmspace_zone;
133b23f72e9SBrian Feldman static int vmspace_zinit(void *mem, int size, int flags);
134b23f72e9SBrian Feldman static int vm_map_zinit(void *mem, int size, int flags);
13592351f16SAlan Cox static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
13692351f16SAlan Cox     vm_offset_t max);
1370b367bd8SKonstantin Belousov static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
138655c3490SKonstantin Belousov static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
13903462509SAlan Cox static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
14019bd0d9cSKonstantin Belousov static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
14119bd0d9cSKonstantin Belousov     vm_map_entry_t gap_entry);
142077ec27cSAlan Cox static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
143077ec27cSAlan Cox     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
1448355f576SJeff Roberson #ifdef INVARIANTS
1458355f576SJeff Roberson static void vm_map_zdtor(void *mem, int size, void *arg);
1468355f576SJeff Roberson static void vmspace_zdtor(void *mem, int size, void *arg);
1478355f576SJeff Roberson #endif
1484648ba0aSKonstantin Belousov static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
1494648ba0aSKonstantin Belousov     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
1504648ba0aSKonstantin Belousov     int cow);
15166cd575bSAlan Cox static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
15266cd575bSAlan Cox     vm_offset_t failed_addr);
153b18bfc3dSJohn Dyson 
154ef694c1aSEdward Tomasz Napierala #define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
155ef694c1aSEdward Tomasz Napierala     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
1563364c323SKonstantin Belousov      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
1573364c323SKonstantin Belousov 
15857051fdcSTor Egge /*
15957051fdcSTor Egge  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
16057051fdcSTor Egge  * stable.
16157051fdcSTor Egge  */
16257051fdcSTor Egge #define PROC_VMSPACE_LOCK(p) do { } while (0)
16357051fdcSTor Egge #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
16457051fdcSTor Egge 
165d239bd3cSKonstantin Belousov /*
166d239bd3cSKonstantin Belousov  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
167d239bd3cSKonstantin Belousov  *
168d239bd3cSKonstantin Belousov  *	Asserts that the starting and ending region
169d239bd3cSKonstantin Belousov  *	addresses fall within the valid range of the map.
170d239bd3cSKonstantin Belousov  */
171d239bd3cSKonstantin Belousov #define	VM_MAP_RANGE_CHECK(map, start, end)		\
172d239bd3cSKonstantin Belousov 		{					\
173d239bd3cSKonstantin Belousov 		if (start < vm_map_min(map))		\
174d239bd3cSKonstantin Belousov 			start = vm_map_min(map);	\
175d239bd3cSKonstantin Belousov 		if (end > vm_map_max(map))		\
176d239bd3cSKonstantin Belousov 			end = vm_map_max(map);		\
177d239bd3cSKonstantin Belousov 		if (start > end)			\
178d239bd3cSKonstantin Belousov 			start = end;			\
179d239bd3cSKonstantin Belousov 		}
180d239bd3cSKonstantin Belousov 
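/*
 * Hypothetical usage sketch for VM_MAP_RANGE_CHECK (illustration only;
 * demo_range_op is not a real routine): a typical map operation clamps
 * caller-supplied bounds to the map's valid range before proceeding.
 */
#if 0
static int
demo_range_op(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	/* ... operate on the clamped range [start, end) ... */
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}
#endif
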
1816fecb26bSKonstantin Belousov /*
1826fecb26bSKonstantin Belousov  *	vm_map_startup:
1836fecb26bSKonstantin Belousov  *
1846fecb26bSKonstantin Belousov  *	Initialize the vm_map module.  Must be called before
1856fecb26bSKonstantin Belousov  *	any other vm_map routines.
1866fecb26bSKonstantin Belousov  *
1876fecb26bSKonstantin Belousov  *	Map and entry structures are allocated from the general
1886fecb26bSKonstantin Belousov  *	purpose memory pool with some exceptions:
1896fecb26bSKonstantin Belousov  *
1906fecb26bSKonstantin Belousov  *	- The kernel map and kmem submap are allocated statically.
1916fecb26bSKonstantin Belousov  *	- Kernel map entries are allocated out of a static pool.
1926fecb26bSKonstantin Belousov  *
1936fecb26bSKonstantin Belousov  *	These restrictions are necessary since malloc() uses the
1946fecb26bSKonstantin Belousov  *	maps and requires map entries.
1956fecb26bSKonstantin Belousov  */
1966fecb26bSKonstantin Belousov 
1970d94caffSDavid Greenman void
1981b40f8c0SMatthew Dillon vm_map_startup(void)
199df8bae1dSRodney W. Grimes {
2003a92e5d5SAlan Cox 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
2018355f576SJeff Roberson 	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
2028355f576SJeff Roberson #ifdef INVARIANTS
2038355f576SJeff Roberson 	    vm_map_zdtor,
2048355f576SJeff Roberson #else
2058355f576SJeff Roberson 	    NULL,
2068355f576SJeff Roberson #endif
207f872f6eaSAlan Cox 	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
2088355f576SJeff Roberson 	uma_prealloc(mapzone, MAX_KMAP);
209670d17b5SJeff Roberson 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
21018aa2de5SJeff Roberson 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
21118aa2de5SJeff Roberson 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
212670d17b5SJeff Roberson 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
213670d17b5SJeff Roberson 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2145df87b21SJeff Roberson 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
2155df87b21SJeff Roberson #ifdef INVARIANTS
2165df87b21SJeff Roberson 	    vmspace_zdtor,
2175df87b21SJeff Roberson #else
2185df87b21SJeff Roberson 	    NULL,
2195df87b21SJeff Roberson #endif
220f872f6eaSAlan Cox 	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
2218355f576SJeff Roberson }
2228355f576SJeff Roberson 
223b23f72e9SBrian Feldman static int
224b23f72e9SBrian Feldman vmspace_zinit(void *mem, int size, int flags)
2258355f576SJeff Roberson {
2268355f576SJeff Roberson 	struct vmspace *vm;
2278355f576SJeff Roberson 
2288355f576SJeff Roberson 	vm = (struct vmspace *)mem;
2298355f576SJeff Roberson 
23089b57fcfSKonstantin Belousov 	vm->vm_map.pmap = NULL;
231b23f72e9SBrian Feldman 	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
232e68c64f0SKonstantin Belousov 	PMAP_LOCK_INIT(vmspace_pmap(vm));
233b23f72e9SBrian Feldman 	return (0);
2348355f576SJeff Roberson }
2358355f576SJeff Roberson 
236b23f72e9SBrian Feldman static int
237b23f72e9SBrian Feldman vm_map_zinit(void *mem, int size, int flags)
2388355f576SJeff Roberson {
2398355f576SJeff Roberson 	vm_map_t map;
2408355f576SJeff Roberson 
2418355f576SJeff Roberson 	map = (vm_map_t)mem;
242763d9566STim Kientzle 	memset(map, 0, sizeof(*map));
243e30df26eSAlan Cox 	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
244e30df26eSAlan Cox 	sx_init(&map->lock, "vm map (user)");
245b23f72e9SBrian Feldman 	return (0);
2468355f576SJeff Roberson }
2478355f576SJeff Roberson 
2488355f576SJeff Roberson #ifdef INVARIANTS
2498355f576SJeff Roberson static void
2508355f576SJeff Roberson vmspace_zdtor(void *mem, int size, void *arg)
2518355f576SJeff Roberson {
2528355f576SJeff Roberson 	struct vmspace *vm;
2538355f576SJeff Roberson 
2548355f576SJeff Roberson 	vm = (struct vmspace *)mem;
2558355f576SJeff Roberson 
2568355f576SJeff Roberson 	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
2578355f576SJeff Roberson }
2588355f576SJeff Roberson static void
2598355f576SJeff Roberson vm_map_zdtor(void *mem, int size, void *arg)
2608355f576SJeff Roberson {
2618355f576SJeff Roberson 	vm_map_t map;
2628355f576SJeff Roberson 
2638355f576SJeff Roberson 	map = (vm_map_t)mem;
2648355f576SJeff Roberson 	KASSERT(map->nentries == 0,
2658355f576SJeff Roberson 	    ("map %p nentries == %d on free.",
2668355f576SJeff Roberson 	    map, map->nentries));
2678355f576SJeff Roberson 	KASSERT(map->size == 0,
2688355f576SJeff Roberson 	    ("map %p size == %lu on free.",
2699eb6e519SJeff Roberson 	    map, (unsigned long)map->size));
2708355f576SJeff Roberson }
2718355f576SJeff Roberson #endif	/* INVARIANTS */
2728355f576SJeff Roberson 
273df8bae1dSRodney W. Grimes /*
274df8bae1dSRodney W. Grimes  * Allocate a vmspace structure, including a vm_map and pmap,
275df8bae1dSRodney W. Grimes  * and initialize those structures.  The refcnt is set to 1.
27674d1d2b7SNeel Natu  *
27774d1d2b7SNeel Natu  * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
278df8bae1dSRodney W. Grimes  */
279df8bae1dSRodney W. Grimes struct vmspace *
28074d1d2b7SNeel Natu vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
281df8bae1dSRodney W. Grimes {
282c0877f10SJohn Dyson 	struct vmspace *vm;
2830d94caffSDavid Greenman 
284a163d034SWarner Losh 	vm = uma_zalloc(vmspace_zone, M_WAITOK);
28574d1d2b7SNeel Natu 	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
28674d1d2b7SNeel Natu 	if (!pinit(vmspace_pmap(vm))) {
28789b57fcfSKonstantin Belousov 		uma_zfree(vmspace_zone, vm);
28889b57fcfSKonstantin Belousov 		return (NULL);
28989b57fcfSKonstantin Belousov 	}
29021c641b2SJohn Baldwin 	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
29192351f16SAlan Cox 	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
292df8bae1dSRodney W. Grimes 	vm->vm_refcnt = 1;
2932d8acc0fSJohn Dyson 	vm->vm_shm = NULL;
29451ab6c28SAlan Cox 	vm->vm_swrss = 0;
29551ab6c28SAlan Cox 	vm->vm_tsize = 0;
29651ab6c28SAlan Cox 	vm->vm_dsize = 0;
29751ab6c28SAlan Cox 	vm->vm_ssize = 0;
29851ab6c28SAlan Cox 	vm->vm_taddr = 0;
29951ab6c28SAlan Cox 	vm->vm_daddr = 0;
30051ab6c28SAlan Cox 	vm->vm_maxsaddr = 0;
301df8bae1dSRodney W. Grimes 	return (vm);
302df8bae1dSRodney W. Grimes }
303df8bae1dSRodney W. Grimes 
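/*
 * Hypothetical usage sketch (illustration only): creating a vmspace
 * with the standard pmap initializer and dropping its initial
 * reference.  The address bounds here are placeholders.
 */
#if 0
	struct vmspace *vm;

	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, pmap_pinit);
	if (vm != NULL)
		vmspace_free(vm);	/* releases the refcnt set to 1 */
#endif
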
3044b5c9cf6SEdward Tomasz Napierala #ifdef RACCT
3051ba5ad42SEdward Tomasz Napierala static void
3061ba5ad42SEdward Tomasz Napierala vmspace_container_reset(struct proc *p)
3071ba5ad42SEdward Tomasz Napierala {
3081ba5ad42SEdward Tomasz Napierala 
3091ba5ad42SEdward Tomasz Napierala 	PROC_LOCK(p);
3101ba5ad42SEdward Tomasz Napierala 	racct_set(p, RACCT_DATA, 0);
3111ba5ad42SEdward Tomasz Napierala 	racct_set(p, RACCT_STACK, 0);
3121ba5ad42SEdward Tomasz Napierala 	racct_set(p, RACCT_RSS, 0);
3131ba5ad42SEdward Tomasz Napierala 	racct_set(p, RACCT_MEMLOCK, 0);
3141ba5ad42SEdward Tomasz Napierala 	racct_set(p, RACCT_VMEM, 0);
3151ba5ad42SEdward Tomasz Napierala 	PROC_UNLOCK(p);
3161ba5ad42SEdward Tomasz Napierala }
3174b5c9cf6SEdward Tomasz Napierala #endif
3181ba5ad42SEdward Tomasz Napierala 
31962a59e8fSWarner Losh static inline void
320582ec34cSAlfred Perlstein vmspace_dofree(struct vmspace *vm)
321df8bae1dSRodney W. Grimes {
3220ef12795SAlan Cox 
32321c641b2SJohn Baldwin 	CTR1(KTR_VM, "vmspace_free: %p", vm);
3243db161e0SMatthew Dillon 
3253db161e0SMatthew Dillon 	/*
3263db161e0SMatthew Dillon 	 * Make sure any SysV shm is freed, it might not have been in
3273db161e0SMatthew Dillon 	 * exit1().
3283db161e0SMatthew Dillon 	 */
3293db161e0SMatthew Dillon 	shmexit(vm);
3303db161e0SMatthew Dillon 
33130dcfc09SJohn Dyson 	/*
332df8bae1dSRodney W. Grimes 	 * Lock the map, to wait out all other references to it.
3330d94caffSDavid Greenman 	 * Delete all of the mappings and pages they hold, then call
3340d94caffSDavid Greenman 	 * the pmap module to reclaim anything left.
335df8bae1dSRodney W. Grimes 	 */
336f0165b1cSKonstantin Belousov 	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
337f0165b1cSKonstantin Belousov 	    vm_map_max(&vm->vm_map));
3388355f576SJeff Roberson 
3390ef12795SAlan Cox 	pmap_release(vmspace_pmap(vm));
3400ef12795SAlan Cox 	vm->vm_map.pmap = NULL;
3418355f576SJeff Roberson 	uma_zfree(vmspace_zone, vm);
342df8bae1dSRodney W. Grimes }
343582ec34cSAlfred Perlstein 
344582ec34cSAlfred Perlstein void
345582ec34cSAlfred Perlstein vmspace_free(struct vmspace *vm)
346582ec34cSAlfred Perlstein {
347582ec34cSAlfred Perlstein 
348423521aaSRyan Stone 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
349164a37a5SJohn Baldwin 	    "vmspace_free() called");
350423521aaSRyan Stone 
351582ec34cSAlfred Perlstein 	if (vm->vm_refcnt == 0)
352582ec34cSAlfred Perlstein 		panic("vmspace_free: attempt to free already freed vmspace");
353582ec34cSAlfred Perlstein 
3541a587ef2SJohn Baldwin 	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
355582ec34cSAlfred Perlstein 		vmspace_dofree(vm);
356582ec34cSAlfred Perlstein }
357582ec34cSAlfred Perlstein 
358582ec34cSAlfred Perlstein void
359582ec34cSAlfred Perlstein vmspace_exitfree(struct proc *p)
360582ec34cSAlfred Perlstein {
361334f7061SPeter Wemm 	struct vmspace *vm;
362582ec34cSAlfred Perlstein 
36357051fdcSTor Egge 	PROC_VMSPACE_LOCK(p);
364334f7061SPeter Wemm 	vm = p->p_vmspace;
365334f7061SPeter Wemm 	p->p_vmspace = NULL;
36657051fdcSTor Egge 	PROC_VMSPACE_UNLOCK(p);
36757051fdcSTor Egge 	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
36857051fdcSTor Egge 	vmspace_free(vm);
36957051fdcSTor Egge }
37057051fdcSTor Egge 
37157051fdcSTor Egge void
37257051fdcSTor Egge vmspace_exit(struct thread *td)
37357051fdcSTor Egge {
37457051fdcSTor Egge 	int refcnt;
37557051fdcSTor Egge 	struct vmspace *vm;
37657051fdcSTor Egge 	struct proc *p;
377389d2b6eSMatthew Dillon 
378389d2b6eSMatthew Dillon 	/*
37957051fdcSTor Egge 	 * Release user portion of address space.
38057051fdcSTor Egge 	 * This releases references to vnodes,
38157051fdcSTor Egge 	 * which could cause I/O if the file has been unlinked.
38257051fdcSTor Egge 	 * Need to do this early enough that we can still sleep.
383389d2b6eSMatthew Dillon 	 *
38457051fdcSTor Egge 	 * The last exiting process to reach this point releases as
38557051fdcSTor Egge 	 * much of the environment as it can. vmspace_dofree() is the
38657051fdcSTor Egge 	 * slower fallback in case another process had a temporary
38757051fdcSTor Egge 	 * reference to the vmspace.
388389d2b6eSMatthew Dillon 	 */
38957051fdcSTor Egge 
39057051fdcSTor Egge 	p = td->td_proc;
39157051fdcSTor Egge 	vm = p->p_vmspace;
39257051fdcSTor Egge 	atomic_add_int(&vmspace0.vm_refcnt, 1);
39357051fdcSTor Egge 	refcnt = vm->vm_refcnt;
39483764b44SMateusz Guzik 	do {
39557051fdcSTor Egge 		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
39657051fdcSTor Egge 			/* Switch now since other proc might free vmspace */
39757051fdcSTor Egge 			PROC_VMSPACE_LOCK(p);
39857051fdcSTor Egge 			p->p_vmspace = &vmspace0;
39957051fdcSTor Egge 			PROC_VMSPACE_UNLOCK(p);
40057051fdcSTor Egge 			pmap_activate(td);
40157051fdcSTor Egge 		}
40283764b44SMateusz Guzik 	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1));
40357051fdcSTor Egge 	if (refcnt == 1) {
40457051fdcSTor Egge 		if (p->p_vmspace != vm) {
40557051fdcSTor Egge 			/* vmspace not yet freed, switch back */
40657051fdcSTor Egge 			PROC_VMSPACE_LOCK(p);
40757051fdcSTor Egge 			p->p_vmspace = vm;
40857051fdcSTor Egge 			PROC_VMSPACE_UNLOCK(p);
40957051fdcSTor Egge 			pmap_activate(td);
41057051fdcSTor Egge 		}
41157051fdcSTor Egge 		pmap_remove_pages(vmspace_pmap(vm));
41257051fdcSTor Egge 		/* Switch now since this proc will free vmspace */
41357051fdcSTor Egge 		PROC_VMSPACE_LOCK(p);
41457051fdcSTor Egge 		p->p_vmspace = &vmspace0;
41557051fdcSTor Egge 		PROC_VMSPACE_UNLOCK(p);
41657051fdcSTor Egge 		pmap_activate(td);
417334f7061SPeter Wemm 		vmspace_dofree(vm);
418334f7061SPeter Wemm 	}
4194b5c9cf6SEdward Tomasz Napierala #ifdef RACCT
4204b5c9cf6SEdward Tomasz Napierala 	if (racct_enable)
4211ba5ad42SEdward Tomasz Napierala 		vmspace_container_reset(p);
4224b5c9cf6SEdward Tomasz Napierala #endif
42357051fdcSTor Egge }
42457051fdcSTor Egge 
42557051fdcSTor Egge /* Acquire reference to vmspace owned by another process. */
42657051fdcSTor Egge 
42757051fdcSTor Egge struct vmspace *
42857051fdcSTor Egge vmspace_acquire_ref(struct proc *p)
42957051fdcSTor Egge {
43057051fdcSTor Egge 	struct vmspace *vm;
43157051fdcSTor Egge 	int refcnt;
43257051fdcSTor Egge 
43357051fdcSTor Egge 	PROC_VMSPACE_LOCK(p);
43457051fdcSTor Egge 	vm = p->p_vmspace;
43557051fdcSTor Egge 	if (vm == NULL) {
43657051fdcSTor Egge 		PROC_VMSPACE_UNLOCK(p);
43757051fdcSTor Egge 		return (NULL);
43857051fdcSTor Egge 	}
43957051fdcSTor Egge 	refcnt = vm->vm_refcnt;
44083764b44SMateusz Guzik 	do {
44157051fdcSTor Egge 		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
44257051fdcSTor Egge 			PROC_VMSPACE_UNLOCK(p);
44357051fdcSTor Egge 			return (NULL);
44457051fdcSTor Egge 		}
44583764b44SMateusz Guzik 	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));
44657051fdcSTor Egge 	if (vm != p->p_vmspace) {
44757051fdcSTor Egge 		PROC_VMSPACE_UNLOCK(p);
44857051fdcSTor Egge 		vmspace_free(vm);
44957051fdcSTor Egge 		return (NULL);
45057051fdcSTor Egge 	}
45157051fdcSTor Egge 	PROC_VMSPACE_UNLOCK(p);
45257051fdcSTor Egge 	return (vm);
45357051fdcSTor Egge }
454df8bae1dSRodney W. Grimes 
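/*
 * Hypothetical usage sketch (illustration only; demo_inspect_map is
 * not a real routine): examining another process's map while holding
 * a vmspace reference.
 */
#if 0
static void
demo_inspect_map(struct proc *p)
{
	struct vmspace *vm;

	vm = vmspace_acquire_ref(p);
	if (vm == NULL)
		return;		/* no usable vmspace */
	vm_map_lock_read(&vm->vm_map);
	/* ... read map entries ... */
	vm_map_unlock_read(&vm->vm_map);
	vmspace_free(vm);	/* drop the reference taken above */
}
#endif
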
4558a4dc40fSJohn Baldwin /*
4568a4dc40fSJohn Baldwin  * Switch between vmspaces in an AIO kernel process.
4578a4dc40fSJohn Baldwin  *
4580b96ca33SJohn Baldwin  * The new vmspace is either the vmspace of a user process obtained
4590b96ca33SJohn Baldwin  * from an active AIO request or the initial vmspace of the AIO kernel
4600b96ca33SJohn Baldwin  * process (when it is idling).  Because user processes will block to
4610b96ca33SJohn Baldwin  * drain any active AIO requests before proceeding in exit() or
4620b96ca33SJohn Baldwin  * execve(), the reference count for vmspaces from AIO requests can
4630b96ca33SJohn Baldwin  * never be 0.  Similarly, AIO kernel processes hold an extra
4640b96ca33SJohn Baldwin  * reference on their initial vmspace for the life of the process.  As
4650b96ca33SJohn Baldwin  * a result, the 'newvm' vmspace always has a non-zero reference
4660b96ca33SJohn Baldwin  * count.  This permits an additional reference on 'newvm' to be
4670b96ca33SJohn Baldwin  * acquired via a simple atomic increment rather than the loop in
4680b96ca33SJohn Baldwin  * vmspace_acquire_ref() above.
4698a4dc40fSJohn Baldwin  */
4708a4dc40fSJohn Baldwin void
4718a4dc40fSJohn Baldwin vmspace_switch_aio(struct vmspace *newvm)
4728a4dc40fSJohn Baldwin {
4738a4dc40fSJohn Baldwin 	struct vmspace *oldvm;
4748a4dc40fSJohn Baldwin 
4758a4dc40fSJohn Baldwin 	/* XXX: Need some way to assert that this is an aio daemon. */
4768a4dc40fSJohn Baldwin 
4778a4dc40fSJohn Baldwin 	KASSERT(newvm->vm_refcnt > 0,
4788a4dc40fSJohn Baldwin 	    ("vmspace_switch_aio: newvm unreferenced"));
4798a4dc40fSJohn Baldwin 
4808a4dc40fSJohn Baldwin 	oldvm = curproc->p_vmspace;
4818a4dc40fSJohn Baldwin 	if (oldvm == newvm)
4828a4dc40fSJohn Baldwin 		return;
4838a4dc40fSJohn Baldwin 
4848a4dc40fSJohn Baldwin 	/*
4858a4dc40fSJohn Baldwin 	 * Point to the new address space and refer to it.
4868a4dc40fSJohn Baldwin 	 */
4878a4dc40fSJohn Baldwin 	curproc->p_vmspace = newvm;
4888a4dc40fSJohn Baldwin 	atomic_add_int(&newvm->vm_refcnt, 1);
4898a4dc40fSJohn Baldwin 
4908a4dc40fSJohn Baldwin 	/* Activate the new mapping. */
4918a4dc40fSJohn Baldwin 	pmap_activate(curthread);
4928a4dc40fSJohn Baldwin 
4938a4dc40fSJohn Baldwin 	vmspace_free(oldvm);
4948a4dc40fSJohn Baldwin }
4958a4dc40fSJohn Baldwin 
4961b40f8c0SMatthew Dillon void
497780b1c09SAlan Cox _vm_map_lock(vm_map_t map, const char *file, int line)
4981b40f8c0SMatthew Dillon {
499bc91c510SAlan Cox 
50093bc4879SAlan Cox 	if (map->system_map)
501ccdf2333SAttilio Rao 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
50212c64974SMaxime Henrion 	else
5039fde98bbSAttilio Rao 		sx_xlock_(&map->lock, file, line);
5041b40f8c0SMatthew Dillon 	map->timestamp++;
5051b40f8c0SMatthew Dillon }
5061b40f8c0SMatthew Dillon 
50778022527SKonstantin Belousov void
50878022527SKonstantin Belousov vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
50978022527SKonstantin Belousov {
51067388836SKonstantin Belousov 	vm_object_t object;
51178022527SKonstantin Belousov 	struct vnode *vp;
51267388836SKonstantin Belousov 	bool vp_held;
51378022527SKonstantin Belousov 
51478022527SKonstantin Belousov 	if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
51578022527SKonstantin Belousov 		return;
51678022527SKonstantin Belousov 	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
51778022527SKonstantin Belousov 	    ("Submap with execs"));
51878022527SKonstantin Belousov 	object = entry->object.vm_object;
51978022527SKonstantin Belousov 	KASSERT(object != NULL, ("No object for text, entry %p", entry));
52067388836SKonstantin Belousov 	if ((object->flags & OBJ_ANON) != 0)
52167388836SKonstantin Belousov 		object = object->handle;
52267388836SKonstantin Belousov 	else
52367388836SKonstantin Belousov 		KASSERT(object->backing_object == NULL,
52467388836SKonstantin Belousov 		    ("non-anon object %p shadows", object));
52567388836SKonstantin Belousov 	KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
52667388836SKonstantin Belousov 	    entry, entry->object.vm_object));
52778022527SKonstantin Belousov 
52867388836SKonstantin Belousov 	/*
52967388836SKonstantin Belousov 	 * Mostly, we do not lock the backing object.  It is
53067388836SKonstantin Belousov 	 * referenced by the entry we are processing, so it cannot go
53167388836SKonstantin Belousov 	 * away.
53267388836SKonstantin Belousov 	 */
53332d2014dSKonstantin Belousov 	vp = NULL;
53467388836SKonstantin Belousov 	vp_held = false;
53532d2014dSKonstantin Belousov 	if (object->type == OBJT_DEAD) {
53678022527SKonstantin Belousov 		/*
53778022527SKonstantin Belousov 		 * For OBJT_DEAD objects, v_writecount was handled in
53878022527SKonstantin Belousov 		 * vnode_pager_dealloc().
53978022527SKonstantin Belousov 		 */
54032d2014dSKonstantin Belousov 	} else if (object->type == OBJT_VNODE) {
54132d2014dSKonstantin Belousov 		vp = object->handle;
54232d2014dSKonstantin Belousov 	} else if (object->type == OBJT_SWAP) {
54332d2014dSKonstantin Belousov 		KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
54432d2014dSKonstantin Belousov 		    ("vm_map_entry_set_vnode_text: swap and !TMPFS "
54532d2014dSKonstantin Belousov 		    "entry %p, object %p, add %d", entry, object, add));
54632d2014dSKonstantin Belousov 		/*
54732d2014dSKonstantin Belousov 		 * Tmpfs VREG node, which was reclaimed, has
54832d2014dSKonstantin Belousov 		 * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS.  In
54932d2014dSKonstantin Belousov 		 * this case there is no v_writecount to adjust.
55032d2014dSKonstantin Belousov 		 */
55167388836SKonstantin Belousov 		VM_OBJECT_RLOCK(object);
55267388836SKonstantin Belousov 		if ((object->flags & OBJ_TMPFS) != 0) {
55332d2014dSKonstantin Belousov 			vp = object->un_pager.swp.swp_tmpfs;
55467388836SKonstantin Belousov 			if (vp != NULL) {
55567388836SKonstantin Belousov 				vhold(vp);
55667388836SKonstantin Belousov 				vp_held = true;
55767388836SKonstantin Belousov 			}
55867388836SKonstantin Belousov 		}
55967388836SKonstantin Belousov 		VM_OBJECT_RUNLOCK(object);
56032d2014dSKonstantin Belousov 	} else {
56132d2014dSKonstantin Belousov 		KASSERT(0,
56278022527SKonstantin Belousov 		    ("vm_map_entry_set_vnode_text: wrong object type, "
56378022527SKonstantin Belousov 		    "entry %p, object %p, add %d", entry, object, add));
56432d2014dSKonstantin Belousov 	}
56532d2014dSKonstantin Belousov 	if (vp != NULL) {
566bb9e2184SKonstantin Belousov 		if (add) {
56778022527SKonstantin Belousov 			VOP_SET_TEXT_CHECKED(vp);
568bb9e2184SKonstantin Belousov 		} else {
569bb9e2184SKonstantin Belousov 			vn_lock(vp, LK_SHARED | LK_RETRY);
570bb9e2184SKonstantin Belousov 			VOP_UNSET_TEXT_CHECKED(vp);
571b249ce48SMateusz Guzik 			VOP_UNLOCK(vp);
572bb9e2184SKonstantin Belousov 		}
57367388836SKonstantin Belousov 		if (vp_held)
57467388836SKonstantin Belousov 			vdrop(vp);
575bb9e2184SKonstantin Belousov 	}
57678022527SKonstantin Belousov }
57778022527SKonstantin Belousov 
5787cdcf863SDoug Moore /*
5797cdcf863SDoug Moore  * Use a different name for this vm_map_entry field when its use
5807cdcf863SDoug Moore  * is not consistent with its role as part of an ordered search tree.
5817cdcf863SDoug Moore  */
5827cdcf863SDoug Moore #define defer_next right
5837cdcf863SDoug Moore 
5840b367bd8SKonstantin Belousov static void
5850b367bd8SKonstantin Belousov vm_map_process_deferred(void)
5860e0af8ecSBrian Feldman {
5870b367bd8SKonstantin Belousov 	struct thread *td;
5886fbe60faSJohn Baldwin 	vm_map_entry_t entry, next;
58984110e7eSKonstantin Belousov 	vm_object_t object;
590655c3490SKonstantin Belousov 
5910b367bd8SKonstantin Belousov 	td = curthread;
5926fbe60faSJohn Baldwin 	entry = td->td_map_def_user;
5936fbe60faSJohn Baldwin 	td->td_map_def_user = NULL;
5946fbe60faSJohn Baldwin 	while (entry != NULL) {
5957cdcf863SDoug Moore 		next = entry->defer_next;
596fe7bcbafSKyle Evans 		MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
597fe7bcbafSKyle Evans 		    MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
59878022527SKonstantin Belousov 		    MAP_ENTRY_VN_EXEC));
599fe7bcbafSKyle Evans 		if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
60084110e7eSKonstantin Belousov 			/*
60184110e7eSKonstantin Belousov 			 * Decrement the object's writemappings and
60284110e7eSKonstantin Belousov 			 * possibly the vnode's v_writecount.
60384110e7eSKonstantin Belousov 			 */
60484110e7eSKonstantin Belousov 			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
60584110e7eSKonstantin Belousov 			    ("Submap with writecount"));
60684110e7eSKonstantin Belousov 			object = entry->object.vm_object;
60784110e7eSKonstantin Belousov 			KASSERT(object != NULL, ("No object for writecount"));
608fe7bcbafSKyle Evans 			vm_pager_release_writecount(object, entry->start,
60984110e7eSKonstantin Belousov 			    entry->end);
61084110e7eSKonstantin Belousov 		}
61178022527SKonstantin Belousov 		vm_map_entry_set_vnode_text(entry, false);
6120b367bd8SKonstantin Belousov 		vm_map_entry_deallocate(entry, FALSE);
6136fbe60faSJohn Baldwin 		entry = next;
6140b367bd8SKonstantin Belousov 	}
6150b367bd8SKonstantin Belousov }
6160b367bd8SKonstantin Belousov 
617461587dcSDoug Moore #ifdef INVARIANTS
618461587dcSDoug Moore static void
619461587dcSDoug Moore _vm_map_assert_locked(vm_map_t map, const char *file, int line)
620461587dcSDoug Moore {
621461587dcSDoug Moore 
622461587dcSDoug Moore 	if (map->system_map)
623461587dcSDoug Moore 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
624461587dcSDoug Moore 	else
625461587dcSDoug Moore 		sx_assert_(&map->lock, SA_XLOCKED, file, line);
626461587dcSDoug Moore }
627461587dcSDoug Moore 
628461587dcSDoug Moore #define	VM_MAP_ASSERT_LOCKED(map) \
629461587dcSDoug Moore     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
630461587dcSDoug Moore 
631461587dcSDoug Moore enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
632461587dcSDoug Moore #ifdef DIAGNOSTIC
633461587dcSDoug Moore static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
634461587dcSDoug Moore #else
635461587dcSDoug Moore static int enable_vmmap_check = VMMAP_CHECK_NONE;
636461587dcSDoug Moore #endif
637461587dcSDoug Moore SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
638461587dcSDoug Moore     &enable_vmmap_check, 0, "Enable vm map consistency checking");
639461587dcSDoug Moore 
640461587dcSDoug Moore static void _vm_map_assert_consistent(vm_map_t map, int check);
641461587dcSDoug Moore 
642461587dcSDoug Moore #define VM_MAP_ASSERT_CONSISTENT(map) \
643461587dcSDoug Moore     _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
644461587dcSDoug Moore #ifdef DIAGNOSTIC
645461587dcSDoug Moore #define VM_MAP_UNLOCK_CONSISTENT(map) do {				\
646461587dcSDoug Moore 	if (map->nupdates > map->nentries) {				\
647461587dcSDoug Moore 		_vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK);	\
648461587dcSDoug Moore 		map->nupdates = 0;					\
649461587dcSDoug Moore 	}								\
650461587dcSDoug Moore } while (0)
651461587dcSDoug Moore #else
652461587dcSDoug Moore #define VM_MAP_UNLOCK_CONSISTENT(map)
653461587dcSDoug Moore #endif
654461587dcSDoug Moore #else
655461587dcSDoug Moore #define	VM_MAP_ASSERT_LOCKED(map)
656461587dcSDoug Moore #define VM_MAP_ASSERT_CONSISTENT(map)
657461587dcSDoug Moore #define VM_MAP_UNLOCK_CONSISTENT(map)
658461587dcSDoug Moore #endif /* INVARIANTS */
659461587dcSDoug Moore 
6600b367bd8SKonstantin Belousov void
6610b367bd8SKonstantin Belousov _vm_map_unlock(vm_map_t map, const char *file, int line)
6620b367bd8SKonstantin Belousov {
6630b367bd8SKonstantin Belousov 
664461587dcSDoug Moore 	VM_MAP_UNLOCK_CONSISTENT(map);
6650b367bd8SKonstantin Belousov 	if (map->system_map)
666ccdf2333SAttilio Rao 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
6670b367bd8SKonstantin Belousov 	else {
6689fde98bbSAttilio Rao 		sx_xunlock_(&map->lock, file, line);
6690b367bd8SKonstantin Belousov 		vm_map_process_deferred();
670655c3490SKonstantin Belousov 	}
6710e0af8ecSBrian Feldman }
6720e0af8ecSBrian Feldman 
6730e0af8ecSBrian Feldman void
674780b1c09SAlan Cox _vm_map_lock_read(vm_map_t map, const char *file, int line)
6750e0af8ecSBrian Feldman {
676bc91c510SAlan Cox 
67793bc4879SAlan Cox 	if (map->system_map)
678ccdf2333SAttilio Rao 		mtx_lock_flags_(&map->system_mtx, 0, file, line);
67912c64974SMaxime Henrion 	else
6809fde98bbSAttilio Rao 		sx_slock_(&map->lock, file, line);
68136daaecdSAlan Cox }
6820e0af8ecSBrian Feldman 
6830e0af8ecSBrian Feldman void
684780b1c09SAlan Cox _vm_map_unlock_read(vm_map_t map, const char *file, int line)
6850e0af8ecSBrian Feldman {
686bc91c510SAlan Cox 
68736daaecdSAlan Cox 	if (map->system_map)
688ccdf2333SAttilio Rao 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
6890b367bd8SKonstantin Belousov 	else {
6909fde98bbSAttilio Rao 		sx_sunlock_(&map->lock, file, line);
6910b367bd8SKonstantin Belousov 		vm_map_process_deferred();
6920b367bd8SKonstantin Belousov 	}
69325adb370SBrian Feldman }
69425adb370SBrian Feldman 
695d974f03cSAlan Cox int
696780b1c09SAlan Cox _vm_map_trylock(vm_map_t map, const char *file, int line)
697d974f03cSAlan Cox {
69825adb370SBrian Feldman 	int error;
69925adb370SBrian Feldman 
70036daaecdSAlan Cox 	error = map->system_map ?
701ccdf2333SAttilio Rao 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
7029fde98bbSAttilio Rao 	    !sx_try_xlock_(&map->lock, file, line);
7033a92e5d5SAlan Cox 	if (error == 0)
7043a92e5d5SAlan Cox 		map->timestamp++;
705bc91c510SAlan Cox 	return (error == 0);
7060e0af8ecSBrian Feldman }
7070e0af8ecSBrian Feldman 
7080e0af8ecSBrian Feldman int
70972d97679SDavid Schultz _vm_map_trylock_read(vm_map_t map, const char *file, int line)
71072d97679SDavid Schultz {
71172d97679SDavid Schultz 	int error;
71272d97679SDavid Schultz 
71372d97679SDavid Schultz 	error = map->system_map ?
714ccdf2333SAttilio Rao 	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
7159fde98bbSAttilio Rao 	    !sx_try_slock_(&map->lock, file, line);
71672d97679SDavid Schultz 	return (error == 0);
71772d97679SDavid Schultz }
71872d97679SDavid Schultz 
71905a8c414SAlan Cox /*
72005a8c414SAlan Cox  *	_vm_map_lock_upgrade:	[ internal use only ]
72105a8c414SAlan Cox  *
72205a8c414SAlan Cox  *	Tries to upgrade a read (shared) lock on the specified map to a write
72305a8c414SAlan Cox  *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
72405a8c414SAlan Cox  *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
72505a8c414SAlan Cox  *	returned without a read or write lock held.
72605a8c414SAlan Cox  *
72705a8c414SAlan Cox  *	Requires that the map be read locked.
72805a8c414SAlan Cox  */
72972d97679SDavid Schultz int
730780b1c09SAlan Cox _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
7310e0af8ecSBrian Feldman {
73205a8c414SAlan Cox 	unsigned int last_timestamp;
733bc91c510SAlan Cox 
73412c64974SMaxime Henrion 	if (map->system_map) {
735ccdf2333SAttilio Rao 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
73605a8c414SAlan Cox 	} else {
7379fde98bbSAttilio Rao 		if (!sx_try_upgrade_(&map->lock, file, line)) {
73805a8c414SAlan Cox 			last_timestamp = map->timestamp;
7399fde98bbSAttilio Rao 			sx_sunlock_(&map->lock, file, line);
7400b367bd8SKonstantin Belousov 			vm_map_process_deferred();
74105a8c414SAlan Cox 			/*
74205a8c414SAlan Cox 			 * If the map's timestamp does not change while the
74305a8c414SAlan Cox 			 * map is unlocked, then the upgrade succeeds.
74405a8c414SAlan Cox 			 */
7459fde98bbSAttilio Rao 			sx_xlock_(&map->lock, file, line);
74605a8c414SAlan Cox 			if (last_timestamp != map->timestamp) {
7479fde98bbSAttilio Rao 				sx_xunlock_(&map->lock, file, line);
74805a8c414SAlan Cox 				return (1);
74905a8c414SAlan Cox 			}
75005a8c414SAlan Cox 		}
75105a8c414SAlan Cox 	}
752bc91c510SAlan Cox 	map->timestamp++;
753bc91c510SAlan Cox 	return (0);
7540e0af8ecSBrian Feldman }
7550e0af8ecSBrian Feldman 
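/*
 * Hypothetical usage sketch (illustration only; assumes a local
 * vm_map_t map): recovering from a failed read-to-write upgrade.  On
 * failure the map is returned unlocked, so the caller must relock and
 * revalidate any state derived under the read lock.
 */
#if 0
	vm_map_lock_read(map);
	/* ... discover that a modification is required ... */
	if (vm_map_lock_upgrade(map) != 0) {
		vm_map_lock(map);
		/* ... revalidate; the map may have changed ... */
	}
	/* ... modify the map, then vm_map_unlock(map) ... */
#endif
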
7560e0af8ecSBrian Feldman void
757780b1c09SAlan Cox _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
7581b40f8c0SMatthew Dillon {
759bc91c510SAlan Cox 
76012c64974SMaxime Henrion 	if (map->system_map) {
761ccdf2333SAttilio Rao 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
762461587dcSDoug Moore 	} else {
763461587dcSDoug Moore 		VM_MAP_UNLOCK_CONSISTENT(map);
7649fde98bbSAttilio Rao 		sx_downgrade_(&map->lock, file, line);
76505a8c414SAlan Cox 	}
766461587dcSDoug Moore }
76705a8c414SAlan Cox 
76805a8c414SAlan Cox /*
76905a8c414SAlan Cox  *	vm_map_locked:
77005a8c414SAlan Cox  *
77105a8c414SAlan Cox  *	Returns a non-zero value if the caller holds a write (exclusive) lock
77205a8c414SAlan Cox  *	on the specified map and the value "0" otherwise.
77305a8c414SAlan Cox  */
77405a8c414SAlan Cox int
77505a8c414SAlan Cox vm_map_locked(vm_map_t map)
77605a8c414SAlan Cox {
77705a8c414SAlan Cox 
77805a8c414SAlan Cox 	if (map->system_map)
77905a8c414SAlan Cox 		return (mtx_owned(&map->system_mtx));
78005a8c414SAlan Cox 	else
78105a8c414SAlan Cox 		return (sx_xlocked(&map->lock));
78225adb370SBrian Feldman }
78325adb370SBrian Feldman 
784acd9a301SAlan Cox /*
7858304adaaSAlan Cox  *	_vm_map_unlock_and_wait:
7868304adaaSAlan Cox  *
7878304adaaSAlan Cox  *	Atomically releases the lock on the specified map and puts the calling
7888304adaaSAlan Cox  *	thread to sleep.  The calling thread will remain asleep until either
7898304adaaSAlan Cox  *	vm_map_wakeup() is performed on the map or the specified timeout is
7908304adaaSAlan Cox  *	exceeded.
7918304adaaSAlan Cox  *
7928304adaaSAlan Cox  *	WARNING!  This function does not perform deferred deallocations of
7938304adaaSAlan Cox  *	objects and map	entries.  Therefore, the calling thread is expected to
7948304adaaSAlan Cox  *	reacquire the map lock after reawakening and later perform an ordinary
7958304adaaSAlan Cox  *	unlock operation, such as vm_map_unlock(), before completing its
7968304adaaSAlan Cox  *	operation on the map.
797acd9a301SAlan Cox  */
7989688f931SAlan Cox int
7998304adaaSAlan Cox _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
800acd9a301SAlan Cox {
801acd9a301SAlan Cox 
802461587dcSDoug Moore 	VM_MAP_UNLOCK_CONSISTENT(map);
8033a92e5d5SAlan Cox 	mtx_lock(&map_sleep_mtx);
8048304adaaSAlan Cox 	if (map->system_map)
805ccdf2333SAttilio Rao 		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
8068304adaaSAlan Cox 	else
8079fde98bbSAttilio Rao 		sx_xunlock_(&map->lock, file, line);
8088304adaaSAlan Cox 	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
8098304adaaSAlan Cox 	    timo));
810acd9a301SAlan Cox }
811acd9a301SAlan Cox 
812acd9a301SAlan Cox /*
813acd9a301SAlan Cox  *	vm_map_wakeup:
8148304adaaSAlan Cox  *
8158304adaaSAlan Cox  *	Awaken any threads that have slept on the map using
8168304adaaSAlan Cox  *	vm_map_unlock_and_wait().
817acd9a301SAlan Cox  */
8189688f931SAlan Cox void
819acd9a301SAlan Cox vm_map_wakeup(vm_map_t map)
820acd9a301SAlan Cox {
821acd9a301SAlan Cox 
822b49ecb86SAlan Cox 	/*
8233a92e5d5SAlan Cox 	 * Acquire and release map_sleep_mtx to prevent a wakeup()
8248304adaaSAlan Cox 	 * from being performed (and lost) between the map unlock
8258304adaaSAlan Cox 	 * and the msleep() in _vm_map_unlock_and_wait().
826b49ecb86SAlan Cox 	 */
8273a92e5d5SAlan Cox 	mtx_lock(&map_sleep_mtx);
8283a92e5d5SAlan Cox 	mtx_unlock(&map_sleep_mtx);
829acd9a301SAlan Cox 	wakeup(&map->root);
830acd9a301SAlan Cox }
831acd9a301SAlan Cox 
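/*
 * Hypothetical usage sketch (illustration only; assumes a local
 * vm_map_t map, with demo_cond() standing in for the awaited
 * condition): the sleep/wakeup pattern served by
 * vm_map_unlock_and_wait() and vm_map_wakeup().
 */
#if 0
	vm_map_lock(map);
	while (!demo_cond(map)) {
		map->needs_wakeup = TRUE;
		/* Drops the lock atomically; reacquire before retesting. */
		(void)vm_map_unlock_and_wait(map, 0);
		vm_map_lock(map);
	}
	vm_map_unlock(map);
#endif
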
832a5db445dSMax Laier void
833a5db445dSMax Laier vm_map_busy(vm_map_t map)
834a5db445dSMax Laier {
835a5db445dSMax Laier 
836a5db445dSMax Laier 	VM_MAP_ASSERT_LOCKED(map);
837a5db445dSMax Laier 	map->busy++;
838a5db445dSMax Laier }
839a5db445dSMax Laier 
840a5db445dSMax Laier void
841a5db445dSMax Laier vm_map_unbusy(vm_map_t map)
842a5db445dSMax Laier {
843a5db445dSMax Laier 
844a5db445dSMax Laier 	VM_MAP_ASSERT_LOCKED(map);
845a5db445dSMax Laier 	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
846a5db445dSMax Laier 	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
847a5db445dSMax Laier 		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
848a5db445dSMax Laier 		wakeup(&map->busy);
849a5db445dSMax Laier 	}
850a5db445dSMax Laier }
851a5db445dSMax Laier 
852a5db445dSMax Laier void
853a5db445dSMax Laier vm_map_wait_busy(vm_map_t map)
854a5db445dSMax Laier {
855a5db445dSMax Laier 
856a5db445dSMax Laier 	VM_MAP_ASSERT_LOCKED(map);
857a5db445dSMax Laier 	while (map->busy) {
858a5db445dSMax Laier 		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
859a5db445dSMax Laier 		if (map->system_map)
860a5db445dSMax Laier 			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
861a5db445dSMax Laier 		else
862a5db445dSMax Laier 			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
863a5db445dSMax Laier 	}
864a5db445dSMax Laier 	map->timestamp++;
865a5db445dSMax Laier }
866a5db445dSMax Laier 
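/*
 * Hypothetical usage sketch (illustration only; assumes a local
 * vm_map_t map): the busy count lets a thread drop the map lock while
 * still deferring threads that must first call vm_map_wait_busy().
 */
#if 0
	vm_map_lock(map);
	vm_map_busy(map);
	vm_map_unlock(map);
	/* ... sleepable work that must not race with such threads ... */
	vm_map_lock(map);
	vm_map_unbusy(map);
	vm_map_unlock(map);
#endif
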
8671b40f8c0SMatthew Dillon long
8681b40f8c0SMatthew Dillon vmspace_resident_count(struct vmspace *vmspace)
8691b40f8c0SMatthew Dillon {
8701b40f8c0SMatthew Dillon 	return pmap_resident_count(vmspace_pmap(vmspace));
8711b40f8c0SMatthew Dillon }
8721b40f8c0SMatthew Dillon 
873ff2b5645SMatthew Dillon /*
874df8bae1dSRodney W. Grimes  *	vm_map_create:
875df8bae1dSRodney W. Grimes  *
876df8bae1dSRodney W. Grimes  *	Creates and returns a new empty VM map with
877df8bae1dSRodney W. Grimes  *	the given physical map structure, and having
878df8bae1dSRodney W. Grimes  *	the given lower and upper address bounds.
879df8bae1dSRodney W. Grimes  */
8800d94caffSDavid Greenman vm_map_t
8811b40f8c0SMatthew Dillon vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
882df8bae1dSRodney W. Grimes {
883c0877f10SJohn Dyson 	vm_map_t result;
884df8bae1dSRodney W. Grimes 
885a163d034SWarner Losh 	result = uma_zalloc(mapzone, M_WAITOK);
88621c641b2SJohn Baldwin 	CTR1(KTR_VM, "vm_map_create: %p", result);
88792351f16SAlan Cox 	_vm_map_init(result, pmap, min, max);
888df8bae1dSRodney W. Grimes 	return (result);
889df8bae1dSRodney W. Grimes }
890df8bae1dSRodney W. Grimes 
891df8bae1dSRodney W. Grimes /*
892df8bae1dSRodney W. Grimes  * Initialize an existing vm_map structure
893df8bae1dSRodney W. Grimes  * such as that in the vmspace structure.
894df8bae1dSRodney W. Grimes  */
8958355f576SJeff Roberson static void
89692351f16SAlan Cox _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
897df8bae1dSRodney W. Grimes {
89821c641b2SJohn Baldwin 
8992203c46dSMark Johnston 	map->header.eflags = MAP_ENTRY_HEADER;
9009688f931SAlan Cox 	map->needs_wakeup = FALSE;
9013075778bSJohn Dyson 	map->system_map = 0;
90292351f16SAlan Cox 	map->pmap = pmap;
903f0165b1cSKonstantin Belousov 	map->header.end = min;
904f0165b1cSKonstantin Belousov 	map->header.start = max;
905af7cd0c5SBrian Feldman 	map->flags = 0;
906c1ad5342SDoug Moore 	map->header.left = map->header.right = &map->header;
9074e94f402SAlan Cox 	map->root = NULL;
908df8bae1dSRodney W. Grimes 	map->timestamp = 0;
909a5db445dSMax Laier 	map->busy = 0;
910fa50a355SKonstantin Belousov 	map->anon_loc = 0;
911461587dcSDoug Moore #ifdef DIAGNOSTIC
912461587dcSDoug Moore 	map->nupdates = 0;
913461587dcSDoug Moore #endif
914df8bae1dSRodney W. Grimes }
915df8bae1dSRodney W. Grimes 
916a18b1f1dSJason Evans void
91792351f16SAlan Cox vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
918a18b1f1dSJason Evans {
91992351f16SAlan Cox 
92092351f16SAlan Cox 	_vm_map_init(map, pmap, min, max);
921d923c598SAlan Cox 	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
92212c64974SMaxime Henrion 	sx_init(&map->lock, "user map");
923a18b1f1dSJason Evans }
924a18b1f1dSJason Evans 
925df8bae1dSRodney W. Grimes /*
926b18bfc3dSJohn Dyson  *	vm_map_entry_dispose:	[ internal use only ]
927b18bfc3dSJohn Dyson  *
928b18bfc3dSJohn Dyson  *	Inverse of vm_map_entry_create.
929b18bfc3dSJohn Dyson  */
93062487bb4SJohn Dyson static void
9311b40f8c0SMatthew Dillon vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
932b18bfc3dSJohn Dyson {
9332b4a2c27SAlan Cox 	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
934b18bfc3dSJohn Dyson }
935b18bfc3dSJohn Dyson 
936b18bfc3dSJohn Dyson /*
937df8bae1dSRodney W. Grimes  *	vm_map_entry_create:	[ internal use only ]
938df8bae1dSRodney W. Grimes  *
939df8bae1dSRodney W. Grimes  *	Allocates a VM map entry for insertion.
940b28cb1caSAlfred Perlstein  *	No entry fields are filled in.
941df8bae1dSRodney W. Grimes  */
942f708ef1bSPoul-Henning Kamp static vm_map_entry_t
9431b40f8c0SMatthew Dillon vm_map_entry_create(vm_map_t map)
944df8bae1dSRodney W. Grimes {
9451f6889a1SMatthew Dillon 	vm_map_entry_t new_entry;
9461f6889a1SMatthew Dillon 
9472b4a2c27SAlan Cox 	if (map->system_map)
9482b4a2c27SAlan Cox 		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
9492b4a2c27SAlan Cox 	else
950a163d034SWarner Losh 		new_entry = uma_zalloc(mapentzone, M_WAITOK);
9511f6889a1SMatthew Dillon 	if (new_entry == NULL)
9521f6889a1SMatthew Dillon 		panic("vm_map_entry_create: kernel resources exhausted");
9531f6889a1SMatthew Dillon 	return (new_entry);
954df8bae1dSRodney W. Grimes }
955df8bae1dSRodney W. Grimes 
956df8bae1dSRodney W. Grimes /*
957794316a8SAlan Cox  *	vm_map_entry_set_behavior:
958794316a8SAlan Cox  *
959794316a8SAlan Cox  *	Set the expected access behavior, either normal, random, or
960794316a8SAlan Cox  *	sequential.
961794316a8SAlan Cox  */
96262a59e8fSWarner Losh static inline void
963794316a8SAlan Cox vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
964794316a8SAlan Cox {
965794316a8SAlan Cox 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
966794316a8SAlan Cox 	    (behavior & MAP_ENTRY_BEHAV_MASK);
967794316a8SAlan Cox }
968794316a8SAlan Cox 
969794316a8SAlan Cox /*
9705a0879daSDoug Moore  *	vm_map_entry_max_free_{left,right}:
9710164e057SAlan Cox  *
9725a0879daSDoug Moore  *	Compute the size of the largest free gap between two entries,
9735a0879daSDoug Moore  *	one the root of a tree and the other the ancestor of that root
9745a0879daSDoug Moore  *	that is the least or greatest ancestor found on the search path.
9750164e057SAlan Cox  */
9765a0879daSDoug Moore static inline vm_size_t
9775a0879daSDoug Moore vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
9780164e057SAlan Cox {
9790164e057SAlan Cox 
980c1ad5342SDoug Moore 	return (root->left != left_ancestor ?
9815a0879daSDoug Moore 	    root->left->max_free : root->start - left_ancestor->end);
9825a0879daSDoug Moore }
9835a0879daSDoug Moore 
9845a0879daSDoug Moore static inline vm_size_t
9855a0879daSDoug Moore vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
9865a0879daSDoug Moore {
9875a0879daSDoug Moore 
988c1ad5342SDoug Moore 	return (root->right != right_ancestor ?
9895a0879daSDoug Moore 	    root->right->max_free : right_ancestor->start - root->end);
9900164e057SAlan Cox }
9910164e057SAlan Cox 
99283704cc2SDoug Moore /*
99383704cc2SDoug Moore  *	vm_map_entry_{pred,succ}:
99483704cc2SDoug Moore  *
99583704cc2SDoug Moore  *	Find the {predecessor, successor} of the entry by taking one step
99683704cc2SDoug Moore  *	in the appropriate direction and backtracking as much as necessary.
997c1ad5342SDoug Moore  *	vm_map_entry_succ is defined in vm_map.h.
99883704cc2SDoug Moore  */
99983704cc2SDoug Moore static inline vm_map_entry_t
100083704cc2SDoug Moore vm_map_entry_pred(vm_map_entry_t entry)
100183704cc2SDoug Moore {
1002c1ad5342SDoug Moore 	vm_map_entry_t prior;
100383704cc2SDoug Moore 
1004c1ad5342SDoug Moore 	prior = entry->left;
1005c1ad5342SDoug Moore 	if (prior->right->start < entry->start) {
1006c1ad5342SDoug Moore 		do
1007c1ad5342SDoug Moore 			prior = prior->right;
1008c1ad5342SDoug Moore 		while (prior->right != entry);
100983704cc2SDoug Moore 	}
1010c1ad5342SDoug Moore 	return (prior);
1011c1ad5342SDoug Moore }
101283704cc2SDoug Moore 
101385b7bedbSDoug Moore static inline vm_size_t
101485b7bedbSDoug Moore vm_size_max(vm_size_t a, vm_size_t b)
101585b7bedbSDoug Moore {
101685b7bedbSDoug Moore 
101785b7bedbSDoug Moore 	return (a > b ? a : b);
101885b7bedbSDoug Moore }
101985b7bedbSDoug Moore 
1020c1ad5342SDoug Moore #define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do {		\
1021c1ad5342SDoug Moore 	vm_map_entry_t z;						\
10225a0879daSDoug Moore 	vm_size_t max_free;						\
10235a0879daSDoug Moore 									\
10245a0879daSDoug Moore 	/*								\
10255a0879daSDoug Moore 	 * Infer root->right->max_free == root->max_free when		\
10265a0879daSDoug Moore 	 * y->max_free < root->max_free || root->max_free == 0.		\
10275a0879daSDoug Moore 	 * Otherwise, look right to find it.				\
10285a0879daSDoug Moore 	 */								\
10299f701172SKonstantin Belousov 	y = root->left;							\
10305a0879daSDoug Moore 	max_free = root->max_free;					\
1031668a8aa8SDoug Moore 	KASSERT(max_free == vm_size_max(				\
1032668a8aa8SDoug Moore 	    vm_map_entry_max_free_left(root, llist),			\
1033668a8aa8SDoug Moore 	    vm_map_entry_max_free_right(root, rlist)),			\
10345a0879daSDoug Moore 	    ("%s: max_free invariant fails", __func__));		\
1035668a8aa8SDoug Moore 	if (max_free - 1 < vm_map_entry_max_free_left(root, llist))	\
10365a0879daSDoug Moore 		max_free = vm_map_entry_max_free_right(root, rlist);	\
1037c1ad5342SDoug Moore 	if (y != llist && (test)) {					\
10389f701172SKonstantin Belousov 		/* Rotate right and make y root. */			\
1039c1ad5342SDoug Moore 		z = y->right;						\
1040c1ad5342SDoug Moore 		if (z != root) {					\
1041c1ad5342SDoug Moore 			root->left = z;					\
10429f701172SKonstantin Belousov 			y->right = root;				\
10435a0879daSDoug Moore 			if (max_free < y->max_free)			\
104485b7bedbSDoug Moore 			    root->max_free = max_free =			\
1045c1ad5342SDoug Moore 			    vm_size_max(max_free, z->max_free);		\
1046c1ad5342SDoug Moore 		} else if (max_free < y->max_free)			\
1047c1ad5342SDoug Moore 			root->max_free = max_free =			\
1048c1ad5342SDoug Moore 			    vm_size_max(max_free, root->start - y->end);\
10499f701172SKonstantin Belousov 		root = y;						\
10509f701172SKonstantin Belousov 		y = root->left;						\
10519f701172SKonstantin Belousov 	}								\
10525a0879daSDoug Moore 	/* Copy right->max_free.  Put root on rlist. */			\
10535a0879daSDoug Moore 	root->max_free = max_free;					\
10545a0879daSDoug Moore 	KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),	\
10555a0879daSDoug Moore 	    ("%s: max_free not copied from right", __func__));		\
10569f701172SKonstantin Belousov 	root->left = rlist;						\
10579f701172SKonstantin Belousov 	rlist = root;							\
1058c1ad5342SDoug Moore 	root = y != llist ? y : NULL;					\
10599f701172SKonstantin Belousov } while (0)
10609f701172SKonstantin Belousov 
1061c1ad5342SDoug Moore #define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do {		\
1062c1ad5342SDoug Moore 	vm_map_entry_t z;						\
10635a0879daSDoug Moore 	vm_size_t max_free;						\
10645a0879daSDoug Moore 									\
10655a0879daSDoug Moore 	/*								\
10665a0879daSDoug Moore 	 * Infer root->left->max_free == root->max_free when		\
10675a0879daSDoug Moore 	 * y->max_free < root->max_free || root->max_free == 0.		\
10685a0879daSDoug Moore 	 * Otherwise, look left to find it.				\
10695a0879daSDoug Moore 	 */								\
10709f701172SKonstantin Belousov 	y = root->right;						\
10715a0879daSDoug Moore 	max_free = root->max_free;					\
1072668a8aa8SDoug Moore 	KASSERT(max_free == vm_size_max(				\
1073668a8aa8SDoug Moore 	    vm_map_entry_max_free_left(root, llist),			\
1074668a8aa8SDoug Moore 	    vm_map_entry_max_free_right(root, rlist)),			\
10755a0879daSDoug Moore 	    ("%s: max_free invariant fails", __func__));		\
1076668a8aa8SDoug Moore 	if (max_free - 1 < vm_map_entry_max_free_right(root, rlist))	\
10775a0879daSDoug Moore 		max_free = vm_map_entry_max_free_left(root, llist);	\
1078c1ad5342SDoug Moore 	if (y != rlist && (test)) {					\
10799f701172SKonstantin Belousov 		/* Rotate left and make y root. */			\
1080c1ad5342SDoug Moore 		z = y->left;						\
1081c1ad5342SDoug Moore 		if (z != root) {					\
1082c1ad5342SDoug Moore 			root->right = z;				\
10839f701172SKonstantin Belousov 			y->left = root;					\
10845a0879daSDoug Moore 			if (max_free < y->max_free)			\
108585b7bedbSDoug Moore 			    root->max_free = max_free =			\
1086c1ad5342SDoug Moore 			    vm_size_max(max_free, z->max_free);		\
1087c1ad5342SDoug Moore 		} else if (max_free < y->max_free)			\
1088c1ad5342SDoug Moore 			root->max_free = max_free =			\
1089c1ad5342SDoug Moore 			    vm_size_max(max_free, y->start - root->end);\
10909f701172SKonstantin Belousov 		root = y;						\
10919f701172SKonstantin Belousov 		y = root->right;					\
10929f701172SKonstantin Belousov 	}								\
10935a0879daSDoug Moore 	/* Copy left->max_free.  Put root on llist. */			\
10945a0879daSDoug Moore 	root->max_free = max_free;					\
10955a0879daSDoug Moore 	KASSERT(max_free == vm_map_entry_max_free_left(root, llist),	\
10965a0879daSDoug Moore 	    ("%s: max_free not copied from left", __func__));		\
10979f701172SKonstantin Belousov 	root->right = llist;						\
10989f701172SKonstantin Belousov 	llist = root;							\
1099c1ad5342SDoug Moore 	root = y != rlist ? y : NULL;					\
11009f701172SKonstantin Belousov } while (0)
11019f701172SKonstantin Belousov 
11020164e057SAlan Cox /*
1103c1ad5342SDoug Moore  * Walk down the tree until we find addr or a gap where addr would go, breaking
1104c1ad5342SDoug Moore  * off left and right subtrees of nodes less than or greater than addr.  Treat
1105c1ad5342SDoug Moore  * subtrees with root->max_free < length as empty trees.  llist and rlist are
1106c1ad5342SDoug Moore  * the two sides in reverse order (bottom-up), with llist linked by the right
1107c1ad5342SDoug Moore  * pointer and rlist linked by the left pointer in the vm_map_entry, and both
1108c1ad5342SDoug Moore  * lists terminated by &map->header.  This function, and the subsequent call to
1109c1ad5342SDoug Moore  * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address
11105a0879daSDoug Moore  * values in &map->header.
11114e94f402SAlan Cox  */
11121867d2f2SDoug Moore static __always_inline vm_map_entry_t
11135a0879daSDoug Moore vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
11141867d2f2SDoug Moore     vm_map_entry_t *llist, vm_map_entry_t *rlist)
11154e94f402SAlan Cox {
1116c1ad5342SDoug Moore 	vm_map_entry_t left, right, root, y;
11174e94f402SAlan Cox 
1118c1ad5342SDoug Moore 	left = right = &map->header;
11195a0879daSDoug Moore 	root = map->root;
11209f701172SKonstantin Belousov 	while (root != NULL && root->max_free >= length) {
1121c1ad5342SDoug Moore 		KASSERT(left->end <= root->start &&
1122c1ad5342SDoug Moore 		    root->end <= right->start,
11235a0879daSDoug Moore 		    ("%s: root not within tree bounds", __func__));
11240164e057SAlan Cox 		if (addr < root->start) {
1125c1ad5342SDoug Moore 			SPLAY_LEFT_STEP(root, y, left, right,
11269f701172SKonstantin Belousov 			    y->max_free >= length && addr < y->start);
11277438d60bSAlan Cox 		} else if (addr >= root->end) {
1128c1ad5342SDoug Moore 			SPLAY_RIGHT_STEP(root, y, left, right,
11299f701172SKonstantin Belousov 			    y->max_free >= length && addr >= y->end);
11307438d60bSAlan Cox 		} else
11317438d60bSAlan Cox 			break;
11320164e057SAlan Cox 	}
1133c1ad5342SDoug Moore 	*llist = left;
1134c1ad5342SDoug Moore 	*rlist = right;
11359f701172SKonstantin Belousov 	return (root);
11369f701172SKonstantin Belousov }
11379f701172SKonstantin Belousov 
11381867d2f2SDoug Moore static __always_inline void
11391867d2f2SDoug Moore vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist)
11409f701172SKonstantin Belousov {
1141c1ad5342SDoug Moore 	vm_map_entry_t hi, right, y;
11429f701172SKonstantin Belousov 
1143c1ad5342SDoug Moore 	right = *rlist;
1144c1ad5342SDoug Moore 	hi = root->right == right ? NULL : root->right;
1145c1ad5342SDoug Moore 	if (hi == NULL)
1146c1ad5342SDoug Moore 		return;
1147c1ad5342SDoug Moore 	do
1148c1ad5342SDoug Moore 		SPLAY_LEFT_STEP(hi, y, root, right, true);
1149c1ad5342SDoug Moore 	while (hi != NULL);
1150c1ad5342SDoug Moore 	*rlist = right;
11519f701172SKonstantin Belousov }
11529f701172SKonstantin Belousov 
11531867d2f2SDoug Moore static __always_inline void
11541867d2f2SDoug Moore vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist)
11559f701172SKonstantin Belousov {
1156c1ad5342SDoug Moore 	vm_map_entry_t left, lo, y;
11579f701172SKonstantin Belousov 
1158c1ad5342SDoug Moore 	left = *llist;
1159c1ad5342SDoug Moore 	lo = root->left == left ? NULL : root->left;
1160c1ad5342SDoug Moore 	if (lo == NULL)
1161c1ad5342SDoug Moore 		return;
1162c1ad5342SDoug Moore 	do
1163c1ad5342SDoug Moore 		SPLAY_RIGHT_STEP(lo, y, left, root, true);
1164c1ad5342SDoug Moore 	while (lo != NULL);
1165c1ad5342SDoug Moore 	*llist = left;
11669f701172SKonstantin Belousov }
11670164e057SAlan Cox 
11685a0879daSDoug Moore static inline void
11695a0879daSDoug Moore vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
11705a0879daSDoug Moore {
11715a0879daSDoug Moore 	vm_map_entry_t tmp;
11725a0879daSDoug Moore 
11735a0879daSDoug Moore 	tmp = *b;
11745a0879daSDoug Moore 	*b = *a;
11755a0879daSDoug Moore 	*a = tmp;
11765a0879daSDoug Moore }
11775a0879daSDoug Moore 
11780164e057SAlan Cox /*
11799f701172SKonstantin Belousov  * Walk back up the two spines, flip the pointers and set max_free.  The
11809f701172SKonstantin Belousov  * subtrees of the root go at the bottom of llist and rlist.
11810164e057SAlan Cox  */
118285b7bedbSDoug Moore static vm_size_t
118385b7bedbSDoug Moore vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root,
118485b7bedbSDoug Moore     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
11859f701172SKonstantin Belousov {
11865a0879daSDoug Moore 	do {
11870164e057SAlan Cox 		/*
11885a0879daSDoug Moore 		 * The max_free values of the children of llist are in
118985b7bedbSDoug Moore 		 * llist->max_free and max_free.  Update with the
11905a0879daSDoug Moore 		 * max value.
11910164e057SAlan Cox 		 */
119285b7bedbSDoug Moore 		llist->max_free = max_free =
119385b7bedbSDoug Moore 		    vm_size_max(llist->max_free, max_free);
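		/*
		 * Reattach the subtree carried in "tail" as the right
		 * child of llist, and step up the spine: after the two
		 * swaps, tail is the old llist and llist is the old
		 * llist->right.
		 */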
119485b7bedbSDoug Moore 		vm_map_entry_swap(&llist->right, &tail);
119585b7bedbSDoug Moore 		vm_map_entry_swap(&tail, &llist);
119685b7bedbSDoug Moore 	} while (llist != header);
119785b7bedbSDoug Moore 	root->left = tail;
119885b7bedbSDoug Moore 	return (max_free);
11995a0879daSDoug Moore }
120085b7bedbSDoug Moore 
120185b7bedbSDoug Moore /*
120285b7bedbSDoug Moore  * When llist is known to be the predecessor of root.
120385b7bedbSDoug Moore  */
120485b7bedbSDoug Moore static inline vm_size_t
120585b7bedbSDoug Moore vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root,
120685b7bedbSDoug Moore     vm_map_entry_t llist)
120785b7bedbSDoug Moore {
120885b7bedbSDoug Moore 	vm_size_t max_free;
120985b7bedbSDoug Moore 
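	/* The gap between the predecessor and root is free space. */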
121085b7bedbSDoug Moore 	max_free = root->start - llist->end;
121185b7bedbSDoug Moore 	if (llist != header) {
121285b7bedbSDoug Moore 		max_free = vm_map_splay_merge_left_walk(header, root,
1213c1ad5342SDoug Moore 		    root, max_free, llist);
121485b7bedbSDoug Moore 	} else {
1215c1ad5342SDoug Moore 		root->left = header;
1216c1ad5342SDoug Moore 		header->right = root;
121785b7bedbSDoug Moore 	}
121885b7bedbSDoug Moore 	return (max_free);
121985b7bedbSDoug Moore }
122085b7bedbSDoug Moore 
122185b7bedbSDoug Moore /*
122285b7bedbSDoug Moore  * When llist may or may not be the predecessor of root.
122385b7bedbSDoug Moore  */
122485b7bedbSDoug Moore static inline vm_size_t
122585b7bedbSDoug Moore vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root,
122685b7bedbSDoug Moore     vm_map_entry_t llist)
122785b7bedbSDoug Moore {
122885b7bedbSDoug Moore 	vm_size_t max_free;
122985b7bedbSDoug Moore 
123085b7bedbSDoug Moore 	max_free = vm_map_entry_max_free_left(root, llist);
123185b7bedbSDoug Moore 	if (llist != header) {
123285b7bedbSDoug Moore 		max_free = vm_map_splay_merge_left_walk(header, root,
1233c1ad5342SDoug Moore 		    root->left == llist ? root : root->left,
1234c1ad5342SDoug Moore 		    max_free, llist);
123585b7bedbSDoug Moore 	}
123685b7bedbSDoug Moore 	return (max_free);
123785b7bedbSDoug Moore }
123885b7bedbSDoug Moore 
123985b7bedbSDoug Moore static vm_size_t
124085b7bedbSDoug Moore vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root,
124185b7bedbSDoug Moore     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
124285b7bedbSDoug Moore {
12435a0879daSDoug Moore 	do {
12445a0879daSDoug Moore 		/*
12455a0879daSDoug Moore 		 * The max_free values of the children of rlist are in
124685b7bedbSDoug Moore 		 * rlist->max_free and max_free.  Update with the
12475a0879daSDoug Moore 		 * max value.
12485a0879daSDoug Moore 		 */
124985b7bedbSDoug Moore 		rlist->max_free = max_free =
125085b7bedbSDoug Moore 		    vm_size_max(rlist->max_free, max_free);
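		/*
		 * Mirror image of the left-spine walk: reattach "tail"
		 * as the left child of rlist and step up the spine.
		 */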
125185b7bedbSDoug Moore 		vm_map_entry_swap(&rlist->left, &tail);
125285b7bedbSDoug Moore 		vm_map_entry_swap(&tail, &rlist);
125385b7bedbSDoug Moore 	} while (rlist != header);
125485b7bedbSDoug Moore 	root->right = tail;
125585b7bedbSDoug Moore 	return (max_free);
12565a0879daSDoug Moore }
125785b7bedbSDoug Moore 
125885b7bedbSDoug Moore /*
125985b7bedbSDoug Moore  * When rlist is known to be the successor of root.
126085b7bedbSDoug Moore  */
126185b7bedbSDoug Moore static inline vm_size_t
126285b7bedbSDoug Moore vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root,
126385b7bedbSDoug Moore     vm_map_entry_t rlist)
126485b7bedbSDoug Moore {
126585b7bedbSDoug Moore 	vm_size_t max_free;
126685b7bedbSDoug Moore 
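	/* The gap between root and its successor is free space. */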
126785b7bedbSDoug Moore 	max_free = rlist->start - root->end;
126885b7bedbSDoug Moore 	if (rlist != header) {
126985b7bedbSDoug Moore 		max_free = vm_map_splay_merge_right_walk(header, root,
1270c1ad5342SDoug Moore 		    root, max_free, rlist);
127185b7bedbSDoug Moore 	} else {
1272c1ad5342SDoug Moore 		root->right = header;
1273c1ad5342SDoug Moore 		header->left = root;
127485b7bedbSDoug Moore 	}
127585b7bedbSDoug Moore 	return (max_free);
127685b7bedbSDoug Moore }
127785b7bedbSDoug Moore 
127885b7bedbSDoug Moore /*
127985b7bedbSDoug Moore  * When rlist may or may not be the successor of root.
128085b7bedbSDoug Moore  */
128185b7bedbSDoug Moore static inline vm_size_t
128285b7bedbSDoug Moore vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root,
128385b7bedbSDoug Moore     vm_map_entry_t rlist)
128485b7bedbSDoug Moore {
128585b7bedbSDoug Moore 	vm_size_t max_free;
128685b7bedbSDoug Moore 
128785b7bedbSDoug Moore 	max_free = vm_map_entry_max_free_right(root, rlist);
128885b7bedbSDoug Moore 	if (rlist != header) {
128985b7bedbSDoug Moore 		max_free = vm_map_splay_merge_right_walk(header, root,
1290c1ad5342SDoug Moore 		    root->right == rlist ? root : root->right,
1291c1ad5342SDoug Moore 		    max_free, rlist);
129285b7bedbSDoug Moore 	}
129385b7bedbSDoug Moore 	return (max_free);
12944e94f402SAlan Cox }
12954e94f402SAlan Cox 
12964e94f402SAlan Cox /*
1297d1d3f7e1SDoug Moore  *	vm_map_splay:
1298d1d3f7e1SDoug Moore  *
1299d1d3f7e1SDoug Moore  *	The Sleator and Tarjan top-down splay algorithm with the
1300d1d3f7e1SDoug Moore  *	following variation.  Max_free must be computed bottom-up, so
1301d1d3f7e1SDoug Moore  *	on the downward pass, maintain the left and right spines in
1302d1d3f7e1SDoug Moore  *	reverse order.  Then, make a second pass up each side to fix
1303d1d3f7e1SDoug Moore  *	the pointers and compute max_free.  The time bound is O(log n)
1304d1d3f7e1SDoug Moore  *	amortized.
1305d1d3f7e1SDoug Moore  *
1306c1ad5342SDoug Moore  *	The tree is threaded, which means that there are no null pointers.
1307c1ad5342SDoug Moore  *	When a node has no left child, its left pointer points to its
1308c1ad5342SDoug Moore  *	predecessor, which is the last ancestor on the search path from
1309c1ad5342SDoug Moore  *	the root where the search branched right.  Likewise, when a node
1310c1ad5342SDoug Moore  *	has no right child, its right pointer points to its successor.
1311c1ad5342SDoug Moore  *	The map header node is the predecessor of the first map entry,
1312c1ad5342SDoug Moore  *	and the successor of the last.
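 *
 *	For example, if the map holds exactly three entries A < B < C,
 *	with B at the root, then A->right and C->left point to B, while
 *	A->left and C->right point to &map->header.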
1313c1ad5342SDoug Moore  *
1314d1d3f7e1SDoug Moore  *	The new root is the vm_map_entry containing "addr", or else an
1315d1d3f7e1SDoug Moore  *	adjacent entry (lower if possible) if addr is not in the tree.
1316d1d3f7e1SDoug Moore  *
1317d1d3f7e1SDoug Moore  *	The map must be locked, and leaves it so.
1318d1d3f7e1SDoug Moore  *
1319d1d3f7e1SDoug Moore  *	Returns: the new root.
1320d1d3f7e1SDoug Moore  */
1321d1d3f7e1SDoug Moore static vm_map_entry_t
1322d1d3f7e1SDoug Moore vm_map_splay(vm_map_t map, vm_offset_t addr)
1323d1d3f7e1SDoug Moore {
132485b7bedbSDoug Moore 	vm_map_entry_t header, llist, rlist, root;
132585b7bedbSDoug Moore 	vm_size_t max_free_left, max_free_right;
1326d1d3f7e1SDoug Moore 
132785b7bedbSDoug Moore 	header = &map->header;
1328d1d3f7e1SDoug Moore 	root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
1329d1d3f7e1SDoug Moore 	if (root != NULL) {
133085b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_left(header, root, llist);
133185b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_right(header, root, rlist);
133285b7bedbSDoug Moore 	} else if (llist != header) {
1333d1d3f7e1SDoug Moore 		/*
1334d1d3f7e1SDoug Moore 		 * Recover the greatest node in the left
1335d1d3f7e1SDoug Moore 		 * subtree and make it the root.
1336d1d3f7e1SDoug Moore 		 */
1337d1d3f7e1SDoug Moore 		root = llist;
1338d1d3f7e1SDoug Moore 		llist = root->right;
133985b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_left(header, root, llist);
134085b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
134185b7bedbSDoug Moore 	} else if (rlist != header) {
1342d1d3f7e1SDoug Moore 		/*
1343d1d3f7e1SDoug Moore 		 * Recover the least node in the right
1344d1d3f7e1SDoug Moore 		 * subtree and make it the root.
1345d1d3f7e1SDoug Moore 		 */
1346d1d3f7e1SDoug Moore 		root = rlist;
1347d1d3f7e1SDoug Moore 		rlist = root->left;
134885b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_pred(header, root, llist);
134985b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_right(header, root, rlist);
1350d1d3f7e1SDoug Moore 	} else {
1351d1d3f7e1SDoug Moore 		/* There is no root. */
1352d1d3f7e1SDoug Moore 		return (NULL);
1353d1d3f7e1SDoug Moore 	}
135485b7bedbSDoug Moore 	root->max_free = vm_size_max(max_free_left, max_free_right);
135585b7bedbSDoug Moore 	map->root = root;
1356d1d3f7e1SDoug Moore 	VM_MAP_ASSERT_CONSISTENT(map);
1357d1d3f7e1SDoug Moore 	return (root);
1358d1d3f7e1SDoug Moore }
1359d1d3f7e1SDoug Moore 
1360d1d3f7e1SDoug Moore /*
1361df8bae1dSRodney W. Grimes  *	vm_map_entry_{un,}link:
1362df8bae1dSRodney W. Grimes  *
1363668a8aa8SDoug Moore  *	Insert/remove entries from maps.  On linking, if new entry clips
1364668a8aa8SDoug Moore  *	existing entry, trim existing entry to avoid overlap, and manage
1365668a8aa8SDoug Moore  *	offsets.  On unlinking, merge disappearing entry with neighbor, if
1366668a8aa8SDoug Moore  *	called for, and manage offsets.  Callers should not modify fields in
1367668a8aa8SDoug Moore  *	entries already mapped.
1368df8bae1dSRodney W. Grimes  */
13694e94f402SAlan Cox static void
13705a0879daSDoug Moore vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
137199c81ca9SAlan Cox {
137285b7bedbSDoug Moore 	vm_map_entry_t header, llist, rlist, root;
1373668a8aa8SDoug Moore 	vm_size_t max_free_left, max_free_right;
137421c641b2SJohn Baldwin 
13759f701172SKonstantin Belousov 	CTR3(KTR_VM,
13769f701172SKonstantin Belousov 	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
13779f701172SKonstantin Belousov 	    map->nentries, entry);
13783a0916b8SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(map);
137999c81ca9SAlan Cox 	map->nentries++;
138085b7bedbSDoug Moore 	header = &map->header;
13815a0879daSDoug Moore 	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1382668a8aa8SDoug Moore 	if (root == NULL) {
1383668a8aa8SDoug Moore 		/*
1384668a8aa8SDoug Moore 		 * The new entry does not overlap any existing entry in the
1385668a8aa8SDoug Moore 		 * map, so it becomes the new root of the map tree.
1386668a8aa8SDoug Moore 		 */
1387668a8aa8SDoug Moore 		max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1388668a8aa8SDoug Moore 		max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1389668a8aa8SDoug Moore 	} else if (entry->start == root->start) {
1390668a8aa8SDoug Moore 		/*
1391668a8aa8SDoug Moore 		 * The new entry is a clone of root, with only the end field
1392668a8aa8SDoug Moore 		 * changed.  The root entry will be shrunk to abut the new
1393668a8aa8SDoug Moore 		 * entry, and will be the right child of the new root entry in
1394668a8aa8SDoug Moore 		 * the modified map.
1395668a8aa8SDoug Moore 		 */
1396668a8aa8SDoug Moore 		KASSERT(entry->end < root->end,
1397668a8aa8SDoug Moore 		    ("%s: clip_start not within entry", __func__));
1398668a8aa8SDoug Moore 		vm_map_splay_findprev(root, &llist);
1399668a8aa8SDoug Moore 		root->offset += entry->end - root->start;
1400668a8aa8SDoug Moore 		root->start = entry->end;
1401668a8aa8SDoug Moore 		max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1402668a8aa8SDoug Moore 		max_free_right = root->max_free = vm_size_max(
1403668a8aa8SDoug Moore 		    vm_map_splay_merge_pred(entry, root, entry),
1404668a8aa8SDoug Moore 		    vm_map_splay_merge_right(header, root, rlist));
1405668a8aa8SDoug Moore 	} else {
1406668a8aa8SDoug Moore 		/*
1407668a8aa8SDoug Moore 		 * The new entry is a clone of root, with only the start field
1408668a8aa8SDoug Moore 		 * changed.  The root entry will be shrunk to abut the new
1409668a8aa8SDoug Moore 		 * entry, and will be the left child of the new root entry in
1410668a8aa8SDoug Moore 		 * the modified map.
1411668a8aa8SDoug Moore 		 */
1412668a8aa8SDoug Moore 		KASSERT(entry->end == root->end,
1413668a8aa8SDoug Moore 		    ("%s: clip_start not within entry", __func__));
1414668a8aa8SDoug Moore 		vm_map_splay_findnext(root, &rlist);
1415668a8aa8SDoug Moore 		entry->offset += entry->start - root->start;
1416668a8aa8SDoug Moore 		root->end = entry->start;
1417668a8aa8SDoug Moore 		max_free_left = root->max_free = vm_size_max(
1418668a8aa8SDoug Moore 		    vm_map_splay_merge_left(header, root, llist),
1419668a8aa8SDoug Moore 		    vm_map_splay_merge_succ(entry, root, entry));
1420668a8aa8SDoug Moore 		max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1421668a8aa8SDoug Moore 	}
1422668a8aa8SDoug Moore 	entry->max_free = vm_size_max(max_free_left, max_free_right);
1423668a8aa8SDoug Moore 	map->root = entry;
14249f701172SKonstantin Belousov 	VM_MAP_ASSERT_CONSISTENT(map);
1425df8bae1dSRodney W. Grimes }
142699c81ca9SAlan Cox 
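/*
 * How vm_map_entry_unlink() disposes of the address range of the
 * unlinked entry: UNLINK_MERGE_NONE leaves the range unmapped, while
 * UNLINK_MERGE_NEXT extends the successor entry downward to cover it.
 */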
14279f701172SKonstantin Belousov enum unlink_merge_type {
14289f701172SKonstantin Belousov 	UNLINK_MERGE_NONE,
14299f701172SKonstantin Belousov 	UNLINK_MERGE_NEXT
14309f701172SKonstantin Belousov };
14319f701172SKonstantin Belousov 
14324e94f402SAlan Cox static void
14335a0879daSDoug Moore vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
14349f701172SKonstantin Belousov     enum unlink_merge_type op)
143599c81ca9SAlan Cox {
1436c1ad5342SDoug Moore 	vm_map_entry_t header, llist, rlist, root;
143785b7bedbSDoug Moore 	vm_size_t max_free_left, max_free_right;
143899c81ca9SAlan Cox 
14393a0916b8SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(map);
144085b7bedbSDoug Moore 	header = &map->header;
14415a0879daSDoug Moore 	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
14429f701172SKonstantin Belousov 	KASSERT(root != NULL,
14439f701172SKonstantin Belousov 	    ("vm_map_entry_unlink: unlink object not mapped"));
14444e94f402SAlan Cox 
14451867d2f2SDoug Moore 	vm_map_splay_findprev(root, &llist);
14469f701172SKonstantin Belousov 	vm_map_splay_findnext(root, &rlist);
14471867d2f2SDoug Moore 	if (op == UNLINK_MERGE_NEXT) {
14489f701172SKonstantin Belousov 		rlist->start = root->start;
14499f701172SKonstantin Belousov 		rlist->offset = root->offset;
14501867d2f2SDoug Moore 	}
145185b7bedbSDoug Moore 	if (llist != header) {
14529f701172SKonstantin Belousov 		root = llist;
14539f701172SKonstantin Belousov 		llist = root->right;
145485b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_left(header, root, llist);
145585b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
145685b7bedbSDoug Moore 	} else if (rlist != header) {
14579f701172SKonstantin Belousov 		root = rlist;
14589f701172SKonstantin Belousov 		rlist = root->left;
145985b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_pred(header, root, llist);
146085b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_right(header, root, rlist);
1461c1ad5342SDoug Moore 	} else {
1462c1ad5342SDoug Moore 		header->left = header->right = header;
14639f701172SKonstantin Belousov 		root = NULL;
1464c1ad5342SDoug Moore 	}
14659f701172SKonstantin Belousov 	if (root != NULL)
146685b7bedbSDoug Moore 		root->max_free = vm_size_max(max_free_left, max_free_right);
146785b7bedbSDoug Moore 	map->root = root;
14689f701172SKonstantin Belousov 	VM_MAP_ASSERT_CONSISTENT(map);
146999c81ca9SAlan Cox 	map->nentries--;
147021c641b2SJohn Baldwin 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
147121c641b2SJohn Baldwin 	    map->nentries, entry);
1472df8bae1dSRodney W. Grimes }
1473df8bae1dSRodney W. Grimes 
1474df8bae1dSRodney W. Grimes /*
1475fa581662SDoug Moore  *	vm_map_entry_resize:
14760164e057SAlan Cox  *
1477fa581662SDoug Moore  *	Resize a vm_map_entry, recompute the amount of free space that
1478fa581662SDoug Moore  *	follows it and propagate that value up the tree.
14790164e057SAlan Cox  *
14800164e057SAlan Cox  *	The map must be locked, and leaves it so.
14810164e057SAlan Cox  */
14820164e057SAlan Cox static void
1483fa581662SDoug Moore vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
14840164e057SAlan Cox {
148585b7bedbSDoug Moore 	vm_map_entry_t header, llist, rlist, root;
14860164e057SAlan Cox 
14879f701172SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(map);
148885b7bedbSDoug Moore 	header = &map->header;
14895a0879daSDoug Moore 	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
14901867d2f2SDoug Moore 	KASSERT(root != NULL, ("%s: resize object not mapped", __func__));
14919f701172SKonstantin Belousov 	vm_map_splay_findnext(root, &rlist);
14921895f520SDoug Moore 	entry->end += grow_amount;
149385b7bedbSDoug Moore 	root->max_free = vm_size_max(
149485b7bedbSDoug Moore 	    vm_map_splay_merge_left(header, root, llist),
149585b7bedbSDoug Moore 	    vm_map_splay_merge_succ(header, root, rlist));
149685b7bedbSDoug Moore 	map->root = root;
14979f701172SKonstantin Belousov 	VM_MAP_ASSERT_CONSISTENT(map);
1498fa581662SDoug Moore 	CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
149973f11451SDoug Moore 	    __func__, map, map->nentries, entry);
15000164e057SAlan Cox }
15010164e057SAlan Cox 
15020164e057SAlan Cox /*
1503d1d3f7e1SDoug Moore  *	vm_map_lookup_entry:	[ internal use only ]
1504df8bae1dSRodney W. Grimes  *
1505d1d3f7e1SDoug Moore  *	Finds the map entry containing (or
1506d1d3f7e1SDoug Moore  *	immediately preceding) the specified address
1507d1d3f7e1SDoug Moore  *	in the given map; the entry is returned
1508d1d3f7e1SDoug Moore  *	in the "entry" parameter.  The boolean
1509d1d3f7e1SDoug Moore  *	result indicates whether the address is
1510d1d3f7e1SDoug Moore  *	actually contained in the map.
1511df8bae1dSRodney W. Grimes  */
1512d1d3f7e1SDoug Moore boolean_t
1513d1d3f7e1SDoug Moore vm_map_lookup_entry(
1514d1d3f7e1SDoug Moore 	vm_map_t map,
1515d1d3f7e1SDoug Moore 	vm_offset_t address,
1516d1d3f7e1SDoug Moore 	vm_map_entry_t *entry)	/* OUT */
1517df8bae1dSRodney W. Grimes {
1518c1ad5342SDoug Moore 	vm_map_entry_t cur, header, lbound, ubound;
1519d1d3f7e1SDoug Moore 	boolean_t locked;
1520df8bae1dSRodney W. Grimes 
15214c3ef59eSAlan Cox 	/*
15224c3ef59eSAlan Cox 	 * If the map is empty, then the map entry immediately preceding
1523d1d3f7e1SDoug Moore 	 * "address" is the map's header.
15244c3ef59eSAlan Cox 	 */
152585b7bedbSDoug Moore 	header = &map->header;
1526d1d3f7e1SDoug Moore 	cur = map->root;
1527d1d3f7e1SDoug Moore 	if (cur == NULL) {
152885b7bedbSDoug Moore 		*entry = header;
1529d1d3f7e1SDoug Moore 		return (FALSE);
1530d1d3f7e1SDoug Moore 	}
1531d1d3f7e1SDoug Moore 	if (address >= cur->start && cur->end > address) {
1532d1d3f7e1SDoug Moore 		*entry = cur;
1533d1d3f7e1SDoug Moore 		return (TRUE);
15349f701172SKonstantin Belousov 	}
15359f701172SKonstantin Belousov 	if ((locked = vm_map_locked(map)) ||
153605a8c414SAlan Cox 	    sx_try_upgrade(&map->lock)) {
153705a8c414SAlan Cox 		/*
153805a8c414SAlan Cox 		 * Splay requires a write lock on the map.  However, it only
153905a8c414SAlan Cox 		 * restructures the binary search tree; it does not otherwise
154005a8c414SAlan Cox 		 * change the map.  Thus, the map's timestamp need not change
154105a8c414SAlan Cox 		 * on a temporary upgrade.
154205a8c414SAlan Cox 		 */
1543d1d3f7e1SDoug Moore 		cur = vm_map_splay(map, address);
1544461587dcSDoug Moore 		if (!locked) {
1545461587dcSDoug Moore 			VM_MAP_UNLOCK_CONSISTENT(map);
154605a8c414SAlan Cox 			sx_downgrade(&map->lock);
1547461587dcSDoug Moore 		}
1548d1d3f7e1SDoug Moore 
1549d1d3f7e1SDoug Moore 		/*
1550d1d3f7e1SDoug Moore 		 * If "address" is contained within a map entry, the new root
1551d1d3f7e1SDoug Moore 		 * is that map entry.  Otherwise, the new root is a map entry
1552d1d3f7e1SDoug Moore 		 * immediately before or after "address".
1553d1d3f7e1SDoug Moore 		 */
1554d1d3f7e1SDoug Moore 		if (address < cur->start) {
155585b7bedbSDoug Moore 			*entry = header;
1556d1d3f7e1SDoug Moore 			return (FALSE);
1557d1d3f7e1SDoug Moore 		}
1558d1d3f7e1SDoug Moore 		*entry = cur;
1559d1d3f7e1SDoug Moore 		return (address < cur->end);
15609f701172SKonstantin Belousov 	}
156105a8c414SAlan Cox 	/*
156205a8c414SAlan Cox 	 * Since the map is only locked for read access, perform a
1563d1d3f7e1SDoug Moore 	 * standard binary search tree lookup for "address".
156405a8c414SAlan Cox 	 */
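	/*
	 * In the threaded tree, a missing child pointer leads back to
	 * lbound or ubound, so reaching either bound means that
	 * "address" lies in the gap between those two entries.
	 */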
1565c1ad5342SDoug Moore 	lbound = ubound = header;
1566c1ad5342SDoug Moore 	for (;;) {
1567d1d3f7e1SDoug Moore 		if (address < cur->start) {
1568c1ad5342SDoug Moore 			ubound = cur;
1569d1d3f7e1SDoug Moore 			cur = cur->left;
1570c1ad5342SDoug Moore 			if (cur == lbound)
1571c1ad5342SDoug Moore 				break;
1572d1d3f7e1SDoug Moore 		} else if (cur->end <= address) {
1573d1d3f7e1SDoug Moore 			lbound = cur;
1574d1d3f7e1SDoug Moore 			cur = cur->right;
1575c1ad5342SDoug Moore 			if (cur == ubound)
1576c1ad5342SDoug Moore 				break;
15779f701172SKonstantin Belousov 		} else {
1578d1d3f7e1SDoug Moore 			*entry = cur;
1579d1d3f7e1SDoug Moore 			return (TRUE);
158005a8c414SAlan Cox 		}
1581c1ad5342SDoug Moore 	}
1582d1d3f7e1SDoug Moore 	*entry = lbound;
1583d1d3f7e1SDoug Moore 	return (FALSE);
1584df8bae1dSRodney W. Grimes }
1585df8bae1dSRodney W. Grimes 
1586df8bae1dSRodney W. Grimes /*
158730dcfc09SJohn Dyson  *	vm_map_insert:
158830dcfc09SJohn Dyson  *
158930dcfc09SJohn Dyson  *	Inserts the given whole VM object into the target
159030dcfc09SJohn Dyson  *	map at the specified address range.  The object's
159130dcfc09SJohn Dyson  *	size should match that of the address range.
159230dcfc09SJohn Dyson  *
159330dcfc09SJohn Dyson  *	Requires that the map be locked, and leaves it so.
15942aaeadf8SMatthew Dillon  *
15952aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
15962aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
159730dcfc09SJohn Dyson  */
159830dcfc09SJohn Dyson int
1599b9dcd593SBruce Evans vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
160033314db0SAlan Cox     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
160130dcfc09SJohn Dyson {
160283704cc2SDoug Moore 	vm_map_entry_t new_entry, next_entry, prev_entry;
1603ef694c1aSEdward Tomasz Napierala 	struct ucred *cred;
16041569205fSKonstantin Belousov 	vm_eflags_t protoeflags;
16058211bd45SKonstantin Belousov 	vm_inherit_t inheritance;
160630dcfc09SJohn Dyson 
16073a0916b8SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(map);
16082e47807cSJeff Roberson 	KASSERT(object != kernel_object ||
160933314db0SAlan Cox 	    (cow & MAP_COPY_ON_WRITE) == 0,
16102e47807cSJeff Roberson 	    ("vm_map_insert: kernel object and COW"));
161133314db0SAlan Cox 	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
161233314db0SAlan Cox 	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
161300de6773SKonstantin Belousov 	KASSERT((prot & ~max) == 0,
161400de6773SKonstantin Belousov 	    ("prot %#x is not subset of max_prot %#x", prot, max));
16153a0916b8SKonstantin Belousov 
161630dcfc09SJohn Dyson 	/*
161730dcfc09SJohn Dyson 	 * Check that the start and end points are not bogus.
161830dcfc09SJohn Dyson 	 */
1619f0165b1cSKonstantin Belousov 	if (start < vm_map_min(map) || end > vm_map_max(map) ||
1620f0165b1cSKonstantin Belousov 	    start >= end)
162130dcfc09SJohn Dyson 		return (KERN_INVALID_ADDRESS);
162230dcfc09SJohn Dyson 
162330dcfc09SJohn Dyson 	/*
162430dcfc09SJohn Dyson 	 * Find the entry prior to the proposed starting address; if it's part
162530dcfc09SJohn Dyson 	 * of an existing entry, this range is bogus.
162630dcfc09SJohn Dyson 	 */
1627723413beSDoug Moore 	if (vm_map_lookup_entry(map, start, &prev_entry))
162830dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
162930dcfc09SJohn Dyson 
163030dcfc09SJohn Dyson 	/*
163130dcfc09SJohn Dyson 	 * Assert that the next entry doesn't overlap the end point.
163230dcfc09SJohn Dyson 	 */
163383704cc2SDoug Moore 	next_entry = vm_map_entry_succ(prev_entry);
163483704cc2SDoug Moore 	if (next_entry->start < end)
163530dcfc09SJohn Dyson 		return (KERN_NO_SPACE);
163630dcfc09SJohn Dyson 
163719bd0d9cSKonstantin Belousov 	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
163819bd0d9cSKonstantin Belousov 	    max != VM_PROT_NONE))
163919bd0d9cSKonstantin Belousov 		return (KERN_INVALID_ARGUMENT);
164019bd0d9cSKonstantin Belousov 
1641afa07f7eSJohn Dyson 	protoeflags = 0;
1642afa07f7eSJohn Dyson 	if (cow & MAP_COPY_ON_WRITE)
1643e5f13bddSAlan Cox 		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
164433314db0SAlan Cox 	if (cow & MAP_NOFAULT)
1645afa07f7eSJohn Dyson 		protoeflags |= MAP_ENTRY_NOFAULT;
16464f79d873SMatthew Dillon 	if (cow & MAP_DISABLE_SYNCER)
16474f79d873SMatthew Dillon 		protoeflags |= MAP_ENTRY_NOSYNC;
16489730a5daSPaul Saab 	if (cow & MAP_DISABLE_COREDUMP)
16499730a5daSPaul Saab 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1650712efe66SAlan Cox 	if (cow & MAP_STACK_GROWS_DOWN)
1651712efe66SAlan Cox 		protoeflags |= MAP_ENTRY_GROWS_DOWN;
1652712efe66SAlan Cox 	if (cow & MAP_STACK_GROWS_UP)
1653712efe66SAlan Cox 		protoeflags |= MAP_ENTRY_GROWS_UP;
1654fe7bcbafSKyle Evans 	if (cow & MAP_WRITECOUNT)
1655fe7bcbafSKyle Evans 		protoeflags |= MAP_ENTRY_WRITECNT;
165678022527SKonstantin Belousov 	if (cow & MAP_VN_EXEC)
165778022527SKonstantin Belousov 		protoeflags |= MAP_ENTRY_VN_EXEC;
165819bd0d9cSKonstantin Belousov 	if ((cow & MAP_CREATE_GUARD) != 0)
165919bd0d9cSKonstantin Belousov 		protoeflags |= MAP_ENTRY_GUARD;
166019bd0d9cSKonstantin Belousov 	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
166119bd0d9cSKonstantin Belousov 		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
166219bd0d9cSKonstantin Belousov 	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
166319bd0d9cSKonstantin Belousov 		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
16648211bd45SKonstantin Belousov 	if (cow & MAP_INHERIT_SHARE)
16658211bd45SKonstantin Belousov 		inheritance = VM_INHERIT_SHARE;
16668211bd45SKonstantin Belousov 	else
16678211bd45SKonstantin Belousov 		inheritance = VM_INHERIT_DEFAULT;
16684f79d873SMatthew Dillon 
1669ef694c1aSEdward Tomasz Napierala 	cred = NULL;
167019bd0d9cSKonstantin Belousov 	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
16713364c323SKonstantin Belousov 		goto charged;
16723364c323SKonstantin Belousov 	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
16733364c323SKonstantin Belousov 	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
16743364c323SKonstantin Belousov 		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
16753364c323SKonstantin Belousov 			return (KERN_RESOURCE_SHORTAGE);
16761569205fSKonstantin Belousov 		KASSERT(object == NULL ||
16771569205fSKonstantin Belousov 		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
1678ef694c1aSEdward Tomasz Napierala 		    object->cred == NULL,
16791569205fSKonstantin Belousov 		    ("overcommit: vm_map_insert o %p", object));
1680ef694c1aSEdward Tomasz Napierala 		cred = curthread->td_ucred;
16813364c323SKonstantin Belousov 	}
16823364c323SKonstantin Belousov 
16833364c323SKonstantin Belousov charged:
1684f8616ebfSAlan Cox 	/* Expand the kernel pmap, if necessary. */
1685f8616ebfSAlan Cox 	if (map == kernel_map && end > kernel_vm_end)
1686f8616ebfSAlan Cox 		pmap_growkernel(end);
16871d284e00SAlan Cox 	if (object != NULL) {
168830dcfc09SJohn Dyson 		/*
16891d284e00SAlan Cox 		 * OBJ_ONEMAPPING must be cleared unless this mapping
16901d284e00SAlan Cox 		 * is trivially proven to be the only mapping for any
16911d284e00SAlan Cox 		 * of the object's pages.  (Object granularity
16921d284e00SAlan Cox 		 * reference counting is insufficient to recognize
16931d284e00SAlan Cox 		 * aliases with precision.)
169430dcfc09SJohn Dyson 		 */
169563967687SJeff Roberson 		if ((object->flags & OBJ_ANON) != 0) {
169689f6b863SAttilio Rao 			VM_OBJECT_WLOCK(object);
16971d284e00SAlan Cox 			if (object->ref_count > 1 || object->shadow_count != 0)
16982aaeadf8SMatthew Dillon 				vm_object_clear_flag(object, OBJ_ONEMAPPING);
169989f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(object);
170063967687SJeff Roberson 		}
17012203c46dSMark Johnston 	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
17022203c46dSMark Johnston 	    protoeflags &&
170378022527SKonstantin Belousov 	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
170478022527SKonstantin Belousov 	    MAP_VN_EXEC)) == 0 &&
1705737e25f7SAlan Cox 	    prev_entry->end == start && (prev_entry->cred == cred ||
17063364c323SKonstantin Belousov 	    (prev_entry->object.vm_object != NULL &&
17071569205fSKonstantin Belousov 	    prev_entry->object.vm_object->cred == cred)) &&
17088cc7e047SJohn Dyson 	    vm_object_coalesce(prev_entry->object.vm_object,
170957a21abaSAlan Cox 	    prev_entry->offset,
17108cc7e047SJohn Dyson 	    (vm_size_t)(prev_entry->end - prev_entry->start),
171160169c88SAlan Cox 	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
171260169c88SAlan Cox 	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
171330dcfc09SJohn Dyson 		/*
17142aaeadf8SMatthew Dillon 		 * We were able to extend the object.  Determine if we
17152aaeadf8SMatthew Dillon 		 * can extend the previous map entry to include the
17162aaeadf8SMatthew Dillon 		 * new range as well.
171730dcfc09SJohn Dyson 		 */
17181569205fSKonstantin Belousov 		if (prev_entry->inheritance == inheritance &&
17191569205fSKonstantin Belousov 		    prev_entry->protection == prot &&
1720737e25f7SAlan Cox 		    prev_entry->max_protection == max &&
1721737e25f7SAlan Cox 		    prev_entry->wired_count == 0) {
1722737e25f7SAlan Cox 			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1723737e25f7SAlan Cox 			    0, ("prev_entry %p has incoherent wiring",
1724737e25f7SAlan Cox 			    prev_entry));
172519bd0d9cSKonstantin Belousov 			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
17261569205fSKonstantin Belousov 				map->size += end - prev_entry->end;
1727fa581662SDoug Moore 			vm_map_entry_resize(map, prev_entry,
17281895f520SDoug Moore 			    end - prev_entry->end);
172983704cc2SDoug Moore 			vm_map_try_merge_entries(map, prev_entry, next_entry);
173030dcfc09SJohn Dyson 			return (KERN_SUCCESS);
173130dcfc09SJohn Dyson 		}
17328cc7e047SJohn Dyson 
17332aaeadf8SMatthew Dillon 		/*
17342aaeadf8SMatthew Dillon 		 * If we can extend the object but cannot extend the
17352aaeadf8SMatthew Dillon 		 * map entry, we have to create a new map entry.  We
17362aaeadf8SMatthew Dillon 		 * must bump the ref count on the extended object to
17374e71e795SMatthew Dillon 		 * account for it.  object may be NULL.
17382aaeadf8SMatthew Dillon 		 */
17392aaeadf8SMatthew Dillon 		object = prev_entry->object.vm_object;
17402aaeadf8SMatthew Dillon 		offset = prev_entry->offset +
17412aaeadf8SMatthew Dillon 		    (prev_entry->end - prev_entry->start);
17428cc7e047SJohn Dyson 		vm_object_reference(object);
1743ef694c1aSEdward Tomasz Napierala 		if (cred != NULL && object != NULL && object->cred != NULL &&
17443364c323SKonstantin Belousov 		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
17453364c323SKonstantin Belousov 			/* Object already accounts for this uid. */
1746ef694c1aSEdward Tomasz Napierala 			cred = NULL;
17473364c323SKonstantin Belousov 		}
1748b18bfc3dSJohn Dyson 	}
174960169c88SAlan Cox 	if (cred != NULL)
175060169c88SAlan Cox 		crhold(cred);
17512aaeadf8SMatthew Dillon 
17522aaeadf8SMatthew Dillon 	/*
175330dcfc09SJohn Dyson 	 * Create a new entry
175430dcfc09SJohn Dyson 	 */
175530dcfc09SJohn Dyson 	new_entry = vm_map_entry_create(map);
175630dcfc09SJohn Dyson 	new_entry->start = start;
175730dcfc09SJohn Dyson 	new_entry->end = end;
1758ef694c1aSEdward Tomasz Napierala 	new_entry->cred = NULL;
175930dcfc09SJohn Dyson 
1760afa07f7eSJohn Dyson 	new_entry->eflags = protoeflags;
176130dcfc09SJohn Dyson 	new_entry->object.vm_object = object;
176230dcfc09SJohn Dyson 	new_entry->offset = offset;
17632267af78SJulian Elischer 
17648211bd45SKonstantin Belousov 	new_entry->inheritance = inheritance;
176530dcfc09SJohn Dyson 	new_entry->protection = prot;
176630dcfc09SJohn Dyson 	new_entry->max_protection = max;
176730dcfc09SJohn Dyson 	new_entry->wired_count = 0;
1768997ac690SKonstantin Belousov 	new_entry->wiring_thread = NULL;
176913458803SAlan Cox 	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1770381b7242SAlan Cox 	new_entry->next_read = start;
1771e5f251d2SAlan Cox 
1772ef694c1aSEdward Tomasz Napierala 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
17731569205fSKonstantin Belousov 	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
1774ef694c1aSEdward Tomasz Napierala 	new_entry->cred = cred;
17753364c323SKonstantin Belousov 
177630dcfc09SJohn Dyson 	/*
177730dcfc09SJohn Dyson 	 * Insert the new entry into the list
177830dcfc09SJohn Dyson 	 */
17799f701172SKonstantin Belousov 	vm_map_entry_link(map, new_entry);
178019bd0d9cSKonstantin Belousov 	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
178130dcfc09SJohn Dyson 		map->size += new_entry->end - new_entry->start;
178230dcfc09SJohn Dyson 
17831a484d28SMatthew Dillon 	/*
1784eaaf9f7fSAlan Cox 	 * Try to coalesce the new entry with both the previous and next
1785eaaf9f7fSAlan Cox 	 * entries in the list.  Previously, we only attempted to coalesce
1786eaaf9f7fSAlan Cox 	 * with the previous entry when object is NULL.  Here, we handle the
1787eaaf9f7fSAlan Cox 	 * other cases, which are less common.
17881a484d28SMatthew Dillon 	 */
178983ea714fSDoug Moore 	vm_map_try_merge_entries(map, prev_entry, new_entry);
179083704cc2SDoug Moore 	vm_map_try_merge_entries(map, new_entry, next_entry);
17914e71e795SMatthew Dillon 
17921569205fSKonstantin Belousov 	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
17931569205fSKonstantin Belousov 		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
17941569205fSKonstantin Belousov 		    end - start, cow & MAP_PREFAULT_PARTIAL);
17954f79d873SMatthew Dillon 	}
1796e972780aSAlan Cox 
179730dcfc09SJohn Dyson 	return (KERN_SUCCESS);
179830dcfc09SJohn Dyson }
179930dcfc09SJohn Dyson 
180030dcfc09SJohn Dyson /*
18010164e057SAlan Cox  *	vm_map_findspace:
18020164e057SAlan Cox  *
18030164e057SAlan Cox  *	Find the first fit (lowest VM address) for "length" free bytes
18040164e057SAlan Cox  *	beginning at address >= start in the given map.
18050164e057SAlan Cox  *
18069f701172SKonstantin Belousov  *	In a vm_map_entry, "max_free" is the maximum amount of
18079f701172SKonstantin Belousov  *	contiguous free space between an entry in its subtree and a
18089f701172SKonstantin Belousov  *	neighbor of that entry.  This allows finding a free region in
18099f701172SKonstantin Belousov  *	one path down the tree, so O(log n) amortized with splay
18109f701172SKonstantin Belousov  *	trees.
18110164e057SAlan Cox  *
18120164e057SAlan Cox  *	The map must be locked, and leaves it so.
18130164e057SAlan Cox  *
18149f701172SKonstantin Belousov  *	Returns: starting address if sufficient space,
18159f701172SKonstantin Belousov  *		 vm_map_max(map)-length+1 if insufficient space.
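 *
 *	A minimal sketch of the failure check used by callers such as
 *	vm_map_alignspace() below:
 *
 *		addr = vm_map_findspace(map, start, length);
 *		if (addr + length > vm_map_max(map))
 *			return (KERN_NO_SPACE);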
1816df8bae1dSRodney W. Grimes  */
18179f701172SKonstantin Belousov vm_offset_t
18189f701172SKonstantin Belousov vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1819df8bae1dSRodney W. Grimes {
182085b7bedbSDoug Moore 	vm_map_entry_t header, llist, rlist, root, y;
182185b7bedbSDoug Moore 	vm_size_t left_length, max_free_left, max_free_right;
1822e65d58a0SDoug Moore 	vm_offset_t gap_end;
1823df8bae1dSRodney W. Grimes 
1824986b43f8SAlan Cox 	/*
1825986b43f8SAlan Cox 	 * Request must fit within min/max VM address and must avoid
1826986b43f8SAlan Cox 	 * address wrap.
1827986b43f8SAlan Cox 	 */
1828f0165b1cSKonstantin Belousov 	start = MAX(start, vm_map_min(map));
1829e65d58a0SDoug Moore 	if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
18309f701172SKonstantin Belousov 		return (vm_map_max(map) - length + 1);
1831df8bae1dSRodney W. Grimes 
18320164e057SAlan Cox 	/* Empty tree means wide open address space. */
18339f701172SKonstantin Belousov 	if (map->root == NULL)
18349f701172SKonstantin Belousov 		return (start);
18350164e057SAlan Cox 
18360164e057SAlan Cox 	/*
1837e65d58a0SDoug Moore 	 * After splay_split, if start is within an entry, push it to the start
1838e65d58a0SDoug Moore 	 * of the following gap.  If rlist is at the end of the gap containing
1839e65d58a0SDoug Moore 	 * start, save the end of that gap in gap_end to see if the gap is big
1840e65d58a0SDoug Moore 	 * enough; otherwise set gap_end to start, to skip gap-checking, and move
1841e65d58a0SDoug Moore 	 * directly to a search of the right subtree.
18420164e057SAlan Cox 	 */
184385b7bedbSDoug Moore 	header = &map->header;
18445a0879daSDoug Moore 	root = vm_map_splay_split(map, start, length, &llist, &rlist);
1845e65d58a0SDoug Moore 	gap_end = rlist->start;
1846e65d58a0SDoug Moore 	if (root != NULL) {
18479f701172SKonstantin Belousov 		start = root->end;
1848c1ad5342SDoug Moore 		if (root->right != rlist)
1849e65d58a0SDoug Moore 			gap_end = start;
185085b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_left(header, root, llist);
185185b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_right(header, root, rlist);
185285b7bedbSDoug Moore 	} else if (rlist != header) {
18539f701172SKonstantin Belousov 		root = rlist;
18549f701172SKonstantin Belousov 		rlist = root->left;
185585b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_pred(header, root, llist);
185685b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_right(header, root, rlist);
18579f701172SKonstantin Belousov 	} else {
18589f701172SKonstantin Belousov 		root = llist;
18599f701172SKonstantin Belousov 		llist = root->right;
186085b7bedbSDoug Moore 		max_free_left = vm_map_splay_merge_left(header, root, llist);
186185b7bedbSDoug Moore 		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
18620164e057SAlan Cox 	}
186385b7bedbSDoug Moore 	root->max_free = vm_size_max(max_free_left, max_free_right);
186485b7bedbSDoug Moore 	map->root = root;
18659f701172SKonstantin Belousov 	VM_MAP_ASSERT_CONSISTENT(map);
1866e65d58a0SDoug Moore 	if (length <= gap_end - start)
18679f701172SKonstantin Belousov 		return (start);
18680164e057SAlan Cox 
18690164e057SAlan Cox 	/* With max_free, can immediately tell if no solution. */
1870c1ad5342SDoug Moore 	if (root->right == header || length > root->right->max_free)
18719f701172SKonstantin Belousov 		return (vm_map_max(map) - length + 1);
18720164e057SAlan Cox 
18730164e057SAlan Cox 	/*
18749f701172SKonstantin Belousov 	 * Splay for the least large-enough gap in the right subtree.
18750164e057SAlan Cox 	 */
187685b7bedbSDoug Moore 	llist = rlist = header;
18779f701172SKonstantin Belousov 	for (left_length = 0;;
18785a0879daSDoug Moore 	    left_length = vm_map_entry_max_free_left(root, llist)) {
18799f701172SKonstantin Belousov 		if (length <= left_length)
1880c1ad5342SDoug Moore 			SPLAY_LEFT_STEP(root, y, llist, rlist,
18815a0879daSDoug Moore 			    length <= vm_map_entry_max_free_left(y, llist));
18829f701172SKonstantin Belousov 		else
1883c1ad5342SDoug Moore 			SPLAY_RIGHT_STEP(root, y, llist, rlist,
18845a0879daSDoug Moore 			    length > vm_map_entry_max_free_left(y, root));
18859f701172SKonstantin Belousov 		if (root == NULL)
18869f701172SKonstantin Belousov 			break;
18870164e057SAlan Cox 	}
18889f701172SKonstantin Belousov 	root = llist;
18899f701172SKonstantin Belousov 	llist = root->right;
189085b7bedbSDoug Moore 	max_free_left = vm_map_splay_merge_left(header, root, llist);
189185b7bedbSDoug Moore 	if (rlist == header) {
189285b7bedbSDoug Moore 		root->max_free = vm_size_max(max_free_left,
189385b7bedbSDoug Moore 		    vm_map_splay_merge_succ(header, root, rlist));
189485b7bedbSDoug Moore 	} else {
18955a0879daSDoug Moore 		y = rlist;
18969f701172SKonstantin Belousov 		rlist = y->left;
189785b7bedbSDoug Moore 		y->max_free = vm_size_max(
189885b7bedbSDoug Moore 		    vm_map_splay_merge_pred(root, y, root),
189985b7bedbSDoug Moore 		    vm_map_splay_merge_right(header, y, rlist));
190085b7bedbSDoug Moore 		root->max_free = vm_size_max(max_free_left, y->max_free);
19019f701172SKonstantin Belousov 	}
190285b7bedbSDoug Moore 	map->root = root;
19039f701172SKonstantin Belousov 	VM_MAP_ASSERT_CONSISTENT(map);
19049f701172SKonstantin Belousov 	return (root->end);
1905df8bae1dSRodney W. Grimes }
1906df8bae1dSRodney W. Grimes 
1907d239bd3cSKonstantin Belousov int
1908d239bd3cSKonstantin Belousov vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1909b8ca4ef2SAlan Cox     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1910d239bd3cSKonstantin Belousov     vm_prot_t max, int cow)
1911d239bd3cSKonstantin Belousov {
1912b8ca4ef2SAlan Cox 	vm_offset_t end;
1913d239bd3cSKonstantin Belousov 	int result;
1914d239bd3cSKonstantin Belousov 
1915d239bd3cSKonstantin Belousov 	end = start + length;
19164648ba0aSKonstantin Belousov 	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
19174648ba0aSKonstantin Belousov 	    object == NULL,
19184648ba0aSKonstantin Belousov 	    ("vm_map_fixed: non-NULL backing object for stack"));
1919897d81a0SKonstantin Belousov 	vm_map_lock(map);
1920d239bd3cSKonstantin Belousov 	VM_MAP_RANGE_CHECK(map, start, end);
192111c42bccSKonstantin Belousov 	if ((cow & MAP_CHECK_EXCL) == 0)
192211c42bccSKonstantin Belousov 		vm_map_delete(map, start, end);
19234648ba0aSKonstantin Belousov 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
19244648ba0aSKonstantin Belousov 		result = vm_map_stack_locked(map, start, length, sgrowsiz,
19254648ba0aSKonstantin Belousov 		    prot, max, cow);
19264648ba0aSKonstantin Belousov 	} else {
19274648ba0aSKonstantin Belousov 		result = vm_map_insert(map, object, offset, start, end,
19284648ba0aSKonstantin Belousov 		    prot, max, cow);
19294648ba0aSKonstantin Belousov 	}
1930d239bd3cSKonstantin Belousov 	vm_map_unlock(map);
1931d239bd3cSKonstantin Belousov 	return (result);
1932d239bd3cSKonstantin Belousov }
1933d239bd3cSKonstantin Belousov 
1934fa50a355SKonstantin Belousov static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
1935fa50a355SKonstantin Belousov static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
1936fa50a355SKonstantin Belousov 
1937fa50a355SKonstantin Belousov static int cluster_anon = 1;
1938fa50a355SKonstantin Belousov SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
1939fa50a355SKonstantin Belousov     &cluster_anon, 0,
1940484e9d03SKonstantin Belousov     "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
1941484e9d03SKonstantin Belousov 
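/*
 * Should an allocation at "addr" try to cluster with existing
 * anonymous mappings?  The policy is selected by the vm.cluster_anon
 * sysctl defined above.
 */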
1942484e9d03SKonstantin Belousov static bool
1943484e9d03SKonstantin Belousov clustering_anon_allowed(vm_offset_t addr)
1944484e9d03SKonstantin Belousov {
1945484e9d03SKonstantin Belousov 
1946484e9d03SKonstantin Belousov 	switch (cluster_anon) {
1947484e9d03SKonstantin Belousov 	case 0:
1948484e9d03SKonstantin Belousov 		return (false);
1949484e9d03SKonstantin Belousov 	case 1:
1950484e9d03SKonstantin Belousov 		return (addr == 0);
1951484e9d03SKonstantin Belousov 	case 2:
1952484e9d03SKonstantin Belousov 	default:
1953484e9d03SKonstantin Belousov 		return (true);
1954484e9d03SKonstantin Belousov 	}
1955484e9d03SKonstantin Belousov }
1956fa50a355SKonstantin Belousov 
1957fa50a355SKonstantin Belousov static long aslr_restarts;
1958fa50a355SKonstantin Belousov SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
1959fa50a355SKonstantin Belousov     &aslr_restarts, 0,
1960fa50a355SKonstantin Belousov     "Number of aslr failures");
1961fa50a355SKonstantin Belousov 
1962fa50a355SKonstantin Belousov #define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
1963fa50a355SKonstantin Belousov 
1964df8bae1dSRodney W. Grimes /*
1965fec29688SAlan Cox  * Searches for the specified amount of free space in the given map with the
1966fec29688SAlan Cox  * specified alignment.  Performs an address-ordered, first-fit search from
1967fec29688SAlan Cox  * the given address "*addr", with an optional upper bound "max_addr".  If the
1968fec29688SAlan Cox  * parameter "alignment" is zero, then the alignment is computed from the
1969fec29688SAlan Cox  * given (object, offset) pair so as to enable the greatest possible use of
1970fec29688SAlan Cox  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
1971fec29688SAlan Cox  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
1972fec29688SAlan Cox  *
1973fec29688SAlan Cox  * The map must be locked.  Initially, there must be at least "length" bytes
1974fec29688SAlan Cox  * of free space at the given address.
1975fec29688SAlan Cox  */
1976fec29688SAlan Cox static int
1977fec29688SAlan Cox vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1978fec29688SAlan Cox     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
1979fec29688SAlan Cox     vm_offset_t alignment)
1980fec29688SAlan Cox {
1981fec29688SAlan Cox 	vm_offset_t aligned_addr, free_addr;
1982fec29688SAlan Cox 
1983fec29688SAlan Cox 	VM_MAP_ASSERT_LOCKED(map);
1984fec29688SAlan Cox 	free_addr = *addr;
19859f701172SKonstantin Belousov 	KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
1986e65d58a0SDoug Moore 	    ("caller failed to provide space %#jx at address %p",
1987e65d58a0SDoug Moore 	     (uintmax_t)length, (void *)free_addr));
1988fec29688SAlan Cox 	for (;;) {
1989fec29688SAlan Cox 		/*
1990fec29688SAlan Cox 		 * At the start of every iteration, the free space at address
1991fec29688SAlan Cox 		 * "*addr" is at least "length" bytes.
1992fec29688SAlan Cox 		 */
1993fec29688SAlan Cox 		if (alignment == 0)
1994fec29688SAlan Cox 			pmap_align_superpage(object, offset, addr, length);
1995fec29688SAlan Cox 		else if ((*addr & (alignment - 1)) != 0) {
1996fec29688SAlan Cox 			*addr &= ~(alignment - 1);
1997fec29688SAlan Cox 			*addr += alignment;
1998fec29688SAlan Cox 		}
1999fec29688SAlan Cox 		aligned_addr = *addr;
2000fec29688SAlan Cox 		if (aligned_addr == free_addr) {
2001fec29688SAlan Cox 			/*
2002fec29688SAlan Cox 			 * Alignment did not change "*addr", so "*addr" must
2003fec29688SAlan Cox 			 * still provide sufficient free space.
2004fec29688SAlan Cox 			 */
2005fec29688SAlan Cox 			return (KERN_SUCCESS);
2006fec29688SAlan Cox 		}
2007fec29688SAlan Cox 
2008fec29688SAlan Cox 		/*
2009fec29688SAlan Cox 		 * Test for address wrap on "*addr".  A wrapped "*addr" could
2010fec29688SAlan Cox 		 * be a valid address, in which case vm_map_findspace() cannot
2011fec29688SAlan Cox 		 * be relied upon to fail.
2012fec29688SAlan Cox 		 */
20139f701172SKonstantin Belousov 		if (aligned_addr < free_addr)
20149f701172SKonstantin Belousov 			return (KERN_NO_SPACE);
20159f701172SKonstantin Belousov 		*addr = vm_map_findspace(map, aligned_addr, length);
20169f701172SKonstantin Belousov 		if (*addr + length > vm_map_max(map) ||
2017fec29688SAlan Cox 		    (max_addr != 0 && *addr + length > max_addr))
2018fec29688SAlan Cox 			return (KERN_NO_SPACE);
2019fec29688SAlan Cox 		free_addr = *addr;
2020fec29688SAlan Cox 		if (free_addr == aligned_addr) {
2021fec29688SAlan Cox 			/*
2022fec29688SAlan Cox 			 * If a successful call to vm_map_findspace() did not
2023fec29688SAlan Cox 			 * change "*addr", then "*addr" must still be aligned
2024fec29688SAlan Cox 			 * and provide sufficient free space.
2025fec29688SAlan Cox 			 */
2026fec29688SAlan Cox 			return (KERN_SUCCESS);
2027fec29688SAlan Cox 		}
2028fec29688SAlan Cox 	}
2029fec29688SAlan Cox }
2030fec29688SAlan Cox 
2031fec29688SAlan Cox /*
2032df8bae1dSRodney W. Grimes  *	vm_map_find finds an unallocated region in the target address
2033df8bae1dSRodney W. Grimes  *	map with the given length.  The search is defined to be
2034df8bae1dSRodney W. Grimes  *	first-fit from the specified address; the region found is
2035df8bae1dSRodney W. Grimes  *	returned in the same parameter.
2036df8bae1dSRodney W. Grimes  *
20372aaeadf8SMatthew Dillon  *	If object is non-NULL, ref count must be bumped by caller
20382aaeadf8SMatthew Dillon  *	prior to making call to account for the new entry.
2039df8bae1dSRodney W. Grimes  */
2040df8bae1dSRodney W. Grimes int
2041b9dcd593SBruce Evans vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2042b9dcd593SBruce Evans 	    vm_offset_t *addr,	/* IN/OUT */
2043edb572a3SJohn Baldwin 	    vm_size_t length, vm_offset_t max_addr, int find_space,
2044edb572a3SJohn Baldwin 	    vm_prot_t prot, vm_prot_t max, int cow)
2045df8bae1dSRodney W. Grimes {
2046fa50a355SKonstantin Belousov 	vm_offset_t alignment, curr_min_addr, min_addr;
2047fa50a355SKonstantin Belousov 	int gap, pidx, rv, try;
2048fa50a355SKonstantin Belousov 	bool cluster, en_aslr, update_anon;
2049df8bae1dSRodney W. Grimes 
20504648ba0aSKonstantin Belousov 	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
20514648ba0aSKonstantin Belousov 	    object == NULL,
20524648ba0aSKonstantin Belousov 	    ("vm_map_find: non-NULL backing object for stack"));
2053ea7e7006SKonstantin Belousov 	MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
2054ea7e7006SKonstantin Belousov 	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
2055ff74a3faSJohn Baldwin 	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
2056ff74a3faSJohn Baldwin 	    (object->flags & OBJ_COLORED) == 0))
2057ff74a3faSJohn Baldwin 		find_space = VMFS_ANY_SPACE;
20585aa60b6fSJohn Baldwin 	if (find_space >> 8 != 0) {
20595aa60b6fSJohn Baldwin 		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
20605aa60b6fSJohn Baldwin 		alignment = (vm_offset_t)1 << (find_space >> 8);
20615aa60b6fSJohn Baldwin 	} else
20625aa60b6fSJohn Baldwin 		alignment = 0;
2063fa50a355SKonstantin Belousov 	en_aslr = (map->flags & MAP_ASLR) != 0;
2064484e9d03SKonstantin Belousov 	update_anon = cluster = clustering_anon_allowed(*addr) &&
2065fa50a355SKonstantin Belousov 	    (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2066fa50a355SKonstantin Belousov 	    find_space != VMFS_NO_SPACE && object == NULL &&
2067fa50a355SKonstantin Belousov 	    (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
2068fa50a355SKonstantin Belousov 	    MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
2069fa50a355SKonstantin Belousov 	curr_min_addr = min_addr = *addr;
2070fa50a355SKonstantin Belousov 	if (en_aslr && min_addr == 0 && !cluster &&
2071fa50a355SKonstantin Belousov 	    find_space != VMFS_NO_SPACE &&
2072fa50a355SKonstantin Belousov 	    (map->flags & MAP_ASLR_IGNSTART) != 0)
2073fa50a355SKonstantin Belousov 		curr_min_addr = min_addr = vm_map_min(map);
2074fa50a355SKonstantin Belousov 	try = 0;
20754d572bb3SAlan Cox 	vm_map_lock(map);
2076fa50a355SKonstantin Belousov 	if (cluster) {
2077fa50a355SKonstantin Belousov 		curr_min_addr = map->anon_loc;
2078fa50a355SKonstantin Belousov 		if (curr_min_addr == 0)
2079fa50a355SKonstantin Belousov 			cluster = false;
2080fa50a355SKonstantin Belousov 	}
208126c538ffSAlan Cox 	if (find_space != VMFS_NO_SPACE) {
2082fec29688SAlan Cox 		KASSERT(find_space == VMFS_ANY_SPACE ||
2083fec29688SAlan Cox 		    find_space == VMFS_OPTIMAL_SPACE ||
2084fec29688SAlan Cox 		    find_space == VMFS_SUPER_SPACE ||
2085fec29688SAlan Cox 		    alignment != 0, ("unexpected VMFS flag"));
2086fec29688SAlan Cox again:
2087fa50a355SKonstantin Belousov 		/*
2088fa50a355SKonstantin Belousov 		 * When creating an anonymous mapping, try clustering
2089fa50a355SKonstantin Belousov 		 * with an existing anonymous mapping first.
2090fa50a355SKonstantin Belousov 		 *
2091fa50a355SKonstantin Belousov 		 * We make up to two attempts to find address space
2092fa50a355SKonstantin Belousov 		 * for a given find_space value. The first attempt may
2093fa50a355SKonstantin Belousov 		 * apply randomization or may cluster with an existing
2094fa50a355SKonstantin Belousov 		 * anonymous mapping. If this first attempt fails,
2095fa50a355SKonstantin Belousov 		 * perform a first-fit search of the available address
2096fa50a355SKonstantin Belousov 		 * space.
2097fa50a355SKonstantin Belousov 		 *
2098fa50a355SKonstantin Belousov 		 * If all tries fail and find_space is
2099fa50a355SKonstantin Belousov 		 * VMFS_OPTIMAL_SPACE, fall back to VMFS_ANY_SPACE,
2100fa50a355SKonstantin Belousov 		 * again enabling clustering and randomization.
2101fa50a355SKonstantin Belousov 		 */
2102fa50a355SKonstantin Belousov 		try++;
2103fa50a355SKonstantin Belousov 		MPASS(try <= 2);
2104fa50a355SKonstantin Belousov 
2105fa50a355SKonstantin Belousov 		if (try == 2) {
2106fa50a355SKonstantin Belousov 			/*
2107fa50a355SKonstantin Belousov 			 * Second try: we failed either to find a
2108fa50a355SKonstantin Belousov 			 * suitable region for randomizing the
2109fa50a355SKonstantin Belousov 			 * allocation, or to cluster with an existing
2110fa50a355SKonstantin Belousov 			 * mapping.  Retry with free run.
2111fa50a355SKonstantin Belousov 			 */
2112fa50a355SKonstantin Belousov 			curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2113fa50a355SKonstantin Belousov 			    vm_map_min(map) : min_addr;
2114fa50a355SKonstantin Belousov 			atomic_add_long(&aslr_restarts, 1);
2115fa50a355SKonstantin Belousov 		}
2116fa50a355SKonstantin Belousov 
2117fa50a355SKonstantin Belousov 		if (try == 1 && en_aslr && !cluster) {
2118fa50a355SKonstantin Belousov 			/*
2119fa50a355SKonstantin Belousov 			 * Find space for allocation, including
2120fa50a355SKonstantin Belousov 			 * gap needed for later randomization.
2121fa50a355SKonstantin Belousov 			 */
2122fa50a355SKonstantin Belousov 			pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
2123fa50a355SKonstantin Belousov 			    (find_space == VMFS_SUPER_SPACE || find_space ==
2124fa50a355SKonstantin Belousov 			    VMFS_OPTIMAL_SPACE) ? 1 : 0;
2125fa50a355SKonstantin Belousov 			gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
2126fa50a355SKonstantin Belousov 			    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
2127fa50a355SKonstantin Belousov 			    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
21289f701172SKonstantin Belousov 			*addr = vm_map_findspace(map, curr_min_addr,
21299f701172SKonstantin Belousov 			    length + gap * pagesizes[pidx]);
21309f701172SKonstantin Belousov 			if (*addr + length + gap * pagesizes[pidx] >
2131a5a02ef4SKonstantin Belousov 			    vm_map_max(map))
2132fa50a355SKonstantin Belousov 				goto again;
2133fa50a355SKonstantin Belousov 			/* And randomize the start address. */
2134fa50a355SKonstantin Belousov 			*addr += (arc4random() % gap) * pagesizes[pidx];
21355019dac9SKonstantin Belousov 			if (max_addr != 0 && *addr + length > max_addr)
21365019dac9SKonstantin Belousov 				goto again;
21379f701172SKonstantin Belousov 		} else {
21389f701172SKonstantin Belousov 			*addr = vm_map_findspace(map, curr_min_addr, length);
21399f701172SKonstantin Belousov 			if (*addr + length > vm_map_max(map) ||
2140edb572a3SJohn Baldwin 			    (max_addr != 0 && *addr + length > max_addr)) {
2141fa50a355SKonstantin Belousov 				if (cluster) {
2142fa50a355SKonstantin Belousov 					cluster = false;
2143fa50a355SKonstantin Belousov 					MPASS(try == 1);
2144fa50a355SKonstantin Belousov 					goto again;
2145fa50a355SKonstantin Belousov 				}
2146fec29688SAlan Cox 				rv = KERN_NO_SPACE;
2147fec29688SAlan Cox 				goto done;
2148fec29688SAlan Cox 			}
21499f701172SKonstantin Belousov 		}
2150fa50a355SKonstantin Belousov 
2151fec29688SAlan Cox 		if (find_space != VMFS_ANY_SPACE &&
2152fec29688SAlan Cox 		    (rv = vm_map_alignspace(map, object, offset, addr, length,
2153fec29688SAlan Cox 		    max_addr, alignment)) != KERN_SUCCESS) {
2154ff74a3faSJohn Baldwin 			if (find_space == VMFS_OPTIMAL_SPACE) {
2155ff74a3faSJohn Baldwin 				find_space = VMFS_ANY_SPACE;
2156fa50a355SKonstantin Belousov 				curr_min_addr = min_addr;
2157fa50a355SKonstantin Belousov 				cluster = update_anon;
2158fa50a355SKonstantin Belousov 				try = 0;
2159ff74a3faSJohn Baldwin 				goto again;
2160ff74a3faSJohn Baldwin 			}
2161fec29688SAlan Cox 			goto done;
2162df8bae1dSRodney W. Grimes 		}
2163ea7e7006SKonstantin Belousov 	} else if ((cow & MAP_REMAP) != 0) {
2164ea7e7006SKonstantin Belousov 		if (*addr < vm_map_min(map) ||
2165ea7e7006SKonstantin Belousov 		    *addr + length > vm_map_max(map) ||
2166ea7e7006SKonstantin Belousov 		    *addr + length <= length) {
2167ea7e7006SKonstantin Belousov 			rv = KERN_INVALID_ADDRESS;
2168ea7e7006SKonstantin Belousov 			goto done;
2169ea7e7006SKonstantin Belousov 		}
2170ea7e7006SKonstantin Belousov 		vm_map_delete(map, *addr, *addr + length);
2171df8bae1dSRodney W. Grimes 	}
21724648ba0aSKonstantin Belousov 	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
2173fec29688SAlan Cox 		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
2174fec29688SAlan Cox 		    max, cow);
21754648ba0aSKonstantin Belousov 	} else {
2176fec29688SAlan Cox 		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2177fec29688SAlan Cox 		    prot, max, cow);
21784648ba0aSKonstantin Belousov 	}
2179fa50a355SKonstantin Belousov 	if (rv == KERN_SUCCESS && update_anon)
2180fa50a355SKonstantin Belousov 		map->anon_loc = *addr + length;
2181fec29688SAlan Cox done:
2182df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
2183fec29688SAlan Cox 	return (rv);
2184df8bae1dSRodney W. Grimes }
2185df8bae1dSRodney W. Grimes 
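/*
 *	Illustrative sketch (not part of this file): a typical anonymous
 *	allocation that lets vm_map_find() pick the placement.  The names
 *	"map" and "size" are assumed to be supplied by the caller.
 *
 *		vm_offset_t addr;
 *		int rv;
 *
 *		addr = vm_map_min(map);	// search hint, updated on return
 *		rv = vm_map_find(map, NULL, 0, &addr, size, 0,
 *		    VMFS_OPTIMAL_SPACE, VM_PROT_RW, VM_PROT_ALL, 0);
 *		if (rv != KERN_SUCCESS)
 *			return (rv);
 *		// [addr, addr + size) is now mapped
 */
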
2186e8502826SKonstantin Belousov /*
2187e8502826SKonstantin Belousov  *	vm_map_find_min() is a variant of vm_map_find() that takes an
2188e8502826SKonstantin Belousov  *	additional parameter (min_addr) and treats the given address
2189e8502826SKonstantin Belousov  *	(*addr) differently.  Specifically, it treats *addr as a hint
2190e8502826SKonstantin Belousov  *	and not as the minimum address where the mapping is created.
2191e8502826SKonstantin Belousov  *
2192e8502826SKonstantin Belousov  *	This function works in two phases.  First, it tries to
2193e8502826SKonstantin Belousov  *	allocate above the hint.  If that fails and the hint is
2194e8502826SKonstantin Belousov  *	greater than min_addr, it performs a second pass, replacing
2195e8502826SKonstantin Belousov  *	the hint with min_addr as the minimum address for the
2196e8502826SKonstantin Belousov  *	allocation.
2197e8502826SKonstantin Belousov  */
21986a97a3f7SKonstantin Belousov int
21996a97a3f7SKonstantin Belousov vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
22006a97a3f7SKonstantin Belousov     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
22016a97a3f7SKonstantin Belousov     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
22026a97a3f7SKonstantin Belousov     int cow)
22036a97a3f7SKonstantin Belousov {
22046a97a3f7SKonstantin Belousov 	vm_offset_t hint;
22056a97a3f7SKonstantin Belousov 	int rv;
22066a97a3f7SKonstantin Belousov 
22076a97a3f7SKonstantin Belousov 	hint = *addr;
22086a97a3f7SKonstantin Belousov 	for (;;) {
22096a97a3f7SKonstantin Belousov 		rv = vm_map_find(map, object, offset, addr, length, max_addr,
22106a97a3f7SKonstantin Belousov 		    find_space, prot, max, cow);
22116a97a3f7SKonstantin Belousov 		if (rv == KERN_SUCCESS || min_addr >= hint)
22126a97a3f7SKonstantin Belousov 			return (rv);
22137683ad70SKonstantin Belousov 		*addr = hint = min_addr;
22146a97a3f7SKonstantin Belousov 	}
22156a97a3f7SKonstantin Belousov }
22166a97a3f7SKonstantin Belousov 
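/*
 *	Worked example with hypothetical addresses: given *addr == 0x40000000
 *	(the hint) and min_addr == 0x10000000, the first vm_map_find() call
 *	searches upward from 0x40000000.  If that fails with KERN_NO_SPACE,
 *	the loop retries exactly once from 0x10000000, so a sufficiently
 *	large hole below the hint can still satisfy the request.
 */
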
221792e78c10SAlan Cox /*
221892e78c10SAlan Cox  * A map entry with any of the following flags set must not be merged with
221992e78c10SAlan Cox  * another entry.
222092e78c10SAlan Cox  */
222192e78c10SAlan Cox #define	MAP_ENTRY_NOMERGE_MASK	(MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
222278022527SKonstantin Belousov 	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
222392e78c10SAlan Cox 
222407424462SKonstantin Belousov static bool
222507424462SKonstantin Belousov vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
222607424462SKonstantin Belousov {
222707424462SKonstantin Belousov 
222892e78c10SAlan Cox 	KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
222992e78c10SAlan Cox 	    (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
223092e78c10SAlan Cox 	    ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
223192e78c10SAlan Cox 	    prev, entry));
223207424462SKonstantin Belousov 	return (prev->end == entry->start &&
223307424462SKonstantin Belousov 	    prev->object.vm_object == entry->object.vm_object &&
223407424462SKonstantin Belousov 	    (prev->object.vm_object == NULL ||
223592e78c10SAlan Cox 	    prev->offset + (prev->end - prev->start) == entry->offset) &&
223607424462SKonstantin Belousov 	    prev->eflags == entry->eflags &&
223707424462SKonstantin Belousov 	    prev->protection == entry->protection &&
223807424462SKonstantin Belousov 	    prev->max_protection == entry->max_protection &&
223907424462SKonstantin Belousov 	    prev->inheritance == entry->inheritance &&
224007424462SKonstantin Belousov 	    prev->wired_count == entry->wired_count &&
224107424462SKonstantin Belousov 	    prev->cred == entry->cred);
224207424462SKonstantin Belousov }
224307424462SKonstantin Belousov 
224407424462SKonstantin Belousov static void
224507424462SKonstantin Belousov vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
224607424462SKonstantin Belousov {
224707424462SKonstantin Belousov 
224807424462SKonstantin Belousov 	/*
224992e78c10SAlan Cox 	 * If the backing object is a vnode object, vm_object_deallocate()
225092e78c10SAlan Cox 	 * calls vrele().  However, vrele() does not lock the vnode because
225192e78c10SAlan Cox 	 * the vnode has additional references.  Thus, the map lock can be
225292e78c10SAlan Cox 	 * kept without causing a lock-order reversal with the vnode lock.
225307424462SKonstantin Belousov 	 *
225492e78c10SAlan Cox 	 * Since we count the number of virtual page mappings in
225592e78c10SAlan Cox 	 * object->un_pager.vnp.writemappings, the writemappings value
225692e78c10SAlan Cox 	 * should not be adjusted when the entry is disposed of.
225707424462SKonstantin Belousov 	 */
225807424462SKonstantin Belousov 	if (entry->object.vm_object != NULL)
225907424462SKonstantin Belousov 		vm_object_deallocate(entry->object.vm_object);
226007424462SKonstantin Belousov 	if (entry->cred != NULL)
226107424462SKonstantin Belousov 		crfree(entry->cred);
226207424462SKonstantin Belousov 	vm_map_entry_dispose(map, entry);
226307424462SKonstantin Belousov }
226407424462SKonstantin Belousov 
2265df8bae1dSRodney W. Grimes /*
226683ea714fSDoug Moore  *	vm_map_try_merge_entries:
226767bf6868SJohn Dyson  *
226883ea714fSDoug Moore  *	Compare the given map entry to its predecessor, and merge the predecessor
226983ea714fSDoug Moore  *	into it if possible.  The entry remains valid, and may be extended.
227083ea714fSDoug Moore  *	The predecessor may be deleted.
22714e71e795SMatthew Dillon  *
22724e71e795SMatthew Dillon  *	The map must be locked.
2273df8bae1dSRodney W. Grimes  */
22740afcd3afSAlan Cox void
22752767c9f3SDoug Moore vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
22762767c9f3SDoug Moore     vm_map_entry_t entry)
2277df8bae1dSRodney W. Grimes {
2278df8bae1dSRodney W. Grimes 
227983ea714fSDoug Moore 	VM_MAP_ASSERT_LOCKED(map);
228083ea714fSDoug Moore 	if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
22812767c9f3SDoug Moore 	    vm_map_mergeable_neighbors(prev_entry, entry)) {
22822767c9f3SDoug Moore 		vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
22832767c9f3SDoug Moore 		vm_map_merged_neighbor_dispose(map, prev_entry);
2284308c24baSJohn Dyson 	}
2285df8bae1dSRodney W. Grimes }
228692e78c10SAlan Cox 
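/*
 *	Illustrative sketch (not part of this file): callers that rewrite a
 *	range of entries typically drive this function from a loop like the
 *	one in vm_map_inherit() below, merging as they advance and once more
 *	after the loop for the final pair of entries:
 *
 *		for (prev = vm_map_entry_pred(entry); entry->start < end;
 *		    prev = entry, entry = vm_map_entry_succ(entry)) {
 *			// ... modify "entry" ...
 *			vm_map_try_merge_entries(map, prev, entry);
 *		}
 *		vm_map_try_merge_entries(map, prev, entry);
 */
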
2287df8bae1dSRodney W. Grimes /*
2288af1d6d6aSDoug Moore  *	vm_map_entry_back:
2289af1d6d6aSDoug Moore  *
2290af1d6d6aSDoug Moore  *	Allocate an object to back a map entry.
2291af1d6d6aSDoug Moore  */
2292af1d6d6aSDoug Moore static inline void
2293af1d6d6aSDoug Moore vm_map_entry_back(vm_map_entry_t entry)
2294af1d6d6aSDoug Moore {
2295af1d6d6aSDoug Moore 	vm_object_t object;
2296af1d6d6aSDoug Moore 
2297af1d6d6aSDoug Moore 	KASSERT(entry->object.vm_object == NULL,
2298af1d6d6aSDoug Moore 	    ("map entry %p has backing object", entry));
2299af1d6d6aSDoug Moore 	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2300af1d6d6aSDoug Moore 	    ("map entry %p is a submap", entry));
230167388836SKonstantin Belousov 	object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
230267388836SKonstantin Belousov 	    entry->cred, entry->end - entry->start);
2303af1d6d6aSDoug Moore 	entry->object.vm_object = object;
2304af1d6d6aSDoug Moore 	entry->offset = 0;
2305af1d6d6aSDoug Moore 	entry->cred = NULL;
2306af1d6d6aSDoug Moore }
2307af1d6d6aSDoug Moore 
2308af1d6d6aSDoug Moore /*
2309af1d6d6aSDoug Moore  *	vm_map_entry_charge_object
2310af1d6d6aSDoug Moore  *
2311af1d6d6aSDoug Moore  *	If there is no object backing this entry, create one.  Otherwise, if
2312af1d6d6aSDoug Moore  *	the entry has cred, give it to the backing object.
2313af1d6d6aSDoug Moore  */
2314af1d6d6aSDoug Moore static inline void
2315af1d6d6aSDoug Moore vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2316af1d6d6aSDoug Moore {
2317af1d6d6aSDoug Moore 
2318af1d6d6aSDoug Moore 	VM_MAP_ASSERT_LOCKED(map);
2319af1d6d6aSDoug Moore 	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2320af1d6d6aSDoug Moore 	    ("map entry %p is a submap", entry));
2321af1d6d6aSDoug Moore 	if (entry->object.vm_object == NULL && !map->system_map &&
2322af1d6d6aSDoug Moore 	    (entry->eflags & MAP_ENTRY_GUARD) == 0)
2323af1d6d6aSDoug Moore 		vm_map_entry_back(entry);
2324af1d6d6aSDoug Moore 	else if (entry->object.vm_object != NULL &&
2325af1d6d6aSDoug Moore 	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2326af1d6d6aSDoug Moore 	    entry->cred != NULL) {
2327af1d6d6aSDoug Moore 		VM_OBJECT_WLOCK(entry->object.vm_object);
2328af1d6d6aSDoug Moore 		KASSERT(entry->object.vm_object->cred == NULL,
2329af1d6d6aSDoug Moore 		    ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
2330af1d6d6aSDoug Moore 		entry->object.vm_object->cred = entry->cred;
2331af1d6d6aSDoug Moore 		entry->object.vm_object->charge = entry->end - entry->start;
2332af1d6d6aSDoug Moore 		VM_OBJECT_WUNLOCK(entry->object.vm_object);
2333af1d6d6aSDoug Moore 		entry->cred = NULL;
2334af1d6d6aSDoug Moore 	}
2335af1d6d6aSDoug Moore }
2336af1d6d6aSDoug Moore 
2337af1d6d6aSDoug Moore /*
2338037c0994SDoug Moore  *	vm_map_entry_clone
2339037c0994SDoug Moore  *
2340037c0994SDoug Moore  *	Create a duplicate map entry for clipping.
2341037c0994SDoug Moore  */
2342037c0994SDoug Moore static vm_map_entry_t
2343037c0994SDoug Moore vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2344037c0994SDoug Moore {
2345037c0994SDoug Moore 	vm_map_entry_t new_entry;
2346037c0994SDoug Moore 
2347037c0994SDoug Moore 	VM_MAP_ASSERT_LOCKED(map);
2348037c0994SDoug Moore 
2349037c0994SDoug Moore 	/*
2350037c0994SDoug Moore 	 * Create a backing object now, if none exists, so that more individual
2351037c0994SDoug Moore 	 * objects won't be created after the map entry is split.
2352037c0994SDoug Moore 	 */
2353037c0994SDoug Moore 	vm_map_entry_charge_object(map, entry);
2354037c0994SDoug Moore 
2355037c0994SDoug Moore 	/* Clone the entry. */
2356037c0994SDoug Moore 	new_entry = vm_map_entry_create(map);
2357037c0994SDoug Moore 	*new_entry = *entry;
2358037c0994SDoug Moore 	if (new_entry->cred != NULL)
2359037c0994SDoug Moore 		crhold(entry->cred);
2360037c0994SDoug Moore 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2361037c0994SDoug Moore 		vm_object_reference(new_entry->object.vm_object);
2362037c0994SDoug Moore 		vm_map_entry_set_vnode_text(new_entry, true);
2363037c0994SDoug Moore 		/*
2364037c0994SDoug Moore 		 * The object->un_pager.vnp.writemappings for the object of
2365037c0994SDoug Moore 		 * MAP_ENTRY_WRITECNT type entry shall be kept as is here.  The
2366037c0994SDoug Moore 		 * virtual pages are re-distributed among the clipped entries,
2367037c0994SDoug Moore 		 * so the sum is left the same.
2368037c0994SDoug Moore 		 */
2369037c0994SDoug Moore 	}
2370037c0994SDoug Moore 	return (new_entry);
2371037c0994SDoug Moore }
2372037c0994SDoug Moore 
2373037c0994SDoug Moore /*
2374df8bae1dSRodney W. Grimes  *	vm_map_clip_start:	[ internal use only ]
2375df8bae1dSRodney W. Grimes  *
2376df8bae1dSRodney W. Grimes  *	Asserts that the given entry begins at or after
2377df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
2378df8bae1dSRodney W. Grimes  *	it splits the entry into two.
2379df8bae1dSRodney W. Grimes  */
2380df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \
2381df8bae1dSRodney W. Grimes { \
2382df8bae1dSRodney W. Grimes 	if (startaddr > entry->start) \
2383df8bae1dSRodney W. Grimes 		_vm_map_clip_start(map, entry, startaddr); \
2384df8bae1dSRodney W. Grimes }
2385df8bae1dSRodney W. Grimes 
2386df8bae1dSRodney W. Grimes /*
2387df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
2388df8bae1dSRodney W. Grimes  *	the entry must be split.
2389df8bae1dSRodney W. Grimes  */
23900d94caffSDavid Greenman static void
23911b40f8c0SMatthew Dillon _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
2392df8bae1dSRodney W. Grimes {
2393c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
2394df8bae1dSRodney W. Grimes 
23953a0916b8SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(map);
2396ed11e4d7SMark Johnston 	KASSERT(entry->end > start && entry->start < start,
2397ed11e4d7SMark Johnston 	    ("_vm_map_clip_start: invalid clip of entry %p", entry));
23983a0916b8SKonstantin Belousov 
2399037c0994SDoug Moore 	new_entry = vm_map_entry_clone(map, entry);
2400df8bae1dSRodney W. Grimes 
24014766eba1SDoug Moore 	/*
24024766eba1SDoug Moore 	 * Split off the front portion.  Insert the new entry BEFORE this one,
24034766eba1SDoug Moore 	 * so that this entry has the specified starting address.
24044766eba1SDoug Moore 	 */
2405df8bae1dSRodney W. Grimes 	new_entry->end = start;
24069f701172SKonstantin Belousov 	vm_map_entry_link(map, new_entry);
2407c0877f10SJohn Dyson }
2408df8bae1dSRodney W. Grimes 
2409df8bae1dSRodney W. Grimes /*
2410df8bae1dSRodney W. Grimes  *	vm_map_clip_end:	[ internal use only ]
2411df8bae1dSRodney W. Grimes  *
2412df8bae1dSRodney W. Grimes  *	Asserts that the given entry ends at or before
2413df8bae1dSRodney W. Grimes  *	the specified address; if necessary,
2414df8bae1dSRodney W. Grimes  *	it splits the entry into two.
2415df8bae1dSRodney W. Grimes  */
2416df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \
2417df8bae1dSRodney W. Grimes { \
2418af045176SPoul-Henning Kamp 	if ((endaddr) < (entry->end)) \
2419af045176SPoul-Henning Kamp 		_vm_map_clip_end((map), (entry), (endaddr)); \
2420df8bae1dSRodney W. Grimes }
2421df8bae1dSRodney W. Grimes 
2422df8bae1dSRodney W. Grimes /*
2423df8bae1dSRodney W. Grimes  *	This routine is called only when it is known that
2424df8bae1dSRodney W. Grimes  *	the entry must be split.
2425df8bae1dSRodney W. Grimes  */
24260d94caffSDavid Greenman static void
24271b40f8c0SMatthew Dillon _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
2428df8bae1dSRodney W. Grimes {
2429c0877f10SJohn Dyson 	vm_map_entry_t new_entry;
2430df8bae1dSRodney W. Grimes 
24313a0916b8SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(map);
2432ed11e4d7SMark Johnston 	KASSERT(entry->start < end && entry->end > end,
2433ed11e4d7SMark Johnston 	    ("_vm_map_clip_end: invalid clip of entry %p", entry));
24343a0916b8SKonstantin Belousov 
2435037c0994SDoug Moore 	new_entry = vm_map_entry_clone(map, entry);
2436df8bae1dSRodney W. Grimes 
24374766eba1SDoug Moore 	/*
24384766eba1SDoug Moore 	 * Split off the back portion.  Insert the new entry AFTER this one,
24394766eba1SDoug Moore 	 * so that this entry has the specified ending address.
24404766eba1SDoug Moore 	 */
2441668a8aa8SDoug Moore 	new_entry->start = end;
24429f701172SKonstantin Belousov 	vm_map_entry_link(map, new_entry);
2443c0877f10SJohn Dyson }
2444df8bae1dSRodney W. Grimes 
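/*
 *	Worked example with hypothetical addresses: clipping an entry that
 *	spans [0x2000, 0x6000) to an operation range of [0x3000, 0x5000):
 *
 *		vm_map_clip_start(map, entry, 0x3000);
 *		// entries: [0x2000, 0x3000) [0x3000, 0x6000);
 *		// "entry" now starts at 0x3000
 *		vm_map_clip_end(map, entry, 0x5000);
 *		// entries: [0x2000, 0x3000) [0x3000, 0x5000) [0x5000, 0x6000);
 *		// "entry" now covers exactly the operation range
 */
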
2445df8bae1dSRodney W. Grimes /*
2446df8bae1dSRodney W. Grimes  *	vm_map_submap:		[ kernel use only ]
2447df8bae1dSRodney W. Grimes  *
2448df8bae1dSRodney W. Grimes  *	Mark the given range as handled by a subordinate map.
2449df8bae1dSRodney W. Grimes  *
2450df8bae1dSRodney W. Grimes  *	This range must have been created with vm_map_find,
2451df8bae1dSRodney W. Grimes  *	and no other operations may have been performed on this
2452df8bae1dSRodney W. Grimes  *	range prior to calling vm_map_submap.
2453df8bae1dSRodney W. Grimes  *
2454df8bae1dSRodney W. Grimes  *	Only a limited number of operations can be performed
2455df8bae1dSRodney W. Grimes  *	within this range after calling vm_map_submap:
2456df8bae1dSRodney W. Grimes  *		vm_fault
2457df8bae1dSRodney W. Grimes  *	[Don't try vm_map_copy!]
2458df8bae1dSRodney W. Grimes  *
2459df8bae1dSRodney W. Grimes  *	To remove a submapping, one must first remove the
2460df8bae1dSRodney W. Grimes  *	range from the superior map, and then destroy the
2461df8bae1dSRodney W. Grimes  *	submap (if desired).  [Better yet, don't try it.]
2462df8bae1dSRodney W. Grimes  */
2463df8bae1dSRodney W. Grimes int
24641b40f8c0SMatthew Dillon vm_map_submap(
24651b40f8c0SMatthew Dillon 	vm_map_t map,
24661b40f8c0SMatthew Dillon 	vm_offset_t start,
24671b40f8c0SMatthew Dillon 	vm_offset_t end,
24681b40f8c0SMatthew Dillon 	vm_map_t submap)
2469df8bae1dSRodney W. Grimes {
2470df8bae1dSRodney W. Grimes 	vm_map_entry_t entry;
2471fa50a355SKonstantin Belousov 	int result;
2472fa50a355SKonstantin Belousov 
2473fa50a355SKonstantin Belousov 	result = KERN_INVALID_ARGUMENT;
2474fa50a355SKonstantin Belousov 
2475fa50a355SKonstantin Belousov 	vm_map_lock(submap);
2476fa50a355SKonstantin Belousov 	submap->flags |= MAP_IS_SUB_MAP;
2477fa50a355SKonstantin Belousov 	vm_map_unlock(submap);
2478df8bae1dSRodney W. Grimes 
2479df8bae1dSRodney W. Grimes 	vm_map_lock(map);
2480df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
2481*e6bd3a81SMark Johnston 	if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2482*e6bd3a81SMark Johnston 	    (entry->eflags & MAP_ENTRY_COW) == 0 &&
2483*e6bd3a81SMark Johnston 	    entry->object.vm_object == NULL) {
2484df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
2485df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
24862d8acc0fSJohn Dyson 		entry->object.sub_map = submap;
2487afa07f7eSJohn Dyson 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2488df8bae1dSRodney W. Grimes 		result = KERN_SUCCESS;
2489df8bae1dSRodney W. Grimes 	}
2490df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
2491df8bae1dSRodney W. Grimes 
2492fa50a355SKonstantin Belousov 	if (result != KERN_SUCCESS) {
2493fa50a355SKonstantin Belousov 		vm_map_lock(submap);
2494fa50a355SKonstantin Belousov 		submap->flags &= ~MAP_IS_SUB_MAP;
2495fa50a355SKonstantin Belousov 		vm_map_unlock(submap);
2496fa50a355SKonstantin Belousov 	}
2497df8bae1dSRodney W. Grimes 	return (result);
2498df8bae1dSRodney W. Grimes }
2499df8bae1dSRodney W. Grimes 
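/*
 *	Illustrative sketch (not part of this file), modeled on the kernel's
 *	kmem_suballoc(): reserve a range in the parent map with vm_map_find(),
 *	then install a subordinate map over it.  "parent" and "size" are
 *	assumed to be supplied by the caller.
 *
 *		vm_map_t submap;
 *		vm_offset_t start;
 *
 *		start = vm_map_min(parent);
 *		if (vm_map_find(parent, NULL, 0, &start, size, 0,
 *		    VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0) !=
 *		    KERN_SUCCESS)
 *			panic("suballoc: no space in parent map");
 *		submap = vm_map_create(vm_map_pmap(parent), start,
 *		    start + size);
 *		if (vm_map_submap(parent, start, start + size, submap) !=
 *		    KERN_SUCCESS)
 *			panic("suballoc: submap insertion failed");
 */
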
2500df8bae1dSRodney W. Grimes /*
2501dd05fa19SAlan Cox  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
25021f78f902SAlan Cox  */
25031f78f902SAlan Cox #define	MAX_INIT_PT	96
25041f78f902SAlan Cox 
25051f78f902SAlan Cox /*
25060551c08dSAlan Cox  *	vm_map_pmap_enter:
25070551c08dSAlan Cox  *
2508dd05fa19SAlan Cox  *	Preload the specified map's pmap with mappings to the specified
2509dd05fa19SAlan Cox  *	object's memory-resident pages.  No further physical pages are
2510dd05fa19SAlan Cox  *	allocated, and no further virtual pages are retrieved from secondary
2511dd05fa19SAlan Cox  *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
2512dd05fa19SAlan Cox  *	limited number of page mappings are created at the low-end of the
2513dd05fa19SAlan Cox  *	limited number of page mappings are created at the low end of the
2514dd05fa19SAlan Cox  *	counts as one page mapping.)  Otherwise, all resident pages within
25153453bca8SAlan Cox  *	the specified address range are mapped.
25160551c08dSAlan Cox  */
2517077ec27cSAlan Cox static void
25184da4d293SAlan Cox vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
25190551c08dSAlan Cox     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
25200551c08dSAlan Cox {
25218fece8c3SAlan Cox 	vm_offset_t start;
2522ce142d9eSAlan Cox 	vm_page_t p, p_start;
2523dd05fa19SAlan Cox 	vm_pindex_t mask, psize, threshold, tmpidx;
25240551c08dSAlan Cox 
2525ba8bca61SAlan Cox 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
25261f78f902SAlan Cox 		return;
25279af6d512SAttilio Rao 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
252889f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
252901381811SJohn Baldwin 		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
25309af6d512SAttilio Rao 			pmap_object_init_pt(map->pmap, addr, object, pindex,
25319af6d512SAttilio Rao 			    size);
25329af6d512SAttilio Rao 			VM_OBJECT_WUNLOCK(object);
25339af6d512SAttilio Rao 			return;
25349af6d512SAttilio Rao 		}
25359af6d512SAttilio Rao 		VM_OBJECT_LOCK_DOWNGRADE(object);
2536886b9021SJeff Roberson 	} else
2537886b9021SJeff Roberson 		VM_OBJECT_RLOCK(object);
25381f78f902SAlan Cox 
25391f78f902SAlan Cox 	psize = atop(size);
25401f78f902SAlan Cox 	if (psize + pindex > object->size) {
2541ed2f945aSMark Johnston 		if (pindex >= object->size) {
25429af6d512SAttilio Rao 			VM_OBJECT_RUNLOCK(object);
25439af6d512SAttilio Rao 			return;
25449af6d512SAttilio Rao 		}
25451f78f902SAlan Cox 		psize = object->size - pindex;
25461f78f902SAlan Cox 	}
25471f78f902SAlan Cox 
2548ce142d9eSAlan Cox 	start = 0;
2549ce142d9eSAlan Cox 	p_start = NULL;
2550dd05fa19SAlan Cox 	threshold = MAX_INIT_PT;
25511f78f902SAlan Cox 
2552b382c10aSKonstantin Belousov 	p = vm_page_find_least(object, pindex);
25531f78f902SAlan Cox 	/*
25541f78f902SAlan Cox 	 * Assert: the variable p is either (1) the page with the
25551f78f902SAlan Cox 	 * least pindex greater than or equal to the parameter pindex
25561f78f902SAlan Cox 	 * or (2) NULL.
25571f78f902SAlan Cox 	 */
25581f78f902SAlan Cox 	for (;
25591f78f902SAlan Cox 	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
25601f78f902SAlan Cox 	     p = TAILQ_NEXT(p, listq)) {
25611f78f902SAlan Cox 		/*
25621f78f902SAlan Cox 		 * Don't allow a madvise to blow away our really
25631f78f902SAlan Cox 		 * free pages by allocating pv entries.
25641f78f902SAlan Cox 		 */
2565dd05fa19SAlan Cox 		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2566e2068d0bSJeff Roberson 		    vm_page_count_severe()) ||
2567dd05fa19SAlan Cox 		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2568dd05fa19SAlan Cox 		    tmpidx >= threshold)) {
2569379fb642SAlan Cox 			psize = tmpidx;
25701f78f902SAlan Cox 			break;
25711f78f902SAlan Cox 		}
25720012f373SJeff Roberson 		if (vm_page_all_valid(p)) {
2573ce142d9eSAlan Cox 			if (p_start == NULL) {
2574ce142d9eSAlan Cox 				start = addr + ptoa(tmpidx);
2575ce142d9eSAlan Cox 				p_start = p;
2576ce142d9eSAlan Cox 			}
2577dd05fa19SAlan Cox 			/* Jump ahead if a superpage mapping is possible. */
2578dd05fa19SAlan Cox 			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2579dd05fa19SAlan Cox 			    (pagesizes[p->psind] - 1)) == 0) {
2580dd05fa19SAlan Cox 				mask = atop(pagesizes[p->psind]) - 1;
2581dd05fa19SAlan Cox 				if (tmpidx + mask < psize &&
258288302601SAlan Cox 				    vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2583dd05fa19SAlan Cox 					p += mask;
2584dd05fa19SAlan Cox 					threshold += mask;
2585dd05fa19SAlan Cox 				}
2586dd05fa19SAlan Cox 			}
25877bfda801SAlan Cox 		} else if (p_start != NULL) {
2588cf4682aeSAlan Cox 			pmap_enter_object(map->pmap, start, addr +
2589cf4682aeSAlan Cox 			    ptoa(tmpidx), p_start, prot);
2590cf4682aeSAlan Cox 			p_start = NULL;
2591cf4682aeSAlan Cox 		}
2592cf4682aeSAlan Cox 	}
2593c46b90e9SAlan Cox 	if (p_start != NULL)
2594379fb642SAlan Cox 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2595379fb642SAlan Cox 		    p_start, prot);
25969af6d512SAttilio Rao 	VM_OBJECT_RUNLOCK(object);
25970551c08dSAlan Cox }
25980551c08dSAlan Cox 
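/*
 *	Worked example for MAP_PREFAULT_PARTIAL: the threshold starts at
 *	MAX_INIT_PT (96) base pages, and jumping over a resident superpage
 *	grows the threshold by the superpage's base-page count minus one, so
 *	a superpage costs the same as a single base-page mapping.  With 4KB
 *	base pages and a 2MB superpage (512 base pages) at tmpidx 0, the
 *	threshold becomes 96 + 511 = 607, leaving room for up to 95 further
 *	base-page mappings after the superpage.
 */
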
25990551c08dSAlan Cox /*
2600df8bae1dSRodney W. Grimes  *	vm_map_protect:
2601df8bae1dSRodney W. Grimes  *
2602df8bae1dSRodney W. Grimes  *	Sets the protection of the specified address
2603df8bae1dSRodney W. Grimes  *	region in the target map.  If "set_max" is
2604df8bae1dSRodney W. Grimes  *	specified, the maximum protection is to be set;
2605df8bae1dSRodney W. Grimes  *	otherwise, only the current protection is affected.
2606df8bae1dSRodney W. Grimes  */
2607df8bae1dSRodney W. Grimes int
2608b9dcd593SBruce Evans vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2609b9dcd593SBruce Evans 	       vm_prot_t new_prot, boolean_t set_max)
2610df8bae1dSRodney W. Grimes {
26112767c9f3SDoug Moore 	vm_map_entry_t entry, first_entry, in_tran, prev_entry;
26123364c323SKonstantin Belousov 	vm_object_t obj;
2613ef694c1aSEdward Tomasz Napierala 	struct ucred *cred;
2614210a6886SKonstantin Belousov 	vm_prot_t old_prot;
2615a72dce34SDoug Moore 	int rv;
2616df8bae1dSRodney W. Grimes 
261779e9451fSKonstantin Belousov 	if (start == end)
261879e9451fSKonstantin Belousov 		return (KERN_SUCCESS);
261979e9451fSKonstantin Belousov 
262019f5d9f2SKonstantin Belousov again:
262119f5d9f2SKonstantin Belousov 	in_tran = NULL;
2622df8bae1dSRodney W. Grimes 	vm_map_lock(map);
2623df8bae1dSRodney W. Grimes 
2624e1cb9d37SMark Johnston 	/*
2625e1cb9d37SMark Johnston 	 * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2626e1cb9d37SMark Johnston 	 * need to fault pages into the map and will drop the map lock while
2627e1cb9d37SMark Johnston 	 * doing so, and the VM object may end up in an inconsistent state if we
2628e1cb9d37SMark Johnston 	 * update the protection on the map entry in between faults.
2629e1cb9d37SMark Johnston 	 */
2630e1cb9d37SMark Johnston 	vm_map_wait_busy(map);
2631e1cb9d37SMark Johnston 
2632df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
2633df8bae1dSRodney W. Grimes 
26342767c9f3SDoug Moore 	if (!vm_map_lookup_entry(map, start, &first_entry))
26352767c9f3SDoug Moore 		first_entry = vm_map_entry_succ(first_entry);
2636df8bae1dSRodney W. Grimes 
2637df8bae1dSRodney W. Grimes 	/*
26380d94caffSDavid Greenman 	 * Make a first pass to check for protection violations.
2639df8bae1dSRodney W. Grimes 	 */
26402767c9f3SDoug Moore 	for (entry = first_entry; entry->start < end;
26412767c9f3SDoug Moore 	    entry = vm_map_entry_succ(entry)) {
26422767c9f3SDoug Moore 		if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
26438a89ca94SKonstantin Belousov 			continue;
26442767c9f3SDoug Moore 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2645a1f6d91cSDavid Greenman 			vm_map_unlock(map);
2646df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
2647a1f6d91cSDavid Greenman 		}
26482767c9f3SDoug Moore 		if ((new_prot & entry->max_protection) != new_prot) {
2649df8bae1dSRodney W. Grimes 			vm_map_unlock(map);
2650df8bae1dSRodney W. Grimes 			return (KERN_PROTECTION_FAILURE);
2651df8bae1dSRodney W. Grimes 		}
26522767c9f3SDoug Moore 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
26532767c9f3SDoug Moore 			in_tran = entry;
265419f5d9f2SKonstantin Belousov 	}
265519f5d9f2SKonstantin Belousov 
265619f5d9f2SKonstantin Belousov 	/*
2657bdb90e76SDoug Moore 	 * Postpone the operation until all in-transition map entries have
2658bdb90e76SDoug Moore 	 * stabilized.  An in-transition entry might already have its pages
2659bdb90e76SDoug Moore 	 * wired and wired_count incremented, but not yet have its
2660bdb90e76SDoug Moore 	 * MAP_ENTRY_USER_WIRED flag set, in which case we would fail to call
2661bdb90e76SDoug Moore 	 * vm_fault_copy_entry() in the final loop below.
266219f5d9f2SKonstantin Belousov 	 */
266319f5d9f2SKonstantin Belousov 	if (in_tran != NULL) {
266419f5d9f2SKonstantin Belousov 		in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
266519f5d9f2SKonstantin Belousov 		vm_map_unlock_and_wait(map, 0);
266619f5d9f2SKonstantin Belousov 		goto again;
2667df8bae1dSRodney W. Grimes 	}
2668df8bae1dSRodney W. Grimes 
26693364c323SKonstantin Belousov 	/*
2670a72dce34SDoug Moore 	 * Before changing the protections, try to reserve swap space for any
2671a72dce34SDoug Moore 	 * private (i.e., copy-on-write) mappings that are transitioning from
2672a72dce34SDoug Moore 	 * read-only to read/write access.  If a reservation fails, break out
2673a72dce34SDoug Moore 	 * of this loop early and let the next loop simplify the entries, since
2674a72dce34SDoug Moore 	 * some may now be mergeable.
26753364c323SKonstantin Belousov 	 */
2676a72dce34SDoug Moore 	rv = KERN_SUCCESS;
26772767c9f3SDoug Moore 	vm_map_clip_start(map, first_entry, start);
26782767c9f3SDoug Moore 	for (entry = first_entry; entry->start < end;
26792767c9f3SDoug Moore 	    entry = vm_map_entry_succ(entry)) {
26802767c9f3SDoug Moore 		vm_map_clip_end(map, entry, end);
26813364c323SKonstantin Belousov 
26823364c323SKonstantin Belousov 		if (set_max ||
26832767c9f3SDoug Moore 		    ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
26842767c9f3SDoug Moore 		    ENTRY_CHARGED(entry) ||
26852767c9f3SDoug Moore 		    (entry->eflags & MAP_ENTRY_GUARD) != 0) {
26863364c323SKonstantin Belousov 			continue;
26873364c323SKonstantin Belousov 		}
26883364c323SKonstantin Belousov 
2689ef694c1aSEdward Tomasz Napierala 		cred = curthread->td_ucred;
26902767c9f3SDoug Moore 		obj = entry->object.vm_object;
26913364c323SKonstantin Belousov 
26922767c9f3SDoug Moore 		if (obj == NULL ||
26932767c9f3SDoug Moore 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
26942767c9f3SDoug Moore 			if (!swap_reserve(entry->end - entry->start)) {
2695a72dce34SDoug Moore 				rv = KERN_RESOURCE_SHORTAGE;
26962767c9f3SDoug Moore 				end = entry->end;
2697a72dce34SDoug Moore 				break;
26983364c323SKonstantin Belousov 			}
2699ef694c1aSEdward Tomasz Napierala 			crhold(cred);
27002767c9f3SDoug Moore 			entry->cred = cred;
27013364c323SKonstantin Belousov 			continue;
27023364c323SKonstantin Belousov 		}
27033364c323SKonstantin Belousov 
2704886b9021SJeff Roberson 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP)
2705886b9021SJeff Roberson 			continue;
270689f6b863SAttilio Rao 		VM_OBJECT_WLOCK(obj);
27073364c323SKonstantin Belousov 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
270889f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(obj);
27093364c323SKonstantin Belousov 			continue;
27103364c323SKonstantin Belousov 		}
27113364c323SKonstantin Belousov 
27123364c323SKonstantin Belousov 		/*
27133364c323SKonstantin Belousov 		 * Charge for the whole object allocation now, since
27143364c323SKonstantin Belousov 		 * we cannot distinguish between non-charged and
27153364c323SKonstantin Belousov 		 * charged clipped mapping of the same object later.
27163364c323SKonstantin Belousov 		 */
27173364c323SKonstantin Belousov 		KASSERT(obj->charge == 0,
27183d95614fSKonstantin Belousov 		    ("vm_map_protect: object %p overcharged (entry %p)",
27192767c9f3SDoug Moore 		    obj, entry));
27203364c323SKonstantin Belousov 		if (!swap_reserve(ptoa(obj->size))) {
272189f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(obj);
2722a72dce34SDoug Moore 			rv = KERN_RESOURCE_SHORTAGE;
27232767c9f3SDoug Moore 			end = entry->end;
2724a72dce34SDoug Moore 			break;
27253364c323SKonstantin Belousov 		}
27263364c323SKonstantin Belousov 
2727ef694c1aSEdward Tomasz Napierala 		crhold(cred);
2728ef694c1aSEdward Tomasz Napierala 		obj->cred = cred;
27293364c323SKonstantin Belousov 		obj->charge = ptoa(obj->size);
273089f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(obj);
27313364c323SKonstantin Belousov 	}
27323364c323SKonstantin Belousov 
2733df8bae1dSRodney W. Grimes 	/*
2734a72dce34SDoug Moore 	 * If enough swap space was available, go back and fix up protections.
2735a72dce34SDoug Moore 	 * Otherwise, just simplify entries, since some may have been modified.
2736a72dce34SDoug Moore 	 * [Note that clipping is not necessary the second time.]
2737df8bae1dSRodney W. Grimes 	 */
27382767c9f3SDoug Moore 	for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
27392767c9f3SDoug Moore 	    entry->start < end;
27402767c9f3SDoug Moore 	    vm_map_try_merge_entries(map, prev_entry, entry),
27412767c9f3SDoug Moore 	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2742a72dce34SDoug Moore 		if (rv != KERN_SUCCESS ||
27432767c9f3SDoug Moore 		    (entry->eflags & MAP_ENTRY_GUARD) != 0)
274419bd0d9cSKonstantin Belousov 			continue;
274519bd0d9cSKonstantin Belousov 
27462767c9f3SDoug Moore 		old_prot = entry->protection;
2747210a6886SKonstantin Belousov 
2748df8bae1dSRodney W. Grimes 		if (set_max)
27492767c9f3SDoug Moore 			entry->protection =
27502767c9f3SDoug Moore 			    (entry->max_protection = new_prot) &
2751df8bae1dSRodney W. Grimes 			    old_prot;
2752df8bae1dSRodney W. Grimes 		else
27532767c9f3SDoug Moore 			entry->protection = new_prot;
2754df8bae1dSRodney W. Grimes 
2755dd006a1bSAlan Cox 		/*
2756dd006a1bSAlan Cox 		 * For user wired map entries, the normal lazy evaluation of
2757dd006a1bSAlan Cox 		 * write access upgrades through soft page faults is
2758dd006a1bSAlan Cox 		 * undesirable.  Instead, immediately copy any pages that are
2759dd006a1bSAlan Cox 		 * copy-on-write and enable write access in the physical map.
2760dd006a1bSAlan Cox 		 */
27612767c9f3SDoug Moore 		if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
27622767c9f3SDoug Moore 		    (entry->protection & VM_PROT_WRITE) != 0 &&
27635930251aSKonstantin Belousov 		    (old_prot & VM_PROT_WRITE) == 0)
27642767c9f3SDoug Moore 			vm_fault_copy_entry(map, map, entry, entry, NULL);
2765210a6886SKonstantin Belousov 
2766df8bae1dSRodney W. Grimes 		/*
27672fafce9eSAlan Cox 		 * When restricting access, update the physical map.  Worry
27682fafce9eSAlan Cox 		 * about copy-on-write here.
2769df8bae1dSRodney W. Grimes 		 */
27702767c9f3SDoug Moore 		if ((old_prot & ~entry->protection) != 0) {
2771afa07f7eSJohn Dyson #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2772df8bae1dSRodney W. Grimes 							VM_PROT_ALL)
27732767c9f3SDoug Moore 			pmap_protect(map->pmap, entry->start,
27742767c9f3SDoug Moore 			    entry->end,
27752767c9f3SDoug Moore 			    entry->protection & MASK(entry));
2776df8bae1dSRodney W. Grimes #undef	MASK
2777df8bae1dSRodney W. Grimes 		}
2778df8bae1dSRodney W. Grimes 	}
27792767c9f3SDoug Moore 	vm_map_try_merge_entries(map, prev_entry, entry);
2780df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
2781a72dce34SDoug Moore 	return (rv);
2782df8bae1dSRodney W. Grimes }
2783df8bae1dSRodney W. Grimes 
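/*
 *	Illustrative sketch (not part of this file): the mprotect(2) handler
 *	reduces to a single call on the current process's map, passing
 *	set_max == FALSE so that only the current protection changes:
 *
 *		rv = vm_map_protect(&curproc->p_vmspace->vm_map,
 *		    trunc_page(addr), round_page(addr + len),
 *		    VM_PROT_READ, FALSE);
 *		// rv is a KERN_* code, e.g. KERN_PROTECTION_FAILURE when the
 *		// request exceeds some entry's max_protection
 */
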
2784df8bae1dSRodney W. Grimes /*
2785867a482dSJohn Dyson  *	vm_map_madvise:
2786867a482dSJohn Dyson  *
2787867a482dSJohn Dyson  *	This routine traverses a process's map handling the madvise
2788f7fc307aSAlan Cox  *	system call.  Advisories are classified as either those affecting
2789f7fc307aSAlan Cox  *	the vm_map_entry structure, or those affecting the underlying
2790f7fc307aSAlan Cox  *	objects.
2791867a482dSJohn Dyson  */
2792b4309055SMatthew Dillon int
27931b40f8c0SMatthew Dillon vm_map_madvise(
27941b40f8c0SMatthew Dillon 	vm_map_t map,
27951b40f8c0SMatthew Dillon 	vm_offset_t start,
27961b40f8c0SMatthew Dillon 	vm_offset_t end,
27971b40f8c0SMatthew Dillon 	int behav)
2798867a482dSJohn Dyson {
27992767c9f3SDoug Moore 	vm_map_entry_t entry, prev_entry;
28003e7cb27cSAlan Cox 	bool modify_map;
2801867a482dSJohn Dyson 
2802b4309055SMatthew Dillon 	/*
2803b4309055SMatthew Dillon 	 * Some madvise calls directly modify the vm_map_entry, in which case
2804b4309055SMatthew Dillon 	 * we need to use an exclusive lock on the map and we need to perform
2805b4309055SMatthew Dillon 	 * various clipping operations.  Otherwise we only need a read-lock
2806b4309055SMatthew Dillon 	 * on the map.
2807b4309055SMatthew Dillon 	 */
2808b4309055SMatthew Dillon 	switch(behav) {
2809b4309055SMatthew Dillon 	case MADV_NORMAL:
2810b4309055SMatthew Dillon 	case MADV_SEQUENTIAL:
2811b4309055SMatthew Dillon 	case MADV_RANDOM:
28124f79d873SMatthew Dillon 	case MADV_NOSYNC:
28134f79d873SMatthew Dillon 	case MADV_AUTOSYNC:
28149730a5daSPaul Saab 	case MADV_NOCORE:
28159730a5daSPaul Saab 	case MADV_CORE:
281679e9451fSKonstantin Belousov 		if (start == end)
28173e7cb27cSAlan Cox 			return (0);
28183e7cb27cSAlan Cox 		modify_map = true;
2819867a482dSJohn Dyson 		vm_map_lock(map);
2820b4309055SMatthew Dillon 		break;
2821b4309055SMatthew Dillon 	case MADV_WILLNEED:
2822b4309055SMatthew Dillon 	case MADV_DONTNEED:
2823b4309055SMatthew Dillon 	case MADV_FREE:
282479e9451fSKonstantin Belousov 		if (start == end)
28253e7cb27cSAlan Cox 			return (0);
28263e7cb27cSAlan Cox 		modify_map = false;
2827f7fc307aSAlan Cox 		vm_map_lock_read(map);
2828b4309055SMatthew Dillon 		break;
2829b4309055SMatthew Dillon 	default:
28303e7cb27cSAlan Cox 		return (EINVAL);
2831b4309055SMatthew Dillon 	}
2832b4309055SMatthew Dillon 
2833b4309055SMatthew Dillon 	/*
2834b4309055SMatthew Dillon 	 * Locate starting entry and clip if necessary.
2835b4309055SMatthew Dillon 	 */
2836867a482dSJohn Dyson 	VM_MAP_RANGE_CHECK(map, start, end);
2837867a482dSJohn Dyson 
28382767c9f3SDoug Moore 	if (vm_map_lookup_entry(map, start, &entry)) {
2839f7fc307aSAlan Cox 		if (modify_map)
28402767c9f3SDoug Moore 			vm_map_clip_start(map, entry, start);
28412767c9f3SDoug Moore 		prev_entry = vm_map_entry_pred(entry);
2842d1d3f7e1SDoug Moore 	} else {
28432767c9f3SDoug Moore 		prev_entry = entry;
28442767c9f3SDoug Moore 		entry = vm_map_entry_succ(entry);
2845b4309055SMatthew Dillon 	}
2846867a482dSJohn Dyson 
2847f7fc307aSAlan Cox 	if (modify_map) {
2848f7fc307aSAlan Cox 		/*
2849f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the vm_map_entry.
2850f7fc307aSAlan Cox 		 *
2851f7fc307aSAlan Cox 		 * We clip the vm_map_entry so that behavioral changes are
2852f7fc307aSAlan Cox 		 * limited to the specified address range.
2853f7fc307aSAlan Cox 		 */
28542767c9f3SDoug Moore 		for (; entry->start < end;
28552767c9f3SDoug Moore 		     prev_entry = entry, entry = vm_map_entry_succ(entry)) {
28562767c9f3SDoug Moore 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2857867a482dSJohn Dyson 				continue;
2858fed9a903SJohn Dyson 
28592767c9f3SDoug Moore 			vm_map_clip_end(map, entry, end);
2860fed9a903SJohn Dyson 
2861f7fc307aSAlan Cox 			switch (behav) {
2862867a482dSJohn Dyson 			case MADV_NORMAL:
28632767c9f3SDoug Moore 				vm_map_entry_set_behavior(entry,
28642767c9f3SDoug Moore 				    MAP_ENTRY_BEHAV_NORMAL);
2865867a482dSJohn Dyson 				break;
2866867a482dSJohn Dyson 			case MADV_SEQUENTIAL:
28672767c9f3SDoug Moore 				vm_map_entry_set_behavior(entry,
28682767c9f3SDoug Moore 				    MAP_ENTRY_BEHAV_SEQUENTIAL);
2869867a482dSJohn Dyson 				break;
2870867a482dSJohn Dyson 			case MADV_RANDOM:
28712767c9f3SDoug Moore 				vm_map_entry_set_behavior(entry,
28722767c9f3SDoug Moore 				    MAP_ENTRY_BEHAV_RANDOM);
2873867a482dSJohn Dyson 				break;
28744f79d873SMatthew Dillon 			case MADV_NOSYNC:
28752767c9f3SDoug Moore 				entry->eflags |= MAP_ENTRY_NOSYNC;
28764f79d873SMatthew Dillon 				break;
28774f79d873SMatthew Dillon 			case MADV_AUTOSYNC:
28782767c9f3SDoug Moore 				entry->eflags &= ~MAP_ENTRY_NOSYNC;
28794f79d873SMatthew Dillon 				break;
28809730a5daSPaul Saab 			case MADV_NOCORE:
28812767c9f3SDoug Moore 				entry->eflags |= MAP_ENTRY_NOCOREDUMP;
28829730a5daSPaul Saab 				break;
28839730a5daSPaul Saab 			case MADV_CORE:
28842767c9f3SDoug Moore 				entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
28859730a5daSPaul Saab 				break;
2886867a482dSJohn Dyson 			default:
2887867a482dSJohn Dyson 				break;
2888867a482dSJohn Dyson 			}
28892767c9f3SDoug Moore 			vm_map_try_merge_entries(map, prev_entry, entry);
2890867a482dSJohn Dyson 		}
28912767c9f3SDoug Moore 		vm_map_try_merge_entries(map, prev_entry, entry);
2892867a482dSJohn Dyson 		vm_map_unlock(map);
2893b4309055SMatthew Dillon 	} else {
289492a59946SJohn Baldwin 		vm_pindex_t pstart, pend;
2895f7fc307aSAlan Cox 
2896f7fc307aSAlan Cox 		/*
2897f7fc307aSAlan Cox 		 * madvise behaviors that are implemented in the underlying
2898f7fc307aSAlan Cox 		 * vm_object.
2899f7fc307aSAlan Cox 		 *
2900f7fc307aSAlan Cox 		 * Since we don't clip the vm_map_entry, we have to clip
2901f7fc307aSAlan Cox 		 * the vm_object pindex and count.
2902f7fc307aSAlan Cox 		 */
29032767c9f3SDoug Moore 		for (; entry->start < end;
29042767c9f3SDoug Moore 		    entry = vm_map_entry_succ(entry)) {
290551321f7cSAlan Cox 			vm_offset_t useEnd, useStart;
29065f99b57cSMatthew Dillon 
29072767c9f3SDoug Moore 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2908f7fc307aSAlan Cox 				continue;
2909f7fc307aSAlan Cox 
2910bf5661f4SKonstantin Belousov 			/*
2911bf5661f4SKonstantin Belousov 			 * MADV_FREE would otherwise rewind time to
2912bf5661f4SKonstantin Belousov 			 * the creation of the shadow object.  Because
2913bf5661f4SKonstantin Belousov 			 * we hold the VM map read-locked, neither the
2914bf5661f4SKonstantin Belousov 			 * entry's object nor the presence of a
2915bf5661f4SKonstantin Belousov 			 * backing object can change.
2916bf5661f4SKonstantin Belousov 			 */
2917bf5661f4SKonstantin Belousov 			if (behav == MADV_FREE &&
29182767c9f3SDoug Moore 			    entry->object.vm_object != NULL &&
29192767c9f3SDoug Moore 			    entry->object.vm_object->backing_object != NULL)
2920bf5661f4SKonstantin Belousov 				continue;
2921bf5661f4SKonstantin Belousov 
29222767c9f3SDoug Moore 			pstart = OFF_TO_IDX(entry->offset);
29232767c9f3SDoug Moore 			pend = pstart + atop(entry->end - entry->start);
29242767c9f3SDoug Moore 			useStart = entry->start;
29252767c9f3SDoug Moore 			useEnd = entry->end;
2926f7fc307aSAlan Cox 
29272767c9f3SDoug Moore 			if (entry->start < start) {
29282767c9f3SDoug Moore 				pstart += atop(start - entry->start);
29295f99b57cSMatthew Dillon 				useStart = start;
2930f7fc307aSAlan Cox 			}
29312767c9f3SDoug Moore 			if (entry->end > end) {
29322767c9f3SDoug Moore 				pend -= atop(entry->end - end);
293351321f7cSAlan Cox 				useEnd = end;
293451321f7cSAlan Cox 			}
2935f7fc307aSAlan Cox 
293692a59946SJohn Baldwin 			if (pstart >= pend)
2937f7fc307aSAlan Cox 				continue;
2938f7fc307aSAlan Cox 
293951321f7cSAlan Cox 			/*
294051321f7cSAlan Cox 			 * Perform the pmap_advise() before clearing
294151321f7cSAlan Cox 			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
294251321f7cSAlan Cox 			 * concurrent pmap operation, such as pmap_remove(),
294351321f7cSAlan Cox 			 * could clear a reference in the pmap and set
294451321f7cSAlan Cox 			 * PGA_REFERENCED on the page before the pmap_advise()
294551321f7cSAlan Cox 			 * had completed.  Consequently, the page would appear
294651321f7cSAlan Cox 			 * referenced based upon an old reference that
294751321f7cSAlan Cox 			 * occurred before this pmap_advise() ran.
294851321f7cSAlan Cox 			 */
294951321f7cSAlan Cox 			if (behav == MADV_DONTNEED || behav == MADV_FREE)
295051321f7cSAlan Cox 				pmap_advise(map->pmap, useStart, useEnd,
295151321f7cSAlan Cox 				    behav);
295251321f7cSAlan Cox 
29532767c9f3SDoug Moore 			vm_object_madvise(entry->object.vm_object, pstart,
295492a59946SJohn Baldwin 			    pend, behav);
295554432196SKonstantin Belousov 
295654432196SKonstantin Belousov 			/*
295754432196SKonstantin Belousov 			 * Pre-populate paging structures in the
295854432196SKonstantin Belousov 			 * WILLNEED case.  For wired entries, the
295954432196SKonstantin Belousov 			 * paging structures are already populated.
296054432196SKonstantin Belousov 			 */
296154432196SKonstantin Belousov 			if (behav == MADV_WILLNEED &&
29622767c9f3SDoug Moore 			    entry->wired_count == 0) {
29630551c08dSAlan Cox 				vm_map_pmap_enter(map,
29645f99b57cSMatthew Dillon 				    useStart,
29652767c9f3SDoug Moore 				    entry->protection,
29662767c9f3SDoug Moore 				    entry->object.vm_object,
296792a59946SJohn Baldwin 				    pstart,
296892a59946SJohn Baldwin 				    ptoa(pend - pstart),
2969e3026983SMatthew Dillon 				    MAP_PREFAULT_MADVISE
2970b4309055SMatthew Dillon 				);
2971f7fc307aSAlan Cox 			}
2972f7fc307aSAlan Cox 		}
2973f7fc307aSAlan Cox 		vm_map_unlock_read(map);
2974f7fc307aSAlan Cox 	}
2975b4309055SMatthew Dillon 	return (0);
2976867a482dSJohn Dyson }
2977867a482dSJohn Dyson 
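/*
 *	Illustrative sketch (not part of this file): unlike most vm_map
 *	routines, vm_map_madvise() returns 0 or EINVAL rather than a KERN_*
 *	code, so a caller such as the madvise(2) handler can use the result
 *	directly as an errno value:
 *
 *		error = vm_map_madvise(&curproc->p_vmspace->vm_map,
 *		    trunc_page(addr), round_page(addr + len), MADV_WILLNEED);
 */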
2978867a482dSJohn Dyson 
2979867a482dSJohn Dyson /*
2980df8bae1dSRodney W. Grimes  *	vm_map_inherit:
2981df8bae1dSRodney W. Grimes  *
2982df8bae1dSRodney W. Grimes  *	Sets the inheritance of the specified address
2983df8bae1dSRodney W. Grimes  *	range in the target map.  Inheritance
2984df8bae1dSRodney W. Grimes  *	affects how the map will be shared with
2985e2abaaaaSAlan Cox  *	child maps at the time of vmspace_fork.
2986df8bae1dSRodney W. Grimes  */
2987df8bae1dSRodney W. Grimes int
2988b9dcd593SBruce Evans vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2989b9dcd593SBruce Evans 	       vm_inherit_t new_inheritance)
2990df8bae1dSRodney W. Grimes {
299183704cc2SDoug Moore 	vm_map_entry_t entry, prev_entry;
2992df8bae1dSRodney W. Grimes 
2993df8bae1dSRodney W. Grimes 	switch (new_inheritance) {
2994df8bae1dSRodney W. Grimes 	case VM_INHERIT_NONE:
2995df8bae1dSRodney W. Grimes 	case VM_INHERIT_COPY:
2996df8bae1dSRodney W. Grimes 	case VM_INHERIT_SHARE:
299778d7964bSXin LI 	case VM_INHERIT_ZERO:
2998df8bae1dSRodney W. Grimes 		break;
2999df8bae1dSRodney W. Grimes 	default:
3000df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ARGUMENT);
3001df8bae1dSRodney W. Grimes 	}
300279e9451fSKonstantin Belousov 	if (start == end)
300379e9451fSKonstantin Belousov 		return (KERN_SUCCESS);
3004df8bae1dSRodney W. Grimes 	vm_map_lock(map);
3005df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
300683704cc2SDoug Moore 	if (vm_map_lookup_entry(map, start, &prev_entry)) {
300783704cc2SDoug Moore 		entry = prev_entry;
3008df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
300983704cc2SDoug Moore 		prev_entry = vm_map_entry_pred(entry);
3010d1d3f7e1SDoug Moore 	} else
301183704cc2SDoug Moore 		entry = vm_map_entry_succ(prev_entry);
301283704cc2SDoug Moore 	for (; entry->start < end;
301383704cc2SDoug Moore 	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3014df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
301519bd0d9cSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
301619bd0d9cSKonstantin Belousov 		    new_inheritance != VM_INHERIT_ZERO)
3017df8bae1dSRodney W. Grimes 			entry->inheritance = new_inheritance;
301883704cc2SDoug Moore 		vm_map_try_merge_entries(map, prev_entry, entry);
3019df8bae1dSRodney W. Grimes 	}
302083704cc2SDoug Moore 	vm_map_try_merge_entries(map, prev_entry, entry);
3021df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
3022df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
3023df8bae1dSRodney W. Grimes }
3024df8bae1dSRodney W. Grimes 
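/*
 *	Illustrative sketch (not part of this file): minherit(2) maps
 *	directly onto this routine; for example, marking a region so that a
 *	child receives zero-filled anonymous memory at fork time:
 *
 *		rv = vm_map_inherit(&curproc->p_vmspace->vm_map,
 *		    trunc_page(addr), round_page(addr + len),
 *		    VM_INHERIT_ZERO);
 */
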
3025df8bae1dSRodney W. Grimes /*
3026312df2c1SDoug Moore  *	vm_map_entry_in_transition:
3027312df2c1SDoug Moore  *
3028312df2c1SDoug Moore  *	Release the map lock, and sleep until the entry is no longer in
3029312df2c1SDoug Moore  *	transition.  Awake and acquire the map lock.  If the map changed while
3030312df2c1SDoug Moore  *	transition.  Awaken and reacquire the map lock.  If the map changed
3031312df2c1SDoug Moore  *	while another thread held the lock, look up a possibly-changed entry
3032312df2c1SDoug Moore  *	at or after the 'start' position of the old entry.
3033312df2c1SDoug Moore static vm_map_entry_t
3034312df2c1SDoug Moore vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
3035312df2c1SDoug Moore     vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry)
3036312df2c1SDoug Moore {
3037312df2c1SDoug Moore 	vm_map_entry_t entry;
3038312df2c1SDoug Moore 	vm_offset_t start;
3039312df2c1SDoug Moore 	u_int last_timestamp;
3040312df2c1SDoug Moore 
3041312df2c1SDoug Moore 	VM_MAP_ASSERT_LOCKED(map);
3042312df2c1SDoug Moore 	KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3043312df2c1SDoug Moore 	    ("not in-transition map entry %p", in_entry));
3044312df2c1SDoug Moore 	/*
3045312df2c1SDoug Moore 	 * We have not yet clipped the entry.
3046312df2c1SDoug Moore 	 */
3047312df2c1SDoug Moore 	start = MAX(in_start, in_entry->start);
3048312df2c1SDoug Moore 	in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3049312df2c1SDoug Moore 	last_timestamp = map->timestamp;
3050312df2c1SDoug Moore 	if (vm_map_unlock_and_wait(map, 0)) {
3051312df2c1SDoug Moore 		/*
3052312df2c1SDoug Moore 		 * Allow interruption of user wiring/unwiring?
3053312df2c1SDoug Moore 		 */
3054312df2c1SDoug Moore 	}
3055312df2c1SDoug Moore 	vm_map_lock(map);
3056312df2c1SDoug Moore 	if (last_timestamp + 1 == map->timestamp)
3057312df2c1SDoug Moore 		return (in_entry);
3058312df2c1SDoug Moore 
3059312df2c1SDoug Moore 	/*
3060312df2c1SDoug Moore 	 * Look again for the entry because the map was modified while it was
3061312df2c1SDoug Moore 	 * unlocked.  Specifically, the entry may have been clipped, merged, or
3062312df2c1SDoug Moore 	 * deleted.
3063312df2c1SDoug Moore 	 */
3064312df2c1SDoug Moore 	if (!vm_map_lookup_entry(map, start, &entry)) {
3065312df2c1SDoug Moore 		if (!holes_ok) {
3066312df2c1SDoug Moore 			*io_end = start;
3067312df2c1SDoug Moore 			return (NULL);
3068312df2c1SDoug Moore 		}
30697cdcf863SDoug Moore 		entry = vm_map_entry_succ(entry);
3070312df2c1SDoug Moore 	}
3071312df2c1SDoug Moore 	return (entry);
3072312df2c1SDoug Moore }
3073312df2c1SDoug Moore 
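/*
 *	Illustrative sketch (not part of this file): wiring and unwiring
 *	loops use this helper as in vm_map_unwire() below; a NULL return
 *	means the lookup failed with !holes_ok, and *io_end was clamped to
 *	the failing start address:
 *
 *		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
 *			next_entry = vm_map_entry_in_transition(map, start,
 *			    &end, holes_ok, entry);
 *			if (next_entry == NULL)
 *				return (KERN_INVALID_ADDRESS);	// "end" clamped
 *		}
 */
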
3074312df2c1SDoug Moore /*
3075acd9a301SAlan Cox  *	vm_map_unwire:
3076acd9a301SAlan Cox  *
3077e27e17b7SAlan Cox  *	Implements both kernel and user unwiring.
3078acd9a301SAlan Cox  */
3079acd9a301SAlan Cox int
3080acd9a301SAlan Cox vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3081abd498aaSBruce M Simpson     int flags)
3082acd9a301SAlan Cox {
308383704cc2SDoug Moore 	vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3084acd9a301SAlan Cox 	int rv;
308583704cc2SDoug Moore 	bool holes_ok, need_wakeup, user_unwire;
3086acd9a301SAlan Cox 
308779e9451fSKonstantin Belousov 	if (start == end)
308879e9451fSKonstantin Belousov 		return (KERN_SUCCESS);
30899a0cdf94SDoug Moore 	holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
30909a0cdf94SDoug Moore 	user_unwire = (flags & VM_MAP_WIRE_USER) != 0;
3091acd9a301SAlan Cox 	vm_map_lock(map);
3092acd9a301SAlan Cox 	VM_MAP_RANGE_CHECK(map, start, end);
3093d1d3f7e1SDoug Moore 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
30949a0cdf94SDoug Moore 		if (holes_ok)
30957cdcf863SDoug Moore 			first_entry = vm_map_entry_succ(first_entry);
3096d1d3f7e1SDoug Moore 		else {
3097acd9a301SAlan Cox 			vm_map_unlock(map);
3098acd9a301SAlan Cox 			return (KERN_INVALID_ADDRESS);
3099acd9a301SAlan Cox 		}
3100abd498aaSBruce M Simpson 	}
3101d2860f22SDoug Moore 	rv = KERN_SUCCESS;
310283704cc2SDoug Moore 	for (entry = first_entry; entry->start < end; entry = next_entry) {
3103acd9a301SAlan Cox 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3104acd9a301SAlan Cox 			/*
3105acd9a301SAlan Cox 			 * We have not yet clipped the entry.
3106acd9a301SAlan Cox 			 */
310783704cc2SDoug Moore 			next_entry = vm_map_entry_in_transition(map, start,
310883704cc2SDoug Moore 			    &end, holes_ok, entry);
310983704cc2SDoug Moore 			if (next_entry == NULL) {
311083704cc2SDoug Moore 				if (entry == first_entry) {
3111acd9a301SAlan Cox 					vm_map_unlock(map);
3112acd9a301SAlan Cox 					return (KERN_INVALID_ADDRESS);
3113acd9a301SAlan Cox 				}
3114acd9a301SAlan Cox 				rv = KERN_INVALID_ADDRESS;
3115d2860f22SDoug Moore 				break;
3116acd9a301SAlan Cox 			}
311783704cc2SDoug Moore 			first_entry = (entry == first_entry) ?
311883704cc2SDoug Moore 			    next_entry : NULL;
3119acd9a301SAlan Cox 			continue;
3120acd9a301SAlan Cox 		}
3121acd9a301SAlan Cox 		vm_map_clip_start(map, entry, start);
3122acd9a301SAlan Cox 		vm_map_clip_end(map, entry, end);
3123acd9a301SAlan Cox 		/*
3124acd9a301SAlan Cox 		 * Mark the entry in case the map lock is released.  (See
3125acd9a301SAlan Cox 		 * above.)
3126acd9a301SAlan Cox 		 */
3127ff3ae454SKonstantin Belousov 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3128ff3ae454SKonstantin Belousov 		    entry->wiring_thread == NULL,
3129ff3ae454SKonstantin Belousov 		    ("owned map entry %p", entry));
3130acd9a301SAlan Cox 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
31310acea7dfSKonstantin Belousov 		entry->wiring_thread = curthread;
313283704cc2SDoug Moore 		next_entry = vm_map_entry_succ(entry);
3133acd9a301SAlan Cox 		/*
3134acd9a301SAlan Cox 		 * Check the map for holes in the specified region.
31359a0cdf94SDoug Moore 		 * If holes_ok, skip this check.
3136acd9a301SAlan Cox 		 */
31379a0cdf94SDoug Moore 		if (!holes_ok &&
313883704cc2SDoug Moore 		    entry->end < end && next_entry->start > entry->end) {
3139acd9a301SAlan Cox 			end = entry->end;
3140acd9a301SAlan Cox 			rv = KERN_INVALID_ADDRESS;
3141d2860f22SDoug Moore 			break;
3142acd9a301SAlan Cox 		}
3143acd9a301SAlan Cox 		/*
31443ffbc0cdSAlan Cox 		 * If system unwiring, require that the entry is system wired.
3145acd9a301SAlan Cox 		 */
31460ada205eSBrian Feldman 		if (!user_unwire &&
31470ada205eSBrian Feldman 		    vm_map_entry_system_wired_count(entry) == 0) {
3148acd9a301SAlan Cox 			end = entry->end;
3149acd9a301SAlan Cox 			rv = KERN_INVALID_ARGUMENT;
3150d2860f22SDoug Moore 			break;
3151acd9a301SAlan Cox 		}
3152acd9a301SAlan Cox 	}
31539a0cdf94SDoug Moore 	need_wakeup = false;
31549a0cdf94SDoug Moore 	if (first_entry == NULL &&
31559a0cdf94SDoug Moore 	    !vm_map_lookup_entry(map, start, &first_entry)) {
31569a0cdf94SDoug Moore 		KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
315783704cc2SDoug Moore 		prev_entry = first_entry;
315883704cc2SDoug Moore 		entry = vm_map_entry_succ(first_entry);
315983704cc2SDoug Moore 	} else {
316083704cc2SDoug Moore 		prev_entry = vm_map_entry_pred(first_entry);
316183704cc2SDoug Moore 		entry = first_entry;
3162acd9a301SAlan Cox 	}
316383704cc2SDoug Moore 	for (; entry->start < end;
316483704cc2SDoug Moore 	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
31650acea7dfSKonstantin Belousov 		/*
31669a0cdf94SDoug Moore 		 * If holes_ok was specified, an empty
31670acea7dfSKonstantin Belousov 		 * space in the unwired region could have been mapped
31680acea7dfSKonstantin Belousov 		 * while the map lock was dropped for draining
31690acea7dfSKonstantin Belousov 		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
31700acea7dfSKonstantin Belousov 		 * could be simultaneously wiring this new mapping
31710acea7dfSKonstantin Belousov 		 * entry.  Detect these cases and skip any entries
31720acea7dfSKonstantin Belousov 		 * not marked as in transition by us.
31730acea7dfSKonstantin Belousov 		 */
31740acea7dfSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
31750acea7dfSKonstantin Belousov 		    entry->wiring_thread != curthread) {
31769a0cdf94SDoug Moore 			KASSERT(holes_ok,
31770acea7dfSKonstantin Belousov 			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
31780acea7dfSKonstantin Belousov 			continue;
31790acea7dfSKonstantin Belousov 		}
31800acea7dfSKonstantin Belousov 
31813ffbc0cdSAlan Cox 		if (rv == KERN_SUCCESS && (!user_unwire ||
31823ffbc0cdSAlan Cox 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
318303462509SAlan Cox 			if (entry->wired_count == 1)
318403462509SAlan Cox 				vm_map_entry_unwire(map, entry);
318503462509SAlan Cox 			else
3186b2f3846aSAlan Cox 				entry->wired_count--;
318754a3a114SMark Johnston 			if (user_unwire)
318854a3a114SMark Johnston 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3189b2f3846aSAlan Cox 		}
31900acea7dfSKonstantin Belousov 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3191ff3ae454SKonstantin Belousov 		    ("vm_map_unwire: in-transition flag missing %p", entry));
3192ff3ae454SKonstantin Belousov 		KASSERT(entry->wiring_thread == curthread,
3193ff3ae454SKonstantin Belousov 		    ("vm_map_unwire: alien wire %p", entry));
3194acd9a301SAlan Cox 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
31950acea7dfSKonstantin Belousov 		entry->wiring_thread = NULL;
3196acd9a301SAlan Cox 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3197acd9a301SAlan Cox 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
31989a0cdf94SDoug Moore 			need_wakeup = true;
3199acd9a301SAlan Cox 		}
320083704cc2SDoug Moore 		vm_map_try_merge_entries(map, prev_entry, entry);
3201acd9a301SAlan Cox 	}
320283704cc2SDoug Moore 	vm_map_try_merge_entries(map, prev_entry, entry);
3203acd9a301SAlan Cox 	vm_map_unlock(map);
3204acd9a301SAlan Cox 	if (need_wakeup)
3205acd9a301SAlan Cox 		vm_map_wakeup(map);
3206acd9a301SAlan Cox 	return (rv);
3207acd9a301SAlan Cox }
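/*
 * Usage sketch (assumed caller, shown for illustration only): the
 * munlock(2) implementation in sys/vm/vm_mmap.c unwires a user range
 * roughly as
 *
 *	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map,
 *	    start, end, VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * while kernel callers pass VM_MAP_WIRE_SYSTEM instead.
 */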
3208acd9a301SAlan Cox 
320954a3a114SMark Johnston static void
321054a3a114SMark Johnston vm_map_wire_user_count_sub(u_long npages)
321154a3a114SMark Johnston {
321254a3a114SMark Johnston 
321354a3a114SMark Johnston 	atomic_subtract_long(&vm_user_wire_count, npages);
321454a3a114SMark Johnston }
321554a3a114SMark Johnston 
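/*
 * Try to charge npages against the global limit on user-wired pages.
 * The atomic_fcmpset_long() loop retries until either the updated count
 * is published without racing another thread or the addition would
 * exceed vm_page_max_user_wired, in which case nothing is charged.
 */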
321654a3a114SMark Johnston static bool
321754a3a114SMark Johnston vm_map_wire_user_count_add(u_long npages)
321854a3a114SMark Johnston {
321954a3a114SMark Johnston 	u_long wired;
322054a3a114SMark Johnston 
322154a3a114SMark Johnston 	wired = vm_user_wire_count;
322254a3a114SMark Johnston 	do {
322354a3a114SMark Johnston 		if (npages + wired > vm_page_max_user_wired)
322454a3a114SMark Johnston 			return (false);
322554a3a114SMark Johnston 	} while (!atomic_fcmpset_long(&vm_user_wire_count, &wired,
322654a3a114SMark Johnston 	    npages + wired));
322754a3a114SMark Johnston 
322854a3a114SMark Johnston 	return (true);
322954a3a114SMark Johnston }
323054a3a114SMark Johnston 
3231acd9a301SAlan Cox /*
323266cd575bSAlan Cox  *	vm_map_wire_entry_failure:
323366cd575bSAlan Cox  *
323466cd575bSAlan Cox  *	Handle a wiring failure on the given entry.
323566cd575bSAlan Cox  *
323666cd575bSAlan Cox  *	The map should be locked.
323766cd575bSAlan Cox  */
323866cd575bSAlan Cox static void
323966cd575bSAlan Cox vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
324066cd575bSAlan Cox     vm_offset_t failed_addr)
324166cd575bSAlan Cox {
324266cd575bSAlan Cox 
324366cd575bSAlan Cox 	VM_MAP_ASSERT_LOCKED(map);
324466cd575bSAlan Cox 	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
324566cd575bSAlan Cox 	    entry->wired_count == 1,
324666cd575bSAlan Cox 	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
324766cd575bSAlan Cox 	KASSERT(failed_addr < entry->end,
324866cd575bSAlan Cox 	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
324966cd575bSAlan Cox 
325066cd575bSAlan Cox 	/*
325166cd575bSAlan Cox 	 * If any pages at the start of this entry were successfully wired,
325266cd575bSAlan Cox 	 * then unwire them.
325366cd575bSAlan Cox 	 */
325466cd575bSAlan Cox 	if (failed_addr > entry->start) {
325566cd575bSAlan Cox 		pmap_unwire(map->pmap, entry->start, failed_addr);
325666cd575bSAlan Cox 		vm_object_unwire(entry->object.vm_object, entry->offset,
325766cd575bSAlan Cox 		    failed_addr - entry->start, PQ_ACTIVE);
325866cd575bSAlan Cox 	}
325966cd575bSAlan Cox 
326066cd575bSAlan Cox 	/*
326166cd575bSAlan Cox 	 * Assign an out-of-range value to represent the failure to wire this
326266cd575bSAlan Cox 	 * entry.
326366cd575bSAlan Cox 	 */
326466cd575bSAlan Cox 	entry->wired_count = -1;
326566cd575bSAlan Cox }
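/*
 * The -1 sentinel stored above lets the cleanup pass at the end of
 * vm_map_wire_locked() distinguish an entry whose wiring failed
 * (wired_count == -1, simply reset to 0 with no unwiring needed) from
 * entries that were successfully wired and must be undone.
 */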
326666cd575bSAlan Cox 
326754a3a114SMark Johnston int
326854a3a114SMark Johnston vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
326954a3a114SMark Johnston {
327054a3a114SMark Johnston 	int rv;
327154a3a114SMark Johnston 
327254a3a114SMark Johnston 	vm_map_lock(map);
327354a3a114SMark Johnston 	rv = vm_map_wire_locked(map, start, end, flags);
327454a3a114SMark Johnston 	vm_map_unlock(map);
327554a3a114SMark Johnston 	return (rv);
327654a3a114SMark Johnston }
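/*
 * Usage sketch (assumed caller, shown for illustration only): the
 * mlock(2) path wires a user range approximately as
 *
 *	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * where a KERN_RESOURCE_SHORTAGE return reports that the
 * vm_page_max_user_wired limit was hit.
 */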
327754a3a114SMark Johnston 
327966cd575bSAlan Cox /*
328054a3a114SMark Johnston  *	vm_map_wire_locked:
3281e27e17b7SAlan Cox  *
328254a3a114SMark Johnston  *	Implements both kernel and user wiring.  Returns with the map locked;
328354a3a114SMark Johnston  *	the lock may be transiently dropped and reacquired along the way.
3284e27e17b7SAlan Cox  */
3285e27e17b7SAlan Cox int
328654a3a114SMark Johnston vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3287e27e17b7SAlan Cox {
328883704cc2SDoug Moore 	vm_map_entry_t entry, first_entry, next_entry, prev_entry;
328966cd575bSAlan Cox 	vm_offset_t faddr, saved_end, saved_start;
329054a3a114SMark Johnston 	u_long npages;
329154a3a114SMark Johnston 	u_int last_timestamp;
329212d7cc84SAlan Cox 	int rv;
329383704cc2SDoug Moore 	bool holes_ok, need_wakeup, user_wire;
3294e4cd31ddSJeff Roberson 	vm_prot_t prot;
3295e27e17b7SAlan Cox 
329654a3a114SMark Johnston 	VM_MAP_ASSERT_LOCKED(map);
329754a3a114SMark Johnston 
329879e9451fSKonstantin Belousov 	if (start == end)
329979e9451fSKonstantin Belousov 		return (KERN_SUCCESS);
3300e4cd31ddSJeff Roberson 	prot = 0;
3301e4cd31ddSJeff Roberson 	if (flags & VM_MAP_WIRE_WRITE)
3302e4cd31ddSJeff Roberson 		prot |= VM_PROT_WRITE;
33039a0cdf94SDoug Moore 	holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
33049a0cdf94SDoug Moore 	user_wire = (flags & VM_MAP_WIRE_USER) != 0;
330512d7cc84SAlan Cox 	VM_MAP_RANGE_CHECK(map, start, end);
3306d1d3f7e1SDoug Moore 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
33079a0cdf94SDoug Moore 		if (holes_ok)
33087cdcf863SDoug Moore 			first_entry = vm_map_entry_succ(first_entry);
3309d1d3f7e1SDoug Moore 		else
331012d7cc84SAlan Cox 			return (KERN_INVALID_ADDRESS);
331112d7cc84SAlan Cox 	}
331283704cc2SDoug Moore 	for (entry = first_entry; entry->start < end; entry = next_entry) {
331312d7cc84SAlan Cox 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
331412d7cc84SAlan Cox 			/*
331512d7cc84SAlan Cox 			 * We have not yet clipped the entry.
331612d7cc84SAlan Cox 			 */
331783704cc2SDoug Moore 			next_entry = vm_map_entry_in_transition(map, start,
331883704cc2SDoug Moore 			    &end, holes_ok, entry);
331983704cc2SDoug Moore 			if (next_entry == NULL) {
332083704cc2SDoug Moore 				if (entry == first_entry)
332112d7cc84SAlan Cox 					return (KERN_INVALID_ADDRESS);
332212d7cc84SAlan Cox 				rv = KERN_INVALID_ADDRESS;
332312d7cc84SAlan Cox 				goto done;
332412d7cc84SAlan Cox 			}
332583704cc2SDoug Moore 			first_entry = (entry == first_entry) ?
332683704cc2SDoug Moore 			    next_entry : NULL;
332712d7cc84SAlan Cox 			continue;
332812d7cc84SAlan Cox 		}
332912d7cc84SAlan Cox 		vm_map_clip_start(map, entry, start);
333012d7cc84SAlan Cox 		vm_map_clip_end(map, entry, end);
333112d7cc84SAlan Cox 		/*
333212d7cc84SAlan Cox 		 * Mark the entry in case the map lock is released.  (See
333312d7cc84SAlan Cox 		 * above.)
333412d7cc84SAlan Cox 		 */
3335ff3ae454SKonstantin Belousov 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3336ff3ae454SKonstantin Belousov 		    entry->wiring_thread == NULL,
3337ff3ae454SKonstantin Belousov 		    ("owned map entry %p", entry));
333812d7cc84SAlan Cox 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
33390acea7dfSKonstantin Belousov 		entry->wiring_thread = curthread;
3340e4cd31ddSJeff Roberson 		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3341e4cd31ddSJeff Roberson 		    || (entry->protection & prot) != prot) {
3342529ab57bSKonstantin Belousov 			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
33439a0cdf94SDoug Moore 			if (!holes_ok) {
33446d7e8091SKonstantin Belousov 				end = entry->end;
33456d7e8091SKonstantin Belousov 				rv = KERN_INVALID_ADDRESS;
33466d7e8091SKonstantin Belousov 				goto done;
33476d7e8091SKonstantin Belousov 			}
334838e220e8SDoug Moore 		} else if (entry->wired_count == 0) {
33490ada205eSBrian Feldman 			entry->wired_count++;
335054a3a114SMark Johnston 
335154a3a114SMark Johnston 			npages = atop(entry->end - entry->start);
335254a3a114SMark Johnston 			if (user_wire && !vm_map_wire_user_count_add(npages)) {
335354a3a114SMark Johnston 				vm_map_wire_entry_failure(map, entry,
335454a3a114SMark Johnston 				    entry->start);
335554a3a114SMark Johnston 				end = entry->end;
335654a3a114SMark Johnston 				rv = KERN_RESOURCE_SHORTAGE;
335754a3a114SMark Johnston 				goto done;
335854a3a114SMark Johnston 			}
335966cd575bSAlan Cox 
336012d7cc84SAlan Cox 			/*
336112d7cc84SAlan Cox 			 * Release the map lock, relying on the in-transition
3362a5db445dSMax Laier 			 * mark.  Mark the map busy for fork.
336312d7cc84SAlan Cox 			 */
336454a3a114SMark Johnston 			saved_start = entry->start;
336554a3a114SMark Johnston 			saved_end = entry->end;
3366312df2c1SDoug Moore 			last_timestamp = map->timestamp;
3367a5db445dSMax Laier 			vm_map_busy(map);
336812d7cc84SAlan Cox 			vm_map_unlock(map);
336966cd575bSAlan Cox 
33700b695684SAlan Cox 			faddr = saved_start;
33710b695684SAlan Cox 			do {
337266cd575bSAlan Cox 				/*
337366cd575bSAlan Cox 				 * Simulate a fault to get the page and enter
337466cd575bSAlan Cox 				 * it into the physical map.
337566cd575bSAlan Cox 				 */
3376df08823dSKonstantin Belousov 				if ((rv = vm_fault(map, faddr,
3377df08823dSKonstantin Belousov 				    VM_PROT_NONE, VM_FAULT_WIRE, NULL)) !=
3378df08823dSKonstantin Belousov 				    KERN_SUCCESS)
337966cd575bSAlan Cox 					break;
33800b695684SAlan Cox 			} while ((faddr += PAGE_SIZE) < saved_end);
338112d7cc84SAlan Cox 			vm_map_lock(map);
3382a5db445dSMax Laier 			vm_map_unbusy(map);
338312d7cc84SAlan Cox 			if (last_timestamp + 1 != map->timestamp) {
338412d7cc84SAlan Cox 				/*
338512d7cc84SAlan Cox 				 * Look again for the entry because the map was
338612d7cc84SAlan Cox 				 * modified while it was unlocked.  The entry
338712d7cc84SAlan Cox 				 * may have been clipped, but NOT merged or
338812d7cc84SAlan Cox 				 * deleted.
338912d7cc84SAlan Cox 				 */
33909a0cdf94SDoug Moore 				if (!vm_map_lookup_entry(map, saved_start,
339183704cc2SDoug Moore 				    &next_entry))
33929a0cdf94SDoug Moore 					KASSERT(false,
33939a0cdf94SDoug Moore 					    ("vm_map_wire: lookup failed"));
339483704cc2SDoug Moore 				first_entry = (entry == first_entry) ?
339583704cc2SDoug Moore 				    next_entry : NULL;
339683704cc2SDoug Moore 				for (entry = next_entry; entry->end < saved_end;
339783704cc2SDoug Moore 				    entry = vm_map_entry_succ(entry)) {
339866cd575bSAlan Cox 					/*
339966cd575bSAlan Cox 					 * In case of failure, handle entries
340066cd575bSAlan Cox 					 * that were not fully wired here;
340166cd575bSAlan Cox 					 * fully wired entries are handled
340266cd575bSAlan Cox 					 * later.
340366cd575bSAlan Cox 					 */
340466cd575bSAlan Cox 					if (rv != KERN_SUCCESS &&
340566cd575bSAlan Cox 					    faddr < entry->end)
340666cd575bSAlan Cox 						vm_map_wire_entry_failure(map,
340766cd575bSAlan Cox 						    entry, faddr);
340812d7cc84SAlan Cox 				}
340928c58286SAlan Cox 			}
341012d7cc84SAlan Cox 			if (rv != KERN_SUCCESS) {
341166cd575bSAlan Cox 				vm_map_wire_entry_failure(map, entry, faddr);
341254a3a114SMark Johnston 				if (user_wire)
341354a3a114SMark Johnston 					vm_map_wire_user_count_sub(npages);
341412d7cc84SAlan Cox 				end = entry->end;
341512d7cc84SAlan Cox 				goto done;
341612d7cc84SAlan Cox 			}
34170ada205eSBrian Feldman 		} else if (!user_wire ||
34180ada205eSBrian Feldman 			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
34190ada205eSBrian Feldman 			entry->wired_count++;
342012d7cc84SAlan Cox 		}
342112d7cc84SAlan Cox 		/*
342212d7cc84SAlan Cox 		 * Check the map for holes in the specified region.
34239a0cdf94SDoug Moore 		 * If holes_ok was specified, skip this check.
342412d7cc84SAlan Cox 		 */
342583704cc2SDoug Moore 		next_entry = vm_map_entry_succ(entry);
34269a0cdf94SDoug Moore 		if (!holes_ok &&
342783704cc2SDoug Moore 		    entry->end < end && next_entry->start > entry->end) {
342812d7cc84SAlan Cox 			end = entry->end;
342912d7cc84SAlan Cox 			rv = KERN_INVALID_ADDRESS;
343012d7cc84SAlan Cox 			goto done;
343112d7cc84SAlan Cox 		}
343212d7cc84SAlan Cox 	}
343312d7cc84SAlan Cox 	rv = KERN_SUCCESS;
343412d7cc84SAlan Cox done:
34359a0cdf94SDoug Moore 	need_wakeup = false;
34369a0cdf94SDoug Moore 	if (first_entry == NULL &&
34379a0cdf94SDoug Moore 	    !vm_map_lookup_entry(map, start, &first_entry)) {
34389a0cdf94SDoug Moore 		KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
343983704cc2SDoug Moore 		prev_entry = first_entry;
344083704cc2SDoug Moore 		entry = vm_map_entry_succ(first_entry);
344183704cc2SDoug Moore 	} else {
344283704cc2SDoug Moore 		prev_entry = vm_map_entry_pred(first_entry);
344383704cc2SDoug Moore 		entry = first_entry;
344412d7cc84SAlan Cox 	}
344583704cc2SDoug Moore 	for (; entry->start < end;
344683704cc2SDoug Moore 	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
34470acea7dfSKonstantin Belousov 		/*
34489a0cdf94SDoug Moore 		 * If holes_ok was specified, an empty
34490acea7dfSKonstantin Belousov 		 * space in the unwired region could have been mapped
34500acea7dfSKonstantin Belousov 		 * while the map lock was dropped for faulting in the
34510acea7dfSKonstantin Belousov 		 * pages or draining MAP_ENTRY_IN_TRANSITION.
34520acea7dfSKonstantin Belousov 		 * Moreover, another thread could be simultaneously
34530acea7dfSKonstantin Belousov 		 * wiring this new mapping entry.  Detect these cases
3454546bb2d7SKonstantin Belousov 		 * and skip any entries not marked as in transition by us.
34550acea7dfSKonstantin Belousov 		 */
34560acea7dfSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
34570acea7dfSKonstantin Belousov 		    entry->wiring_thread != curthread) {
34589a0cdf94SDoug Moore 			KASSERT(holes_ok,
34590acea7dfSKonstantin Belousov 			    ("vm_map_wire: !HOLESOK and new/changed entry"));
34600acea7dfSKonstantin Belousov 			continue;
34610acea7dfSKonstantin Belousov 		}
34620acea7dfSKonstantin Belousov 
3463b71f9b0dSDoug Moore 		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3464b71f9b0dSDoug Moore 			/* do nothing */
3465b71f9b0dSDoug Moore 		} else if (rv == KERN_SUCCESS) {
346612d7cc84SAlan Cox 			if (user_wire)
346712d7cc84SAlan Cox 				entry->eflags |= MAP_ENTRY_USER_WIRED;
346828c58286SAlan Cox 		} else if (entry->wired_count == -1) {
346928c58286SAlan Cox 			/*
347028c58286SAlan Cox 			 * Wiring failed on this entry.  Thus, unwiring is
347128c58286SAlan Cox 			 * unnecessary.
347228c58286SAlan Cox 			 */
347328c58286SAlan Cox 			entry->wired_count = 0;
347403462509SAlan Cox 		} else if (!user_wire ||
347503462509SAlan Cox 		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
347666cd575bSAlan Cox 			/*
347766cd575bSAlan Cox 			 * Undo the wiring.  Wiring succeeded on this entry
347866cd575bSAlan Cox 			 * but failed on a later entry.
347966cd575bSAlan Cox 			 */
348054a3a114SMark Johnston 			if (entry->wired_count == 1) {
348103462509SAlan Cox 				vm_map_entry_unwire(map, entry);
348254a3a114SMark Johnston 				if (user_wire)
348354a3a114SMark Johnston 					vm_map_wire_user_count_sub(
348454a3a114SMark Johnston 					    atop(entry->end - entry->start));
348554a3a114SMark Johnston 			} else
348612d7cc84SAlan Cox 				entry->wired_count--;
348712d7cc84SAlan Cox 		}
34880acea7dfSKonstantin Belousov 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
34890acea7dfSKonstantin Belousov 		    ("vm_map_wire: in-transition flag missing %p", entry));
34900acea7dfSKonstantin Belousov 		KASSERT(entry->wiring_thread == curthread,
34910acea7dfSKonstantin Belousov 		    ("vm_map_wire: alien wire %p", entry));
34920acea7dfSKonstantin Belousov 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
34930acea7dfSKonstantin Belousov 		    MAP_ENTRY_WIRE_SKIPPED);
34940acea7dfSKonstantin Belousov 		entry->wiring_thread = NULL;
349512d7cc84SAlan Cox 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
349612d7cc84SAlan Cox 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
34979a0cdf94SDoug Moore 			need_wakeup = true;
349812d7cc84SAlan Cox 		}
349983704cc2SDoug Moore 		vm_map_try_merge_entries(map, prev_entry, entry);
350012d7cc84SAlan Cox 	}
350183704cc2SDoug Moore 	vm_map_try_merge_entries(map, prev_entry, entry);
350212d7cc84SAlan Cox 	if (need_wakeup)
350312d7cc84SAlan Cox 		vm_map_wakeup(map);
350412d7cc84SAlan Cox 	return (rv);
3505e27e17b7SAlan Cox }
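/*
 * Note: with VM_MAP_WIRE_HOLESOK, unmapped gaps and entries lacking the
 * required protection are skipped (the latter tagged
 * MAP_ENTRY_WIRE_SKIPPED) rather than failing the whole request, which
 * suits mlockall(2)-style callers that sweep an entire map.
 */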
3506e27e17b7SAlan Cox 
3507e27e17b7SAlan Cox /*
3508950f8459SAlan Cox  * vm_map_sync
3509df8bae1dSRodney W. Grimes  *
3510df8bae1dSRodney W. Grimes  * Push any dirty cached pages in the address range to their pager.
3511df8bae1dSRodney W. Grimes  * If syncio is TRUE, dirty pages are written synchronously.
3512df8bae1dSRodney W. Grimes  * If invalidate is TRUE, any cached pages are freed as well.
3513df8bae1dSRodney W. Grimes  *
3514637315edSAlan Cox  * If the size of the region from start to end is zero, we are
3515637315edSAlan Cox  * supposed to flush all modified pages within the region containing
3516637315edSAlan Cox  * start.  Unfortunately, a region can be split or coalesced with
3517637315edSAlan Cox  * neighboring regions, making it difficult to determine what the
3518637315edSAlan Cox  * original region was.  Therefore, we approximate this requirement by
3519637315edSAlan Cox  * flushing the current region containing start.
3520637315edSAlan Cox  *
3521df8bae1dSRodney W. Grimes  * Returns an error if any part of the specified range is not mapped.
3522df8bae1dSRodney W. Grimes  */
3523df8bae1dSRodney W. Grimes int
3524950f8459SAlan Cox vm_map_sync(
35251b40f8c0SMatthew Dillon 	vm_map_t map,
35261b40f8c0SMatthew Dillon 	vm_offset_t start,
35271b40f8c0SMatthew Dillon 	vm_offset_t end,
35281b40f8c0SMatthew Dillon 	boolean_t syncio,
35291b40f8c0SMatthew Dillon 	boolean_t invalidate)
3530df8bae1dSRodney W. Grimes {
35312767c9f3SDoug Moore 	vm_map_entry_t entry, first_entry, next_entry;
3532df8bae1dSRodney W. Grimes 	vm_size_t size;
3533df8bae1dSRodney W. Grimes 	vm_object_t object;
3534a316d390SJohn Dyson 	vm_ooffset_t offset;
3535e53fa61bSKonstantin Belousov 	unsigned int last_timestamp;
3536126d6082SKonstantin Belousov 	boolean_t failed;
3537df8bae1dSRodney W. Grimes 
3538df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
3539df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
35402767c9f3SDoug Moore 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
3541df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
3542df8bae1dSRodney W. Grimes 		return (KERN_INVALID_ADDRESS);
3543d1d3f7e1SDoug Moore 	} else if (start == end) {
35442767c9f3SDoug Moore 		start = first_entry->start;
35452767c9f3SDoug Moore 		end = first_entry->end;
3546df8bae1dSRodney W. Grimes 	}
3547df8bae1dSRodney W. Grimes 	/*
3548b7b7cd44SAlan Cox 	 * Make a first pass to check for user-wired memory and holes.
3549df8bae1dSRodney W. Grimes 	 */
35502767c9f3SDoug Moore 	for (entry = first_entry; entry->start < end; entry = next_entry) {
35512767c9f3SDoug Moore 		if (invalidate &&
35522767c9f3SDoug Moore 		    (entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3553df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
3554df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ARGUMENT);
3555df8bae1dSRodney W. Grimes 		}
35562767c9f3SDoug Moore 		next_entry = vm_map_entry_succ(entry);
35572767c9f3SDoug Moore 		if (end > entry->end &&
35582767c9f3SDoug Moore 		    entry->end != next_entry->start) {
3559df8bae1dSRodney W. Grimes 			vm_map_unlock_read(map);
3560df8bae1dSRodney W. Grimes 			return (KERN_INVALID_ADDRESS);
3561df8bae1dSRodney W. Grimes 		}
3562df8bae1dSRodney W. Grimes 	}
3563df8bae1dSRodney W. Grimes 
35642cf13952SAlan Cox 	if (invalidate)
3565bc105a67SAlan Cox 		pmap_remove(map->pmap, start, end);
3566126d6082SKonstantin Belousov 	failed = FALSE;
35672cf13952SAlan Cox 
3568df8bae1dSRodney W. Grimes 	/*
3569df8bae1dSRodney W. Grimes 	 * Make a second pass, cleaning/uncaching pages from the indicated
3570df8bae1dSRodney W. Grimes 	 * objects as we go.
3571df8bae1dSRodney W. Grimes 	 */
35722767c9f3SDoug Moore 	for (entry = first_entry; entry->start < end;) {
35732767c9f3SDoug Moore 		offset = entry->offset + (start - entry->start);
35742767c9f3SDoug Moore 		size = (end <= entry->end ? end : entry->end) - start;
35752767c9f3SDoug Moore 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3576c0877f10SJohn Dyson 			vm_map_t smap;
3577df8bae1dSRodney W. Grimes 			vm_map_entry_t tentry;
3578df8bae1dSRodney W. Grimes 			vm_size_t tsize;
3579df8bae1dSRodney W. Grimes 
35802767c9f3SDoug Moore 			smap = entry->object.sub_map;
3581df8bae1dSRodney W. Grimes 			vm_map_lock_read(smap);
3582df8bae1dSRodney W. Grimes 			(void) vm_map_lookup_entry(smap, offset, &tentry);
3583df8bae1dSRodney W. Grimes 			tsize = tentry->end - offset;
3584df8bae1dSRodney W. Grimes 			if (tsize < size)
3585df8bae1dSRodney W. Grimes 				size = tsize;
3586df8bae1dSRodney W. Grimes 			object = tentry->object.vm_object;
3587df8bae1dSRodney W. Grimes 			offset = tentry->offset + (offset - tentry->start);
3588df8bae1dSRodney W. Grimes 			vm_map_unlock_read(smap);
3589df8bae1dSRodney W. Grimes 		} else {
35902767c9f3SDoug Moore 			object = entry->object.vm_object;
3591df8bae1dSRodney W. Grimes 		}
3592e53fa61bSKonstantin Belousov 		vm_object_reference(object);
3593e53fa61bSKonstantin Belousov 		last_timestamp = map->timestamp;
3594e53fa61bSKonstantin Belousov 		vm_map_unlock_read(map);
3595126d6082SKonstantin Belousov 		if (!vm_object_sync(object, offset, size, syncio, invalidate))
3596126d6082SKonstantin Belousov 			failed = TRUE;
3597df8bae1dSRodney W. Grimes 		start += size;
3598e53fa61bSKonstantin Belousov 		vm_object_deallocate(object);
3599e53fa61bSKonstantin Belousov 		vm_map_lock_read(map);
3600d1d3f7e1SDoug Moore 		if (last_timestamp == map->timestamp ||
36012767c9f3SDoug Moore 		    !vm_map_lookup_entry(map, start, &entry))
36022767c9f3SDoug Moore 			entry = vm_map_entry_succ(entry);
3603df8bae1dSRodney W. Grimes 	}
3604df8bae1dSRodney W. Grimes 
3605df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
3606126d6082SKonstantin Belousov 	return (failed ? KERN_FAILURE : KERN_SUCCESS);
3607df8bae1dSRodney W. Grimes }
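/*
 * Usage sketch (assumed caller, shown for illustration only): msync(2)
 * is built on this routine, passing syncio = (flags & MS_ASYNC) == 0
 * and invalidate = (flags & MS_INVALIDATE) != 0, roughly as
 *
 *	rv = vm_map_sync(map, addr, addr + size,
 *	    (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0);
 */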
3608df8bae1dSRodney W. Grimes 
3609df8bae1dSRodney W. Grimes /*
3610df8bae1dSRodney W. Grimes  *	vm_map_entry_unwire:	[ internal use only ]
3611df8bae1dSRodney W. Grimes  *
3612df8bae1dSRodney W. Grimes  *	Make the region specified by this entry pageable.
3613df8bae1dSRodney W. Grimes  *
3614df8bae1dSRodney W. Grimes  *	The map in question should be locked.
3615df8bae1dSRodney W. Grimes  *	[This is the reason for this routine's existence.]
3616df8bae1dSRodney W. Grimes  */
36170362d7d7SJohn Dyson static void
36181b40f8c0SMatthew Dillon vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3619df8bae1dSRodney W. Grimes {
362054a3a114SMark Johnston 	vm_size_t size;
362103462509SAlan Cox 
362203462509SAlan Cox 	VM_MAP_ASSERT_LOCKED(map);
362303462509SAlan Cox 	KASSERT(entry->wired_count > 0,
362403462509SAlan Cox 	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
362554a3a114SMark Johnston 
362654a3a114SMark Johnston 	size = entry->end - entry->start;
362754a3a114SMark Johnston 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
362854a3a114SMark Johnston 		vm_map_wire_user_count_sub(atop(size));
362903462509SAlan Cox 	pmap_unwire(map->pmap, entry->start, entry->end);
363054a3a114SMark Johnston 	vm_object_unwire(entry->object.vm_object, entry->offset, size,
363154a3a114SMark Johnston 	    PQ_ACTIVE);
3632df8bae1dSRodney W. Grimes 	entry->wired_count = 0;
3633df8bae1dSRodney W. Grimes }
3634df8bae1dSRodney W. Grimes 
36350b367bd8SKonstantin Belousov static void
36360b367bd8SKonstantin Belousov vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
36370b367bd8SKonstantin Belousov {
36380b367bd8SKonstantin Belousov 
36390b367bd8SKonstantin Belousov 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
36400b367bd8SKonstantin Belousov 		vm_object_deallocate(entry->object.vm_object);
36410b367bd8SKonstantin Belousov 	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
36420b367bd8SKonstantin Belousov }
36430b367bd8SKonstantin Belousov 
3644df8bae1dSRodney W. Grimes /*
3645df8bae1dSRodney W. Grimes  *	vm_map_entry_delete:	[ internal use only ]
3646df8bae1dSRodney W. Grimes  *
3647df8bae1dSRodney W. Grimes  *	Deallocate the given entry from the target map.
3648df8bae1dSRodney W. Grimes  */
36490362d7d7SJohn Dyson static void
36501b40f8c0SMatthew Dillon vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3651df8bae1dSRodney W. Grimes {
365232a89c32SAlan Cox 	vm_object_t object;
36533364c323SKonstantin Belousov 	vm_pindex_t offidxstart, offidxend, count, size1;
3654d1780e8dSKonstantin Belousov 	vm_size_t size;
365532a89c32SAlan Cox 
36569f701172SKonstantin Belousov 	vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
36573364c323SKonstantin Belousov 	object = entry->object.vm_object;
365819bd0d9cSKonstantin Belousov 
365919bd0d9cSKonstantin Belousov 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
366019bd0d9cSKonstantin Belousov 		MPASS(entry->cred == NULL);
366119bd0d9cSKonstantin Belousov 		MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
366219bd0d9cSKonstantin Belousov 		MPASS(object == NULL);
366319bd0d9cSKonstantin Belousov 		vm_map_entry_deallocate(entry, map->system_map);
366419bd0d9cSKonstantin Belousov 		return;
366519bd0d9cSKonstantin Belousov 	}
366619bd0d9cSKonstantin Belousov 
36673364c323SKonstantin Belousov 	size = entry->end - entry->start;
36683364c323SKonstantin Belousov 	map->size -= size;
36693364c323SKonstantin Belousov 
3670ef694c1aSEdward Tomasz Napierala 	if (entry->cred != NULL) {
3671ef694c1aSEdward Tomasz Napierala 		swap_release_by_cred(size, entry->cred);
3672ef694c1aSEdward Tomasz Napierala 		crfree(entry->cred);
36733364c323SKonstantin Belousov 	}
3674df8bae1dSRodney W. Grimes 
367563967687SJeff Roberson 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
367663967687SJeff Roberson 		entry->object.vm_object = NULL;
367763967687SJeff Roberson 	} else if ((object->flags & OBJ_ANON) != 0 ||
367863967687SJeff Roberson 	    object == kernel_object) {
3679ef694c1aSEdward Tomasz Napierala 		KASSERT(entry->cred == NULL || object->cred == NULL ||
36803364c323SKonstantin Belousov 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3681ef694c1aSEdward Tomasz Napierala 		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3682d1780e8dSKonstantin Belousov 		count = atop(size);
368332a89c32SAlan Cox 		offidxstart = OFF_TO_IDX(entry->offset);
368432a89c32SAlan Cox 		offidxend = offidxstart + count;
368589f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
368663967687SJeff Roberson 		if (object->ref_count != 1 &&
368763967687SJeff Roberson 		    ((object->flags & OBJ_ONEMAPPING) != 0 ||
36882e47807cSJeff Roberson 		    object == kernel_object)) {
368932a89c32SAlan Cox 			vm_object_collapse(object);
36906bbee8e2SAlan Cox 
36916bbee8e2SAlan Cox 			/*
36926bbee8e2SAlan Cox 			 * The option OBJPR_NOTMAPPED can be passed here
36936bbee8e2SAlan Cox 			 * because vm_map_delete() already performed
36946bbee8e2SAlan Cox 			 * pmap_remove() on the only mapping to this range
36956bbee8e2SAlan Cox 			 * of pages.
36966bbee8e2SAlan Cox 			 */
36976bbee8e2SAlan Cox 			vm_object_page_remove(object, offidxstart, offidxend,
36986bbee8e2SAlan Cox 			    OBJPR_NOTMAPPED);
369932a89c32SAlan Cox 			if (object->type == OBJT_SWAP)
37009a4ee196SKonstantin Belousov 				swap_pager_freespace(object, offidxstart,
37019a4ee196SKonstantin Belousov 				    count);
370232a89c32SAlan Cox 			if (offidxend >= object->size &&
37033364c323SKonstantin Belousov 			    offidxstart < object->size) {
37043364c323SKonstantin Belousov 				size1 = object->size;
370532a89c32SAlan Cox 				object->size = offidxstart;
3706ef694c1aSEdward Tomasz Napierala 				if (object->cred != NULL) {
37073364c323SKonstantin Belousov 					size1 -= object->size;
37083364c323SKonstantin Belousov 					KASSERT(object->charge >= ptoa(size1),
37099a4ee196SKonstantin Belousov 					    ("object %p charge < 0", object));
37109a4ee196SKonstantin Belousov 					swap_release_by_cred(ptoa(size1),
37119a4ee196SKonstantin Belousov 					    object->cred);
37123364c323SKonstantin Belousov 					object->charge -= ptoa(size1);
37133364c323SKonstantin Belousov 				}
37143364c323SKonstantin Belousov 			}
371532a89c32SAlan Cox 		}
371689f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
371763967687SJeff Roberson 	}
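	/*
	 * System-map entries are freed immediately.  User-map entries are
	 * instead queued on the thread's deferred list and freed by
	 * vm_map_process_deferred() after the map lock is dropped, so that
	 * object and vnode locks need not be acquired with the map locked.
	 */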
37180b367bd8SKonstantin Belousov 	if (map->system_map)
37190b367bd8SKonstantin Belousov 		vm_map_entry_deallocate(entry, TRUE);
37200b367bd8SKonstantin Belousov 	else {
37217cdcf863SDoug Moore 		entry->defer_next = curthread->td_map_def_user;
37220b367bd8SKonstantin Belousov 		curthread->td_map_def_user = entry;
37230b367bd8SKonstantin Belousov 	}
3724df8bae1dSRodney W. Grimes }
3725df8bae1dSRodney W. Grimes 
3726df8bae1dSRodney W. Grimes /*
3727df8bae1dSRodney W. Grimes  *	vm_map_delete:	[ internal use only ]
3728df8bae1dSRodney W. Grimes  *
3729df8bae1dSRodney W. Grimes  *	Deallocates the given address range from the target
3730df8bae1dSRodney W. Grimes  *	map.
3731df8bae1dSRodney W. Grimes  */
3732df8bae1dSRodney W. Grimes int
3733655c3490SKonstantin Belousov vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3734df8bae1dSRodney W. Grimes {
3735c0877f10SJohn Dyson 	vm_map_entry_t entry;
3736d1d3f7e1SDoug Moore 	vm_map_entry_t first_entry;
3737df8bae1dSRodney W. Grimes 
37383a0916b8SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(map);
373979e9451fSKonstantin Belousov 	if (start == end)
374079e9451fSKonstantin Belousov 		return (KERN_SUCCESS);
37413a0916b8SKonstantin Belousov 
3742df8bae1dSRodney W. Grimes 	/*
3743df8bae1dSRodney W. Grimes 	 * Find the start of the region, and clip it
3744df8bae1dSRodney W. Grimes 	 */
3745d1d3f7e1SDoug Moore 	if (!vm_map_lookup_entry(map, start, &first_entry))
37467cdcf863SDoug Moore 		entry = vm_map_entry_succ(first_entry);
3747d1d3f7e1SDoug Moore 	else {
3748d1d3f7e1SDoug Moore 		entry = first_entry;
3749df8bae1dSRodney W. Grimes 		vm_map_clip_start(map, entry, start);
3750d1d3f7e1SDoug Moore 	}
3751df8bae1dSRodney W. Grimes 
3752df8bae1dSRodney W. Grimes 	/*
3753df8bae1dSRodney W. Grimes 	 * Step through all entries in this region
3754df8bae1dSRodney W. Grimes 	 */
37551c5196c3SKonstantin Belousov 	while (entry->start < end) {
3756df8bae1dSRodney W. Grimes 		vm_map_entry_t next;
3757df8bae1dSRodney W. Grimes 
375873b2baceSAlan Cox 		/*
375973b2baceSAlan Cox 		 * Wait for wiring or unwiring of an entry to complete.
37607c938963SBrian Feldman 		 * Also wait for any system wirings to disappear on
37617c938963SBrian Feldman 		 * user maps.
376273b2baceSAlan Cox 		 */
37637c938963SBrian Feldman 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
37647c938963SBrian Feldman 		    (vm_map_pmap(map) != kernel_pmap &&
37657c938963SBrian Feldman 		    vm_map_entry_system_wired_count(entry) != 0)) {
376673b2baceSAlan Cox 			unsigned int last_timestamp;
376773b2baceSAlan Cox 			vm_offset_t saved_start;
3768d1d3f7e1SDoug Moore 			vm_map_entry_t tmp_entry;
376973b2baceSAlan Cox 
377073b2baceSAlan Cox 			saved_start = entry->start;
377173b2baceSAlan Cox 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
377273b2baceSAlan Cox 			last_timestamp = map->timestamp;
37738ce2d00aSPawel Jakub Dawidek 			(void) vm_map_unlock_and_wait(map, 0);
377473b2baceSAlan Cox 			vm_map_lock(map);
3775d1d3f7e1SDoug Moore 			if (last_timestamp + 1 != map->timestamp) {
377673b2baceSAlan Cox 				/*
377773b2baceSAlan Cox 				 * Look again for the entry because the map was
3778d1d3f7e1SDoug Moore 				 * modified while it was unlocked.
3779d1d3f7e1SDoug Moore 				 * Specifically, the entry may have been
3780d1d3f7e1SDoug Moore 				 * clipped, merged, or deleted.
378173b2baceSAlan Cox 				 */
3782d1d3f7e1SDoug Moore 				if (!vm_map_lookup_entry(map, saved_start,
3783d1d3f7e1SDoug Moore 				    &tmp_entry))
37847cdcf863SDoug Moore 					entry = vm_map_entry_succ(tmp_entry);
3785d1d3f7e1SDoug Moore 				else {
3786d1d3f7e1SDoug Moore 					entry = tmp_entry;
3787d1d3f7e1SDoug Moore 					vm_map_clip_start(map, entry,
3788d1d3f7e1SDoug Moore 					    saved_start);
3789d1d3f7e1SDoug Moore 				}
3790d1d3f7e1SDoug Moore 			}
379173b2baceSAlan Cox 			continue;
379273b2baceSAlan Cox 		}
3793df8bae1dSRodney W. Grimes 		vm_map_clip_end(map, entry, end);
3794df8bae1dSRodney W. Grimes 
37957cdcf863SDoug Moore 		next = vm_map_entry_succ(entry);
3796df8bae1dSRodney W. Grimes 
3797df8bae1dSRodney W. Grimes 		/*
37980d94caffSDavid Greenman 		 * Unwire before removing addresses from the pmap; otherwise,
37990d94caffSDavid Greenman 		 * unwiring will put the entries back in the pmap.
3800df8bae1dSRodney W. Grimes 		 */
3801be7be412SKonstantin Belousov 		if (entry->wired_count != 0)
3802df8bae1dSRodney W. Grimes 			vm_map_entry_unwire(map, entry);
3803df8bae1dSRodney W. Grimes 
380432f0fefcSKonstantin Belousov 		/*
380532f0fefcSKonstantin Belousov 		 * Remove mappings for the pages, but only if the
380632f0fefcSKonstantin Belousov 		 * mappings could exist.  For instance, it does not
380732f0fefcSKonstantin Belousov 		 * make sense to call pmap_remove() for guard entries.
380832f0fefcSKonstantin Belousov 		 */
380932f0fefcSKonstantin Belousov 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
381032f0fefcSKonstantin Belousov 		    entry->object.vm_object != NULL)
381132a89c32SAlan Cox 			pmap_remove(map->pmap, entry->start, entry->end);
3812df8bae1dSRodney W. Grimes 
3813fa50a355SKonstantin Belousov 		if (entry->end == map->anon_loc)
3814fa50a355SKonstantin Belousov 			map->anon_loc = entry->start;
3815fa50a355SKonstantin Belousov 
3816df8bae1dSRodney W. Grimes 		/*
3817e608cc3cSKonstantin Belousov 		 * Delete the entry only after removing all pmap
3818e608cc3cSKonstantin Belousov 		 * entries pointing to its pages.  (Otherwise, its
3819e608cc3cSKonstantin Belousov 		 * page frames may be reallocated, and any modify bits
3820e608cc3cSKonstantin Belousov 		 * will be set in the wrong object!)
3821df8bae1dSRodney W. Grimes 		 */
3822df8bae1dSRodney W. Grimes 		vm_map_entry_delete(map, entry);
3823df8bae1dSRodney W. Grimes 		entry = next;
3824df8bae1dSRodney W. Grimes 	}
3825df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
3826df8bae1dSRodney W. Grimes }
3827df8bae1dSRodney W. Grimes 
3828df8bae1dSRodney W. Grimes /*
3829df8bae1dSRodney W. Grimes  *	vm_map_remove:
3830df8bae1dSRodney W. Grimes  *
3831df8bae1dSRodney W. Grimes  *	Remove the given address range from the target map.
3832df8bae1dSRodney W. Grimes  *	This is the exported form of vm_map_delete.
3833df8bae1dSRodney W. Grimes  */
3834df8bae1dSRodney W. Grimes int
38351b40f8c0SMatthew Dillon vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3836df8bae1dSRodney W. Grimes {
38376eaee3feSAlan Cox 	int result;
3838df8bae1dSRodney W. Grimes 
3839df8bae1dSRodney W. Grimes 	vm_map_lock(map);
3840df8bae1dSRodney W. Grimes 	VM_MAP_RANGE_CHECK(map, start, end);
3841655c3490SKonstantin Belousov 	result = vm_map_delete(map, start, end);
3842df8bae1dSRodney W. Grimes 	vm_map_unlock(map);
3843df8bae1dSRodney W. Grimes 	return (result);
3844df8bae1dSRodney W. Grimes }
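/*
 * Usage sketch (illustrative): kernel subsystems tear down a range of a
 * map they own with, e.g.,
 *
 *	(void)vm_map_remove(kernel_map, addr, addr + size);
 *
 * which takes the map lock itself, unlike vm_map_delete().
 */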
3845df8bae1dSRodney W. Grimes 
3846df8bae1dSRodney W. Grimes /*
3847df8bae1dSRodney W. Grimes  *	vm_map_check_protection:
3848df8bae1dSRodney W. Grimes  *
38492d5c7e45SMatthew Dillon  *	Assert that the target map allows the specified privilege on the
38502d5c7e45SMatthew Dillon  *	entire address region given.  The entire region must be allocated.
38512d5c7e45SMatthew Dillon  *
38522d5c7e45SMatthew Dillon  *	WARNING!  This code does not and should not check whether the
38532d5c7e45SMatthew Dillon  *	contents of the region are accessible.  For example, a smaller file
38542d5c7e45SMatthew Dillon  *	might be mapped into a larger address space.
38552d5c7e45SMatthew Dillon  *
38562d5c7e45SMatthew Dillon  *	NOTE!  This code is also called by munmap().
3857d8834602SAlan Cox  *
3858d8834602SAlan Cox  *	The map must be locked.  A read lock is sufficient.
3859df8bae1dSRodney W. Grimes  */
38600d94caffSDavid Greenman boolean_t
3861b9dcd593SBruce Evans vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3862b9dcd593SBruce Evans 			vm_prot_t protection)
3863df8bae1dSRodney W. Grimes {
3864c0877f10SJohn Dyson 	vm_map_entry_t entry;
3865d1d3f7e1SDoug Moore 	vm_map_entry_t tmp_entry;
3866df8bae1dSRodney W. Grimes 
3867d1d3f7e1SDoug Moore 	if (!vm_map_lookup_entry(map, start, &tmp_entry))
3868df8bae1dSRodney W. Grimes 		return (FALSE);
3869d1d3f7e1SDoug Moore 	entry = tmp_entry;
3870df8bae1dSRodney W. Grimes 
3871df8bae1dSRodney W. Grimes 	while (start < end) {
3872df8bae1dSRodney W. Grimes 		/*
3873df8bae1dSRodney W. Grimes 		 * No holes allowed!
3874df8bae1dSRodney W. Grimes 		 */
3875d8834602SAlan Cox 		if (start < entry->start)
3876df8bae1dSRodney W. Grimes 			return (FALSE);
3877df8bae1dSRodney W. Grimes 		/*
3878df8bae1dSRodney W. Grimes 		 * Check protection associated with entry.
3879df8bae1dSRodney W. Grimes 		 */
3880d8834602SAlan Cox 		if ((entry->protection & protection) != protection)
3881df8bae1dSRodney W. Grimes 			return (FALSE);
3882df8bae1dSRodney W. Grimes 		/* go to next entry */
3883df8bae1dSRodney W. Grimes 		start = entry->end;
38847cdcf863SDoug Moore 		entry = vm_map_entry_succ(entry);
3885df8bae1dSRodney W. Grimes 	}
3886df8bae1dSRodney W. Grimes 	return (TRUE);
3887df8bae1dSRodney W. Grimes }
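/*
 * Usage sketch (illustrative, per the note above about munmap()):
 * passing VM_PROT_NONE turns this into a pure "is the whole range
 * mapped?" test, since every entry trivially grants an empty
 * protection set:
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE);
 *	vm_map_unlock_read(map);
 */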
3888df8bae1dSRodney W. Grimes 
38904d987866SJeff Roberson /*
3892886b9021SJeff Roberson  *	vm_map_copy_swap_object:
38934d987866SJeff Roberson  *
3894886b9021SJeff Roberson  *	Copies a swap-backed object from an existing map entry to a
38954d987866SJeff Roberson  *	new one.  Carries forward the swap charge.  May change the
38964d987866SJeff Roberson  *	src object on return.
38974d987866SJeff Roberson  */
38984d987866SJeff Roberson static void
3899886b9021SJeff Roberson vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
39004d987866SJeff Roberson     vm_offset_t size, vm_ooffset_t *fork_charge)
39014d987866SJeff Roberson {
39024d987866SJeff Roberson 	vm_object_t src_object;
39034d987866SJeff Roberson 	struct ucred *cred;
39044d987866SJeff Roberson 	int charged;
39054d987866SJeff Roberson 
39064d987866SJeff Roberson 	src_object = src_entry->object.vm_object;
39074d987866SJeff Roberson 	charged = ENTRY_CHARGED(src_entry);
3908d966c761SJeff Roberson 	if ((src_object->flags & OBJ_ANON) != 0) {
3909d966c761SJeff Roberson 		VM_OBJECT_WLOCK(src_object);
39104d987866SJeff Roberson 		vm_object_collapse(src_object);
39114d987866SJeff Roberson 		if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
39124d987866SJeff Roberson 			vm_object_split(src_entry);
39134d987866SJeff Roberson 			src_object = src_entry->object.vm_object;
39144d987866SJeff Roberson 		}
39154d987866SJeff Roberson 		vm_object_reference_locked(src_object);
39164d987866SJeff Roberson 		vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3917d966c761SJeff Roberson 		VM_OBJECT_WUNLOCK(src_object);
3918d966c761SJeff Roberson 	} else
3919d966c761SJeff Roberson 		vm_object_reference(src_object);
39204d987866SJeff Roberson 	if (src_entry->cred != NULL &&
39214d987866SJeff Roberson 	    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
39224d987866SJeff Roberson 		KASSERT(src_object->cred == NULL,
39234d987866SJeff Roberson 		    ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p",
39244d987866SJeff Roberson 		     src_object));
39254d987866SJeff Roberson 		src_object->cred = src_entry->cred;
39264d987866SJeff Roberson 		src_object->charge = size;
39274d987866SJeff Roberson 	}
39284d987866SJeff Roberson 	dst_entry->object.vm_object = src_object;
39294d987866SJeff Roberson 	if (charged) {
39304d987866SJeff Roberson 		cred = curthread->td_ucred;
39314d987866SJeff Roberson 		crhold(cred);
39324d987866SJeff Roberson 		dst_entry->cred = cred;
39334d987866SJeff Roberson 		*fork_charge += size;
39344d987866SJeff Roberson 		if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
39354d987866SJeff Roberson 			crhold(cred);
39364d987866SJeff Roberson 			src_entry->cred = cred;
39374d987866SJeff Roberson 			*fork_charge += size;
39384d987866SJeff Roberson 		}
39394d987866SJeff Roberson 	}
39404d987866SJeff Roberson }
39414d987866SJeff Roberson 
394286524867SJohn Dyson /*
3943df8bae1dSRodney W. Grimes  *	vm_map_copy_entry:
3944df8bae1dSRodney W. Grimes  *
3945df8bae1dSRodney W. Grimes  *	Copies the contents of the source entry to the destination
3946df8bae1dSRodney W. Grimes  *	entry.  The entries *must* be aligned properly.
3947df8bae1dSRodney W. Grimes  */
3948f708ef1bSPoul-Henning Kamp static void
39491b40f8c0SMatthew Dillon vm_map_copy_entry(
39501b40f8c0SMatthew Dillon 	vm_map_t src_map,
39511b40f8c0SMatthew Dillon 	vm_map_t dst_map,
39521b40f8c0SMatthew Dillon 	vm_map_entry_t src_entry,
39533364c323SKonstantin Belousov 	vm_map_entry_t dst_entry,
39543364c323SKonstantin Belousov 	vm_ooffset_t *fork_charge)
3955df8bae1dSRodney W. Grimes {
3956c0877f10SJohn Dyson 	vm_object_t src_object;
395784110e7eSKonstantin Belousov 	vm_map_entry_t fake_entry;
39583364c323SKonstantin Belousov 	vm_offset_t size;
3959c0877f10SJohn Dyson 
39603a0916b8SKonstantin Belousov 	VM_MAP_ASSERT_LOCKED(dst_map);
39613a0916b8SKonstantin Belousov 
39629fdfe602SMatthew Dillon 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3963df8bae1dSRodney W. Grimes 		return;
3964df8bae1dSRodney W. Grimes 
3965afaa41f6SAlan Cox 	if (src_entry->wired_count == 0 ||
3966afaa41f6SAlan Cox 	    (src_entry->protection & VM_PROT_WRITE) == 0) {
3967df8bae1dSRodney W. Grimes 		/*
39680d94caffSDavid Greenman 		 * If the source entry is marked needs_copy, it is already
39690d94caffSDavid Greenman 		 * write-protected.
3970df8bae1dSRodney W. Grimes 		 */
3971d9a9209aSAlan Cox 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3972d9a9209aSAlan Cox 		    (src_entry->protection & VM_PROT_WRITE) != 0) {
3973df8bae1dSRodney W. Grimes 			pmap_protect(src_map->pmap,
3974df8bae1dSRodney W. Grimes 			    src_entry->start,
3975df8bae1dSRodney W. Grimes 			    src_entry->end,
3976df8bae1dSRodney W. Grimes 			    src_entry->protection & ~VM_PROT_WRITE);
3977df8bae1dSRodney W. Grimes 		}
3978b18bfc3dSJohn Dyson 
3979df8bae1dSRodney W. Grimes 		/*
3980df8bae1dSRodney W. Grimes 		 * Make a copy of the object.
3981df8bae1dSRodney W. Grimes 		 */
39823364c323SKonstantin Belousov 		size = src_entry->end - src_entry->start;
39838aef1712SMatthew Dillon 		if ((src_object = src_entry->object.vm_object) != NULL) {
3984886b9021SJeff Roberson 			if (src_object->type == OBJT_DEFAULT ||
3985886b9021SJeff Roberson 			    src_object->type == OBJT_SWAP) {
3986886b9021SJeff Roberson 				vm_map_copy_swap_object(src_entry, dst_entry,
39874d987866SJeff Roberson 				    size, fork_charge);
39884d987866SJeff Roberson 				/* May have split/collapsed, reload obj. */
39894d987866SJeff Roberson 				src_object = src_entry->object.vm_object;
39904d987866SJeff Roberson 			} else {
39914d987866SJeff Roberson 				vm_object_reference(src_object);
3992c0877f10SJohn Dyson 				dst_entry->object.vm_object = src_object;
39933364c323SKonstantin Belousov 			}
39949a4ee196SKonstantin Belousov 			src_entry->eflags |= MAP_ENTRY_COW |
39959a4ee196SKonstantin Belousov 			    MAP_ENTRY_NEEDS_COPY;
39969a4ee196SKonstantin Belousov 			dst_entry->eflags |= MAP_ENTRY_COW |
39979a4ee196SKonstantin Belousov 			    MAP_ENTRY_NEEDS_COPY;
3998b18bfc3dSJohn Dyson 			dst_entry->offset = src_entry->offset;
3999fe7bcbafSKyle Evans 			if (src_entry->eflags & MAP_ENTRY_WRITECNT) {
400084110e7eSKonstantin Belousov 				/*
4001fe7bcbafSKyle Evans 				 * MAP_ENTRY_WRITECNT cannot
400284110e7eSKonstantin Belousov 				 * indicate write reference from
400384110e7eSKonstantin Belousov 				 * src_entry, since the entry is
400484110e7eSKonstantin Belousov 				 * marked as needs copy.  Allocate a
400584110e7eSKonstantin Belousov 				 * fake entry that is used to
4006fe7bcbafSKyle Evans 				 * decrement the object's write-mapping count
400784110e7eSKonstantin Belousov 				 * at the appropriate time.  Attach
400884110e7eSKonstantin Belousov 				 * fake_entry to the deferred list.
400984110e7eSKonstantin Belousov 				 */
401084110e7eSKonstantin Belousov 				fake_entry = vm_map_entry_create(dst_map);
4011fe7bcbafSKyle Evans 				fake_entry->eflags = MAP_ENTRY_WRITECNT;
4012fe7bcbafSKyle Evans 				src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
401384110e7eSKonstantin Belousov 				vm_object_reference(src_object);
401484110e7eSKonstantin Belousov 				fake_entry->object.vm_object = src_object;
401584110e7eSKonstantin Belousov 				fake_entry->start = src_entry->start;
401684110e7eSKonstantin Belousov 				fake_entry->end = src_entry->end;
40177cdcf863SDoug Moore 				fake_entry->defer_next =
40187cdcf863SDoug Moore 				    curthread->td_map_def_user;
401984110e7eSKonstantin Belousov 				curthread->td_map_def_user = fake_entry;
402084110e7eSKonstantin Belousov 			}
40210ec97ffcSKonstantin Belousov 
40220ec97ffcSKonstantin Belousov 			pmap_copy(dst_map->pmap, src_map->pmap,
40230ec97ffcSKonstantin Belousov 			    dst_entry->start, dst_entry->end - dst_entry->start,
40240ec97ffcSKonstantin Belousov 			    src_entry->start);
4025b18bfc3dSJohn Dyson 		} else {
4026b18bfc3dSJohn Dyson 			dst_entry->object.vm_object = NULL;
4027b18bfc3dSJohn Dyson 			dst_entry->offset = 0;
4028ef694c1aSEdward Tomasz Napierala 			if (src_entry->cred != NULL) {
4029ef694c1aSEdward Tomasz Napierala 				dst_entry->cred = curthread->td_ucred;
4030ef694c1aSEdward Tomasz Napierala 				crhold(dst_entry->cred);
40313364c323SKonstantin Belousov 				*fork_charge += size;
40323364c323SKonstantin Belousov 			}
4033b18bfc3dSJohn Dyson 		}
40340d94caffSDavid Greenman 	} else {
4035df8bae1dSRodney W. Grimes 		/*
4036afaa41f6SAlan Cox 		 * We don't want to make writeable wired pages copy-on-write.
4037afaa41f6SAlan Cox 		 * Immediately copy these pages into the new map by simulating
4038afaa41f6SAlan Cox 		 * page faults.  The new pages are pageable.
4039df8bae1dSRodney W. Grimes 		 */
4040121fd461SKonstantin Belousov 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
4041121fd461SKonstantin Belousov 		    fork_charge);
4042df8bae1dSRodney W. Grimes 	}
4043df8bae1dSRodney W. Grimes }
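/*
 * Summary of the copy strategy above: for a pageable source entry both
 * parent and child are left marked MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY
 * and share the object with write access removed from the pmap, so the
 * first write fault in either map performs the real copy.  Writeable
 * wired entries are instead copied eagerly by vm_fault_copy_entry().
 */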
4044df8bae1dSRodney W. Grimes 
4045df8bae1dSRodney W. Grimes /*
40462a7be1b6SBrian Feldman  * vmspace_map_entry_forked:
40472a7be1b6SBrian Feldman  * Update the newly-forked vmspace each time a map entry is inherited
40482a7be1b6SBrian Feldman  * or copied.  The values for vm_dsize and vm_tsize are approximate
40492a7be1b6SBrian Feldman  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
40502a7be1b6SBrian Feldman  */
40512a7be1b6SBrian Feldman static void
40522a7be1b6SBrian Feldman vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
40532a7be1b6SBrian Feldman     vm_map_entry_t entry)
40542a7be1b6SBrian Feldman {
40552a7be1b6SBrian Feldman 	vm_size_t entrysize;
40562a7be1b6SBrian Feldman 	vm_offset_t newend;
40572a7be1b6SBrian Feldman 
405819bd0d9cSKonstantin Belousov 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
405919bd0d9cSKonstantin Belousov 		return;
40602a7be1b6SBrian Feldman 	entrysize = entry->end - entry->start;
40612a7be1b6SBrian Feldman 	vm2->vm_map.size += entrysize;
40622a7be1b6SBrian Feldman 	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
40632a7be1b6SBrian Feldman 		vm2->vm_ssize += btoc(entrysize);
40642a7be1b6SBrian Feldman 	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
40652a7be1b6SBrian Feldman 	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4066b351299cSAndrew Gallatin 		newend = MIN(entry->end,
40672a7be1b6SBrian Feldman 		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
40682a7be1b6SBrian Feldman 		vm2->vm_dsize += btoc(newend - entry->start);
40692a7be1b6SBrian Feldman 	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
40702a7be1b6SBrian Feldman 	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4071b351299cSAndrew Gallatin 		newend = MIN(entry->end,
40722a7be1b6SBrian Feldman 		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
40732a7be1b6SBrian Feldman 		vm2->vm_tsize += btoc(newend - entry->start);
40742a7be1b6SBrian Feldman 	}
40752a7be1b6SBrian Feldman }
40762a7be1b6SBrian Feldman 
40772a7be1b6SBrian Feldman /*
4078df8bae1dSRodney W. Grimes  * vmspace_fork:
4079df8bae1dSRodney W. Grimes  * Create a new process vmspace structure and vm_map
4080df8bae1dSRodney W. Grimes  * based on those of an existing process.  The new map
4081df8bae1dSRodney W. Grimes  * is based on the old map, according to the inheritance
4082df8bae1dSRodney W. Grimes  * values on the regions in that map.
4083df8bae1dSRodney W. Grimes  *
40842a7be1b6SBrian Feldman  * XXX It might be worth coalescing the entries added to the new vmspace.
40852a7be1b6SBrian Feldman  *
4086df8bae1dSRodney W. Grimes  * The source map must not be locked.
4087df8bae1dSRodney W. Grimes  */
4088df8bae1dSRodney W. Grimes struct vmspace *
40893364c323SKonstantin Belousov vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
4090df8bae1dSRodney W. Grimes {
4091c0877f10SJohn Dyson 	struct vmspace *vm2;
409279e53838SAlan Cox 	vm_map_t new_map, old_map;
409379e53838SAlan Cox 	vm_map_entry_t new_entry, old_entry;
4094de5f6a77SJohn Dyson 	vm_object_t object;
4095e7a9df16SKonstantin Belousov 	int error, locked;
409619bd0d9cSKonstantin Belousov 	vm_inherit_t inh;
4097df8bae1dSRodney W. Grimes 
409879e53838SAlan Cox 	old_map = &vm1->vm_map;
409979e53838SAlan Cox 	/* Copy immutable fields of vm1 to vm2. */
41006e00f3a3SKonstantin Belousov 	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
41016e00f3a3SKonstantin Belousov 	    pmap_pinit);
410289b57fcfSKonstantin Belousov 	if (vm2 == NULL)
410379e53838SAlan Cox 		return (NULL);
4104e7a9df16SKonstantin Belousov 
41052a7be1b6SBrian Feldman 	vm2->vm_taddr = vm1->vm_taddr;
41062a7be1b6SBrian Feldman 	vm2->vm_daddr = vm1->vm_daddr;
41072a7be1b6SBrian Feldman 	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
410879e53838SAlan Cox 	vm_map_lock(old_map);
410979e53838SAlan Cox 	if (old_map->busy)
411079e53838SAlan Cox 		vm_map_wait_busy(old_map);
411179e53838SAlan Cox 	new_map = &vm2->vm_map;
41121fac7d7fSKonstantin Belousov 	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
41131fac7d7fSKonstantin Belousov 	KASSERT(locked, ("vmspace_fork: lock failed"));
4114df8bae1dSRodney W. Grimes 
4115e7a9df16SKonstantin Belousov 	error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
4116e7a9df16SKonstantin Belousov 	if (error != 0) {
4117e7a9df16SKonstantin Belousov 		sx_xunlock(&old_map->lock);
4118e7a9df16SKonstantin Belousov 		sx_xunlock(&new_map->lock);
4119e7a9df16SKonstantin Belousov 		vm_map_process_deferred();
4120e7a9df16SKonstantin Belousov 		vmspace_free(vm2);
4121e7a9df16SKonstantin Belousov 		return (NULL);
4122e7a9df16SKonstantin Belousov 	}
4123e7a9df16SKonstantin Belousov 
4124fa50a355SKonstantin Belousov 	new_map->anon_loc = old_map->anon_loc;
4125e7a9df16SKonstantin Belousov 
41262767c9f3SDoug Moore 	VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
41272767c9f3SDoug Moore 		if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
4128df8bae1dSRodney W. Grimes 			panic("vm_map_fork: encountered a submap");
4129df8bae1dSRodney W. Grimes 
413019bd0d9cSKonstantin Belousov 		inh = old_entry->inheritance;
413119bd0d9cSKonstantin Belousov 		if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
413219bd0d9cSKonstantin Belousov 		    inh != VM_INHERIT_NONE)
413319bd0d9cSKonstantin Belousov 			inh = VM_INHERIT_COPY;
413419bd0d9cSKonstantin Belousov 
413519bd0d9cSKonstantin Belousov 		switch (inh) {
4136df8bae1dSRodney W. Grimes 		case VM_INHERIT_NONE:
4137df8bae1dSRodney W. Grimes 			break;
4138df8bae1dSRodney W. Grimes 
4139df8bae1dSRodney W. Grimes 		case VM_INHERIT_SHARE:
4140df8bae1dSRodney W. Grimes 			/*
41412767c9f3SDoug Moore 			 * Clone the entry, creating the shared object if
41422767c9f3SDoug Moore 			 * necessary.
4143fed9a903SJohn Dyson 			 */
4144fed9a903SJohn Dyson 			object = old_entry->object.vm_object;
4145fed9a903SJohn Dyson 			if (object == NULL) {
4146af1d6d6aSDoug Moore 				vm_map_entry_back(old_entry);
4147af1d6d6aSDoug Moore 				object = old_entry->object.vm_object;
41489a2f6362SAlan Cox 			}
41499a2f6362SAlan Cox 
41509a2f6362SAlan Cox 			/*
41519a2f6362SAlan Cox 			 * Add the reference before calling vm_object_shadow
41529a2f6362SAlan Cox 			 * to ensure that a shadow object is created.
41539a2f6362SAlan Cox 			 */
41549a2f6362SAlan Cox 			vm_object_reference(object);
41559a2f6362SAlan Cox 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
41565069bf57SJohn Dyson 				vm_object_shadow(&old_entry->object.vm_object,
41575069bf57SJohn Dyson 				    &old_entry->offset,
415867388836SKonstantin Belousov 				    old_entry->end - old_entry->start,
415967388836SKonstantin Belousov 				    old_entry->cred,
4160d30344bdSIan Dowse 				    /* Transfer the second reference too. */
416167388836SKonstantin Belousov 				    true);
416267388836SKonstantin Belousov 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
416367388836SKonstantin Belousov 				old_entry->cred = NULL;
41647fd10fb3SKonstantin Belousov 
41657fd10fb3SKonstantin Belousov 				/*
416683ea714fSDoug Moore 				 * As in vm_map_merged_neighbor_dispose(),
416783ea714fSDoug Moore 				 * the vnode lock will not be acquired in
41687fd10fb3SKonstantin Belousov 				 * this call to vm_object_deallocate().
41697fd10fb3SKonstantin Belousov 				 */
4170d30344bdSIan Dowse 				vm_object_deallocate(object);
41715069bf57SJohn Dyson 				object = old_entry->object.vm_object;
417267388836SKonstantin Belousov 			} else {
417389f6b863SAttilio Rao 				VM_OBJECT_WLOCK(object);
4174069e9bc1SDoug Rabson 				vm_object_clear_flag(object, OBJ_ONEMAPPING);
4175ef694c1aSEdward Tomasz Napierala 				if (old_entry->cred != NULL) {
417667388836SKonstantin Belousov 					KASSERT(object->cred == NULL,
417767388836SKonstantin Belousov 					    ("vmspace_fork both cred"));
4178ef694c1aSEdward Tomasz Napierala 					object->cred = old_entry->cred;
417967388836SKonstantin Belousov 					object->charge = old_entry->end -
418067388836SKonstantin Belousov 					    old_entry->start;
4181ef694c1aSEdward Tomasz Napierala 					old_entry->cred = NULL;
41823364c323SKonstantin Belousov 				}
4183b9781cf6SKonstantin Belousov 
4184b9781cf6SKonstantin Belousov 				/*
4185b9781cf6SKonstantin Belousov 				 * Assert the correct state of the vnode
4186b9781cf6SKonstantin Belousov 				 * v_writecount while the object is locked, so
4187b9781cf6SKonstantin Belousov 				 * that it does not have to be relocked later
4188b9781cf6SKonstantin Belousov 				 * just for this assertion.
4189b9781cf6SKonstantin Belousov 				 */
4190fe7bcbafSKyle Evans 				if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
4191b9781cf6SKonstantin Belousov 				    object->type == OBJT_VNODE) {
419267388836SKonstantin Belousov 					KASSERT(((struct vnode *)object->
419367388836SKonstantin Belousov 					    handle)->v_writecount > 0,
419467388836SKonstantin Belousov 					    ("vmspace_fork: v_writecount %p",
419567388836SKonstantin Belousov 					    object));
419667388836SKonstantin Belousov 					KASSERT(object->un_pager.vnp.
419767388836SKonstantin Belousov 					    writemappings > 0,
4198b9781cf6SKonstantin Belousov 					    ("vmspace_fork: vnp.writecount %p",
4199b9781cf6SKonstantin Belousov 					    object));
4200b9781cf6SKonstantin Belousov 				}
420189f6b863SAttilio Rao 				VM_OBJECT_WUNLOCK(object);
420267388836SKonstantin Belousov 			}
4203fed9a903SJohn Dyson 
4204fed9a903SJohn Dyson 			/*
4205ad5fca3bSAlan Cox 			 * Clone the entry, referencing the shared object.
4206df8bae1dSRodney W. Grimes 			 */
4207df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
4208df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
42099f6acfd1SKonstantin Belousov 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
42109f6acfd1SKonstantin Belousov 			    MAP_ENTRY_IN_TRANSITION);
42110acea7dfSKonstantin Belousov 			new_entry->wiring_thread = NULL;
4212df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
4213fe7bcbafSKyle Evans 			if (new_entry->eflags & MAP_ENTRY_WRITECNT) {
4214fe7bcbafSKyle Evans 				vm_pager_update_writecount(object,
421584110e7eSKonstantin Belousov 				    new_entry->start, new_entry->end);
421684110e7eSKonstantin Belousov 			}
421778022527SKonstantin Belousov 			vm_map_entry_set_vnode_text(new_entry, true);
4218df8bae1dSRodney W. Grimes 
4219df8bae1dSRodney W. Grimes 			/*
42200d94caffSDavid Greenman 			 * Insert the entry into the new map -- we know we're
42210d94caffSDavid Greenman 			 * inserting at the end of the new map.
4222df8bae1dSRodney W. Grimes 			 */
42239f701172SKonstantin Belousov 			vm_map_entry_link(new_map, new_entry);
42242a7be1b6SBrian Feldman 			vmspace_map_entry_forked(vm1, vm2, new_entry);
4225df8bae1dSRodney W. Grimes 
4226df8bae1dSRodney W. Grimes 			/*
4227df8bae1dSRodney W. Grimes 			 * Update the physical map
4228df8bae1dSRodney W. Grimes 			 */
4229df8bae1dSRodney W. Grimes 			pmap_copy(new_map->pmap, old_map->pmap,
4230df8bae1dSRodney W. Grimes 			    new_entry->start,
4231df8bae1dSRodney W. Grimes 			    (old_entry->end - old_entry->start),
4232df8bae1dSRodney W. Grimes 			    old_entry->start);
4233df8bae1dSRodney W. Grimes 			break;
4234df8bae1dSRodney W. Grimes 
4235df8bae1dSRodney W. Grimes 		case VM_INHERIT_COPY:
4236df8bae1dSRodney W. Grimes 			/*
4237df8bae1dSRodney W. Grimes 			 * Clone the entry and link into the map.
4238df8bae1dSRodney W. Grimes 			 */
4239df8bae1dSRodney W. Grimes 			new_entry = vm_map_entry_create(new_map);
4240df8bae1dSRodney W. Grimes 			*new_entry = *old_entry;
424184110e7eSKonstantin Belousov 			/*
424284110e7eSKonstantin Belousov 			 * Copied entry is COW over the old object.
424384110e7eSKonstantin Belousov 			 */
42449f6acfd1SKonstantin Belousov 			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4245fe7bcbafSKyle Evans 			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT);
42460acea7dfSKonstantin Belousov 			new_entry->wiring_thread = NULL;
4247df8bae1dSRodney W. Grimes 			new_entry->wired_count = 0;
4248df8bae1dSRodney W. Grimes 			new_entry->object.vm_object = NULL;
4249ef694c1aSEdward Tomasz Napierala 			new_entry->cred = NULL;
42509f701172SKonstantin Belousov 			vm_map_entry_link(new_map, new_entry);
42512a7be1b6SBrian Feldman 			vmspace_map_entry_forked(vm1, vm2, new_entry);
4252bd7e5f99SJohn Dyson 			vm_map_copy_entry(old_map, new_map, old_entry,
42533364c323SKonstantin Belousov 			    new_entry, fork_charge);
425478022527SKonstantin Belousov 			vm_map_entry_set_vnode_text(new_entry, true);
4255df8bae1dSRodney W. Grimes 			break;
425678d7964bSXin LI 
425778d7964bSXin LI 		case VM_INHERIT_ZERO:
425878d7964bSXin LI 			/*
425978d7964bSXin LI 			 * Create a new anonymous mapping entry modelled from
426078d7964bSXin LI 			 * Create a new anonymous mapping entry modeled on
426178d7964bSXin LI 			 * the old one.
426278d7964bSXin LI 			new_entry = vm_map_entry_create(new_map);
426378d7964bSXin LI 			memset(new_entry, 0, sizeof(*new_entry));
426478d7964bSXin LI 
426578d7964bSXin LI 			new_entry->start = old_entry->start;
426678d7964bSXin LI 			new_entry->end = old_entry->end;
426778d7964bSXin LI 			new_entry->eflags = old_entry->eflags &
426878d7964bSXin LI 			    ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
4269fe7bcbafSKyle Evans 			    MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC);
427078d7964bSXin LI 			new_entry->protection = old_entry->protection;
427178d7964bSXin LI 			new_entry->max_protection = old_entry->max_protection;
427278d7964bSXin LI 			new_entry->inheritance = VM_INHERIT_ZERO;
427378d7964bSXin LI 
42749f701172SKonstantin Belousov 			vm_map_entry_link(new_map, new_entry);
427578d7964bSXin LI 			vmspace_map_entry_forked(vm1, vm2, new_entry);
427678d7964bSXin LI 
427778d7964bSXin LI 			new_entry->cred = curthread->td_ucred;
427878d7964bSXin LI 			crhold(new_entry->cred);
427978d7964bSXin LI 			*fork_charge += (new_entry->end - new_entry->start);
428078d7964bSXin LI 
428178d7964bSXin LI 			break;
4282df8bae1dSRodney W. Grimes 		}
4283df8bae1dSRodney W. Grimes 	}
428484110e7eSKonstantin Belousov 	/*
428584110e7eSKonstantin Belousov 	 * Unlock with the open-coded equivalent of vm_map_unlock() to
428684110e7eSKonstantin Belousov 	 * postpone handling the deferred map entries, which cannot be
428784110e7eSKonstantin Belousov 	 * done until both the old_map and new_map locks are released.
428884110e7eSKonstantin Belousov 	 */
428984110e7eSKonstantin Belousov 	sx_xunlock(&old_map->lock);
429084110e7eSKonstantin Belousov 	sx_xunlock(&new_map->lock);
429184110e7eSKonstantin Belousov 	vm_map_process_deferred();
4292df8bae1dSRodney W. Grimes 
4293df8bae1dSRodney W. Grimes 	return (vm2);
4294df8bae1dSRodney W. Grimes }
4295df8bae1dSRodney W. Grimes 
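/*
 * Illustrative sketch (not compiled): the calling pattern expected of
 * vmspace_fork() consumers, mirroring the sequence that vmspace_unshare()
 * below uses.  The function and variable names here are hypothetical.
 */
#if 0
static int
example_fork_vmspace(struct proc *p, struct vmspace **vm2p)
{
	struct vmspace *vm2;
	vm_ooffset_t fork_charge;

	fork_charge = 0;
	vm2 = vmspace_fork(p->p_vmspace, &fork_charge);
	if (vm2 == NULL)
		return (ENOMEM);
	/*
	 * vmspace_fork() accumulates the swap charge for copied and
	 * zero-inherited entries; reserve it against the caller's
	 * credentials before installing the new vmspace.
	 */
	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
		vmspace_free(vm2);
		return (ENOMEM);
	}
	*vm2p = vm2;
	return (0);
}
#endif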
42968056df6eSAlan Cox /*
42978056df6eSAlan Cox  * Create a process's stack for exec_new_vmspace().  This function is never
42988056df6eSAlan Cox  * asked to wire the newly created stack.
42998056df6eSAlan Cox  */
430094f7e29aSAlan Cox int
430194f7e29aSAlan Cox vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
430294f7e29aSAlan Cox     vm_prot_t prot, vm_prot_t max, int cow)
430394f7e29aSAlan Cox {
43044648ba0aSKonstantin Belousov 	vm_size_t growsize, init_ssize;
43058056df6eSAlan Cox 	rlim_t vmemlim;
43064648ba0aSKonstantin Belousov 	int rv;
43074648ba0aSKonstantin Belousov 
43088056df6eSAlan Cox 	MPASS((map->flags & MAP_WIREFUTURE) == 0);
43094648ba0aSKonstantin Belousov 	growsize = sgrowsiz;
43104648ba0aSKonstantin Belousov 	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
43114648ba0aSKonstantin Belousov 	vm_map_lock(map);
4312f6f6d240SMateusz Guzik 	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
43134648ba0aSKonstantin Belousov 	/* If we would blow our VMEM resource limit, no go */
43144648ba0aSKonstantin Belousov 	if (map->size + init_ssize > vmemlim) {
43154648ba0aSKonstantin Belousov 		rv = KERN_NO_SPACE;
43164648ba0aSKonstantin Belousov 		goto out;
43174648ba0aSKonstantin Belousov 	}
4318e1f92cccSAlan Cox 	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
43194648ba0aSKonstantin Belousov 	    max, cow);
43204648ba0aSKonstantin Belousov out:
43214648ba0aSKonstantin Belousov 	vm_map_unlock(map);
43224648ba0aSKonstantin Belousov 	return (rv);
43234648ba0aSKonstantin Belousov }
43244648ba0aSKonstantin Belousov 
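/*
 * Illustrative sketch (not compiled): a vm_map_stack() call of the kind
 * issued on behalf of exec_new_vmspace() for a downward-growing stack.
 * "sv" and "ssiz" are stand-ins for the caller's sysentvec and chosen
 * stack size.
 */
#if 0
	rv = vm_map_stack(map, sv->sv_usrstack - ssiz, ssiz,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
	if (rv != KERN_SUCCESS)
		return (vm_mmap_to_errno(rv));
#endif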
432519f49ad3SKonstantin Belousov static int stack_guard_page = 1;
432619f49ad3SKonstantin Belousov SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
432719f49ad3SKonstantin Belousov     &stack_guard_page, 0,
432819f49ad3SKonstantin Belousov     "Specifies the number of guard pages for a stack that grows");
432919f49ad3SKonstantin Belousov 
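/*
 * The guard size is thus a boot-time and runtime tunable; for example,
 * an administrator can widen it with:
 *
 *	sysctl security.bsd.stack_guard_page=4
 */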
43304648ba0aSKonstantin Belousov static int
43314648ba0aSKonstantin Belousov vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
43324648ba0aSKonstantin Belousov     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
43334648ba0aSKonstantin Belousov {
4334d1d3f7e1SDoug Moore 	vm_map_entry_t new_entry, prev_entry;
433519bd0d9cSKonstantin Belousov 	vm_offset_t bot, gap_bot, gap_top, top;
433619f49ad3SKonstantin Belousov 	vm_size_t init_ssize, sgp;
4337fd75d710SMarcel Moolenaar 	int orient, rv;
433894f7e29aSAlan Cox 
4339fd75d710SMarcel Moolenaar 	/*
4340fd75d710SMarcel Moolenaar 	 * The stack orientation is piggybacked with the cow argument.
4341fd75d710SMarcel Moolenaar 	 * Extract it into orient; the cow argument itself, orientation
4342fd75d710SMarcel Moolenaar 	 * bits included, is passed on unchanged to vm_map_insert().
4343fd75d710SMarcel Moolenaar 	 */
4344fd75d710SMarcel Moolenaar 	orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
4345fd75d710SMarcel Moolenaar 	KASSERT(orient != 0, ("No stack grow direction"));
434619bd0d9cSKonstantin Belousov 	KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
434719bd0d9cSKonstantin Belousov 	    ("bi-dir stack"));
4348fd75d710SMarcel Moolenaar 
434977bc7900SKonstantin Belousov 	if (addrbos < vm_map_min(map) ||
43509410cd7dSKonstantin Belousov 	    addrbos + max_ssize > vm_map_max(map) ||
43519410cd7dSKonstantin Belousov 	    addrbos + max_ssize <= addrbos)
43529410cd7dSKonstantin Belousov 		return (KERN_INVALID_ADDRESS);
4353156e8654SKonstantin Belousov 	sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4354156e8654SKonstantin Belousov 	    (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4355fe69291fSKonstantin Belousov 	    (vm_size_t)stack_guard_page * PAGE_SIZE;
43569410cd7dSKonstantin Belousov 	if (sgp >= max_ssize)
43579410cd7dSKonstantin Belousov 		return (KERN_INVALID_ARGUMENT);
4358fd75d710SMarcel Moolenaar 
435919f49ad3SKonstantin Belousov 	init_ssize = growsize;
436019f49ad3SKonstantin Belousov 	if (max_ssize < init_ssize + sgp)
436119f49ad3SKonstantin Belousov 		init_ssize = max_ssize - sgp;
436294f7e29aSAlan Cox 
436394f7e29aSAlan Cox 	/* If addr is already mapped, no go */
4364d1d3f7e1SDoug Moore 	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
436594f7e29aSAlan Cox 		return (KERN_NO_SPACE);
4366a69ac174SMatthew Dillon 
4367fd75d710SMarcel Moolenaar 	/*
4368763df3ecSPedro F. Giffuni 	 * If we can't accommodate max_ssize in the current mapping, no go.
436994f7e29aSAlan Cox 	 */
43707cdcf863SDoug Moore 	if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
437194f7e29aSAlan Cox 		return (KERN_NO_SPACE);
437294f7e29aSAlan Cox 
4373fd75d710SMarcel Moolenaar 	/*
4374fd75d710SMarcel Moolenaar 	 * We initially map a stack of only init_ssize.  We will grow as
4375fd75d710SMarcel Moolenaar 	 * needed later.  Depending on the orientation of the stack (i.e.
4376fd75d710SMarcel Moolenaar 	 * the grow direction) we either map at the top of the range, the
4377fd75d710SMarcel Moolenaar 	 * bottom of the range or in the middle.
437894f7e29aSAlan Cox 	 *
4379fd75d710SMarcel Moolenaar 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
4380fd75d710SMarcel Moolenaar 	 * and cow to be 0.  Possibly we should eliminate these as input
4381fd75d710SMarcel Moolenaar 	 * parameters, and just pass these values here in the insert call.
438294f7e29aSAlan Cox 	 */
438319bd0d9cSKonstantin Belousov 	if (orient == MAP_STACK_GROWS_DOWN) {
4384fd75d710SMarcel Moolenaar 		bot = addrbos + max_ssize - init_ssize;
4385fd75d710SMarcel Moolenaar 		top = bot + init_ssize;
438619bd0d9cSKonstantin Belousov 		gap_bot = addrbos;
438719bd0d9cSKonstantin Belousov 		gap_top = bot;
438819bd0d9cSKonstantin Belousov 	} else /* if (orient == MAP_STACK_GROWS_UP) */ {
438919bd0d9cSKonstantin Belousov 		bot = addrbos;
439019bd0d9cSKonstantin Belousov 		top = bot + init_ssize;
439119bd0d9cSKonstantin Belousov 		gap_bot = top;
439219bd0d9cSKonstantin Belousov 		gap_top = addrbos + max_ssize;
439319bd0d9cSKonstantin Belousov 	}
4394fd75d710SMarcel Moolenaar 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
439519bd0d9cSKonstantin Belousov 	if (rv != KERN_SUCCESS)
439619bd0d9cSKonstantin Belousov 		return (rv);
43977cdcf863SDoug Moore 	new_entry = vm_map_entry_succ(prev_entry);
439819bd0d9cSKonstantin Belousov 	KASSERT(new_entry->end == top || new_entry->start == bot,
439919bd0d9cSKonstantin Belousov 	    ("Bad entry start/end for new stack entry"));
4400712efe66SAlan Cox 	KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
4401712efe66SAlan Cox 	    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4402712efe66SAlan Cox 	    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
4403712efe66SAlan Cox 	KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
4404712efe66SAlan Cox 	    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
4405712efe66SAlan Cox 	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
4406fe69291fSKonstantin Belousov 	if (gap_bot == gap_top)
4407fe69291fSKonstantin Belousov 		return (KERN_SUCCESS);
440819bd0d9cSKonstantin Belousov 	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
440919bd0d9cSKonstantin Belousov 	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
441019bd0d9cSKonstantin Belousov 	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
4411a7751d32SKonstantin Belousov 	if (rv == KERN_SUCCESS) {
4412a7751d32SKonstantin Belousov 		/*
4413a7751d32SKonstantin Belousov 		 * Gap can never successfully handle a fault, so
4414a7751d32SKonstantin Belousov 		 * read-ahead logic is never used for it.  Re-use
4415a7751d32SKonstantin Belousov 		 * next_read of the gap entry to store
4416a7751d32SKonstantin Belousov 		 * stack_guard_page for vm_map_growstack().
4417a7751d32SKonstantin Belousov 		 */
4418a7751d32SKonstantin Belousov 		if (orient == MAP_STACK_GROWS_DOWN)
44197cdcf863SDoug Moore 			vm_map_entry_pred(new_entry)->next_read = sgp;
4420a7751d32SKonstantin Belousov 		else
44217cdcf863SDoug Moore 			vm_map_entry_succ(new_entry)->next_read = sgp;
4422a7751d32SKonstantin Belousov 	} else {
442319bd0d9cSKonstantin Belousov 		(void)vm_map_delete(map, bot, top);
4424a7751d32SKonstantin Belousov 	}
442594f7e29aSAlan Cox 	return (rv);
442694f7e29aSAlan Cox }
442794f7e29aSAlan Cox 
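/*
 * Worked example of the layout that vm_map_stack_locked() computes for
 * a downward-growing stack, assuming addrbos = 0x10000000, max_ssize =
 * 0x10000, init_ssize = 0x8000, and sgp = one 4K page:
 *
 *	gap entry:   [0x10000000, 0x10008000)  VM_PROT_NONE, STACK_GAP_DN
 *	stack entry: [0x10008000, 0x10010000)  prot/max, MAP_ENTRY_GROWS_DOWN
 *
 * The gap entry's next_read field holds sgp (0x1000 here), the portion
 * of the hole that vm_map_growstack() must always leave unmapped.
 */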
442819bd0d9cSKonstantin Belousov /*
442919bd0d9cSKonstantin Belousov  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
443019bd0d9cSKonstantin Belousov  * successfully grow the stack.
443194f7e29aSAlan Cox  */
443219bd0d9cSKonstantin Belousov static int
443319bd0d9cSKonstantin Belousov vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
443494f7e29aSAlan Cox {
443519bd0d9cSKonstantin Belousov 	vm_map_entry_t stack_entry;
443619bd0d9cSKonstantin Belousov 	struct proc *p;
443719bd0d9cSKonstantin Belousov 	struct vmspace *vm;
443819bd0d9cSKonstantin Belousov 	struct ucred *cred;
443919bd0d9cSKonstantin Belousov 	vm_offset_t gap_end, gap_start, grow_start;
4440fa581662SDoug Moore 	vm_size_t grow_amount, guard, max_grow;
44417e19eda4SAndrey Zonov 	rlim_t lmemlim, stacklim, vmemlim;
444219bd0d9cSKonstantin Belousov 	int rv, rv1;
444319bd0d9cSKonstantin Belousov 	bool gap_deleted, grow_down, is_procstack;
44441ba5ad42SEdward Tomasz Napierala #ifdef notyet
44451ba5ad42SEdward Tomasz Napierala 	uint64_t limit;
44461ba5ad42SEdward Tomasz Napierala #endif
4447afcc55f3SEdward Tomasz Napierala #ifdef RACCT
44481ba5ad42SEdward Tomasz Napierala 	int error;
4449afcc55f3SEdward Tomasz Napierala #endif
445023955314SAlfred Perlstein 
445119bd0d9cSKonstantin Belousov 	p = curproc;
445219bd0d9cSKonstantin Belousov 	vm = p->p_vmspace;
4453eb5ea878SKonstantin Belousov 
4454eb5ea878SKonstantin Belousov 	/*
4455eb5ea878SKonstantin Belousov 	 * Disallow stack growth when the access is performed by a
4456eb5ea878SKonstantin Belousov 	 * debugger or AIO daemon, because the wrong resource
4457eb5ea878SKonstantin Belousov 	 * limits would be applied.
4458eb5ea878SKonstantin Belousov 	 */
445910ae16c7SKonstantin Belousov 	if (p != initproc && (map != &p->p_vmspace->vm_map ||
446010ae16c7SKonstantin Belousov 	    p->p_textvp == NULL))
4461f758aaddSKonstantin Belousov 		return (KERN_FAILURE);
4462eb5ea878SKonstantin Belousov 
446319bd0d9cSKonstantin Belousov 	MPASS(!map->system_map);
446419bd0d9cSKonstantin Belousov 
4465f6f6d240SMateusz Guzik 	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
4466f6f6d240SMateusz Guzik 	stacklim = lim_cur(curthread, RLIMIT_STACK);
4467f6f6d240SMateusz Guzik 	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
446819bd0d9cSKonstantin Belousov retry:
446919bd0d9cSKonstantin Belousov 	/* If addr is not in a hole for a stack grow area, no need to grow. */
4470d1d3f7e1SDoug Moore 	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
447119bd0d9cSKonstantin Belousov 		return (KERN_FAILURE);
447219bd0d9cSKonstantin Belousov 	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
44730cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
447419bd0d9cSKonstantin Belousov 	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
44757cdcf863SDoug Moore 		stack_entry = vm_map_entry_succ(gap_entry);
447619bd0d9cSKonstantin Belousov 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
447719bd0d9cSKonstantin Belousov 		    stack_entry->start != gap_entry->end)
447819bd0d9cSKonstantin Belousov 			return (KERN_FAILURE);
447919bd0d9cSKonstantin Belousov 		grow_amount = round_page(stack_entry->start - addr);
448019bd0d9cSKonstantin Belousov 		grow_down = true;
448119bd0d9cSKonstantin Belousov 	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
44827cdcf863SDoug Moore 		stack_entry = vm_map_entry_pred(gap_entry);
448319bd0d9cSKonstantin Belousov 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
448419bd0d9cSKonstantin Belousov 		    stack_entry->end != gap_entry->start)
448519bd0d9cSKonstantin Belousov 			return (KERN_FAILURE);
448619bd0d9cSKonstantin Belousov 		grow_amount = round_page(addr + 1 - stack_entry->end);
448719bd0d9cSKonstantin Belousov 		grow_down = false;
4488b21a0008SMarcel Moolenaar 	} else {
448919bd0d9cSKonstantin Belousov 		return (KERN_FAILURE);
4490b21a0008SMarcel Moolenaar 	}
4491156e8654SKonstantin Belousov 	guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4492156e8654SKonstantin Belousov 	    (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4493fe69291fSKonstantin Belousov 	    gap_entry->next_read;
4494201f03b8SAlan Cox 	max_grow = gap_entry->end - gap_entry->start;
4495201f03b8SAlan Cox 	if (guard > max_grow)
4496201f03b8SAlan Cox 		return (KERN_NO_SPACE);
4497201f03b8SAlan Cox 	max_grow -= guard;
449819bd0d9cSKonstantin Belousov 	if (grow_amount > max_grow)
44990cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
450094f7e29aSAlan Cox 
4501b21a0008SMarcel Moolenaar 	/*
4502b21a0008SMarcel Moolenaar 	 * If this is the main process stack, see if we're over the stack
4503b21a0008SMarcel Moolenaar 	 * limit.
450494f7e29aSAlan Cox 	 */
450519bd0d9cSKonstantin Belousov 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
450619bd0d9cSKonstantin Belousov 	    addr < (vm_offset_t)p->p_sysent->sv_usrstack;
450719bd0d9cSKonstantin Belousov 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
45080cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
450919bd0d9cSKonstantin Belousov 
4510afcc55f3SEdward Tomasz Napierala #ifdef RACCT
45114b5c9cf6SEdward Tomasz Napierala 	if (racct_enable) {
45121ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(p);
45134b5c9cf6SEdward Tomasz Napierala 		if (is_procstack && racct_set(p, RACCT_STACK,
45144b5c9cf6SEdward Tomasz Napierala 		    ctob(vm->vm_ssize) + grow_amount)) {
45151ba5ad42SEdward Tomasz Napierala 			PROC_UNLOCK(p);
45161ba5ad42SEdward Tomasz Napierala 			return (KERN_NO_SPACE);
45171ba5ad42SEdward Tomasz Napierala 		}
45181ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(p);
45194b5c9cf6SEdward Tomasz Napierala 	}
4520afcc55f3SEdward Tomasz Napierala #endif
452194f7e29aSAlan Cox 
452219bd0d9cSKonstantin Belousov 	grow_amount = roundup(grow_amount, sgrowsiz);
452319bd0d9cSKonstantin Belousov 	if (grow_amount > max_grow)
452419bd0d9cSKonstantin Belousov 		grow_amount = max_grow;
452591d5354aSJohn Baldwin 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4526e4826248SAlan Cox 		grow_amount = trunc_page((vm_size_t)stacklim) -
4527e4826248SAlan Cox 		    ctob(vm->vm_ssize);
452894f7e29aSAlan Cox 	}
452919bd0d9cSKonstantin Belousov 
45301ba5ad42SEdward Tomasz Napierala #ifdef notyet
45311ba5ad42SEdward Tomasz Napierala 	PROC_LOCK(p);
45321ba5ad42SEdward Tomasz Napierala 	limit = racct_get_available(p, RACCT_STACK);
45331ba5ad42SEdward Tomasz Napierala 	PROC_UNLOCK(p);
45341ba5ad42SEdward Tomasz Napierala 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
45351ba5ad42SEdward Tomasz Napierala 		grow_amount = limit - ctob(vm->vm_ssize);
45361ba5ad42SEdward Tomasz Napierala #endif
453719bd0d9cSKonstantin Belousov 
453819bd0d9cSKonstantin Belousov 	if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
45393ac7d297SAndrey Zonov 		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
45407e19eda4SAndrey Zonov 			rv = KERN_NO_SPACE;
45417e19eda4SAndrey Zonov 			goto out;
45427e19eda4SAndrey Zonov 		}
45437e19eda4SAndrey Zonov #ifdef RACCT
45444b5c9cf6SEdward Tomasz Napierala 		if (racct_enable) {
45457e19eda4SAndrey Zonov 			PROC_LOCK(p);
45467e19eda4SAndrey Zonov 			if (racct_set(p, RACCT_MEMLOCK,
45473ac7d297SAndrey Zonov 			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
45487e19eda4SAndrey Zonov 				PROC_UNLOCK(p);
45497e19eda4SAndrey Zonov 				rv = KERN_NO_SPACE;
45507e19eda4SAndrey Zonov 				goto out;
45517e19eda4SAndrey Zonov 			}
45527e19eda4SAndrey Zonov 			PROC_UNLOCK(p);
45534b5c9cf6SEdward Tomasz Napierala 		}
45547e19eda4SAndrey Zonov #endif
45557e19eda4SAndrey Zonov 	}
455619bd0d9cSKonstantin Belousov 
4557a69ac174SMatthew Dillon 	/* If we would blow our VMEM resource limit, no go */
455891d5354aSJohn Baldwin 	if (map->size + grow_amount > vmemlim) {
45591ba5ad42SEdward Tomasz Napierala 		rv = KERN_NO_SPACE;
45601ba5ad42SEdward Tomasz Napierala 		goto out;
4561a69ac174SMatthew Dillon 	}
4562afcc55f3SEdward Tomasz Napierala #ifdef RACCT
45634b5c9cf6SEdward Tomasz Napierala 	if (racct_enable) {
45641ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(p);
45651ba5ad42SEdward Tomasz Napierala 		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
45661ba5ad42SEdward Tomasz Napierala 			PROC_UNLOCK(p);
45671ba5ad42SEdward Tomasz Napierala 			rv = KERN_NO_SPACE;
45681ba5ad42SEdward Tomasz Napierala 			goto out;
45691ba5ad42SEdward Tomasz Napierala 		}
45701ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(p);
45714b5c9cf6SEdward Tomasz Napierala 	}
4572afcc55f3SEdward Tomasz Napierala #endif
4573a69ac174SMatthew Dillon 
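	/*
	 * Try to upgrade to a write lock.  If the upgrade fails, the
	 * map lock was dropped in the process, so the gap entry may be
	 * stale; retake the read lock and look it up again.
	 */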
457419bd0d9cSKonstantin Belousov 	if (vm_map_lock_upgrade(map)) {
457519bd0d9cSKonstantin Belousov 		gap_entry = NULL;
457619bd0d9cSKonstantin Belousov 		vm_map_lock_read(map);
457719bd0d9cSKonstantin Belousov 		goto retry;
457894f7e29aSAlan Cox 	}
457994f7e29aSAlan Cox 
458019bd0d9cSKonstantin Belousov 	if (grow_down) {
458119bd0d9cSKonstantin Belousov 		grow_start = gap_entry->end - grow_amount;
458219bd0d9cSKonstantin Belousov 		if (gap_entry->start + grow_amount == gap_entry->end) {
458319bd0d9cSKonstantin Belousov 			gap_start = gap_entry->start;
458419bd0d9cSKonstantin Belousov 			gap_end = gap_entry->end;
458519bd0d9cSKonstantin Belousov 			vm_map_entry_delete(map, gap_entry);
458619bd0d9cSKonstantin Belousov 			gap_deleted = true;
458719bd0d9cSKonstantin Belousov 		} else {
458819bd0d9cSKonstantin Belousov 			MPASS(gap_entry->start < gap_entry->end - grow_amount);
4589fa581662SDoug Moore 			vm_map_entry_resize(map, gap_entry, -grow_amount);
459019bd0d9cSKonstantin Belousov 			gap_deleted = false;
459119bd0d9cSKonstantin Belousov 		}
459219bd0d9cSKonstantin Belousov 		rv = vm_map_insert(map, NULL, 0, grow_start,
459319bd0d9cSKonstantin Belousov 		    grow_start + grow_amount,
459419bd0d9cSKonstantin Belousov 		    stack_entry->protection, stack_entry->max_protection,
4595712efe66SAlan Cox 		    MAP_STACK_GROWS_DOWN);
459619bd0d9cSKonstantin Belousov 		if (rv != KERN_SUCCESS) {
459719bd0d9cSKonstantin Belousov 			if (gap_deleted) {
459819bd0d9cSKonstantin Belousov 				rv1 = vm_map_insert(map, NULL, 0, gap_start,
459919bd0d9cSKonstantin Belousov 				    gap_end, VM_PROT_NONE, VM_PROT_NONE,
460019bd0d9cSKonstantin Belousov 				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
460119bd0d9cSKonstantin Belousov 				MPASS(rv1 == KERN_SUCCESS);
46021895f520SDoug Moore 			} else
4603fa581662SDoug Moore 				vm_map_entry_resize(map, gap_entry,
46041895f520SDoug Moore 				    grow_amount);
460594f7e29aSAlan Cox 		}
4606b21a0008SMarcel Moolenaar 	} else {
460719bd0d9cSKonstantin Belousov 		grow_start = stack_entry->end;
4608ef694c1aSEdward Tomasz Napierala 		cred = stack_entry->cred;
4609ef694c1aSEdward Tomasz Napierala 		if (cred == NULL && stack_entry->object.vm_object != NULL)
4610ef694c1aSEdward Tomasz Napierala 			cred = stack_entry->object.vm_object->cred;
4611ef694c1aSEdward Tomasz Napierala 		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
46123364c323SKonstantin Belousov 			rv = KERN_NO_SPACE;
4613b21a0008SMarcel Moolenaar 		/* Grow the underlying object if applicable. */
46143364c323SKonstantin Belousov 		else if (stack_entry->object.vm_object == NULL ||
4615b21a0008SMarcel Moolenaar 		    vm_object_coalesce(stack_entry->object.vm_object,
461657a21abaSAlan Cox 		    stack_entry->offset,
4617b21a0008SMarcel Moolenaar 		    (vm_size_t)(stack_entry->end - stack_entry->start),
4618fa581662SDoug Moore 		    grow_amount, cred != NULL)) {
4619fa581662SDoug Moore 			if (gap_entry->start + grow_amount == gap_entry->end) {
462019bd0d9cSKonstantin Belousov 				vm_map_entry_delete(map, gap_entry);
4621fa581662SDoug Moore 				vm_map_entry_resize(map, stack_entry,
4622fa581662SDoug Moore 				    grow_amount);
4623fa581662SDoug Moore 			} else {
462419bd0d9cSKonstantin Belousov 				gap_entry->start += grow_amount;
4625fa581662SDoug Moore 				stack_entry->end += grow_amount;
4626fa581662SDoug Moore 			}
462719bd0d9cSKonstantin Belousov 			map->size += grow_amount;
4628b21a0008SMarcel Moolenaar 			rv = KERN_SUCCESS;
4629b21a0008SMarcel Moolenaar 		} else
4630b21a0008SMarcel Moolenaar 			rv = KERN_FAILURE;
4631b21a0008SMarcel Moolenaar 	}
4632b21a0008SMarcel Moolenaar 	if (rv == KERN_SUCCESS && is_procstack)
4633b21a0008SMarcel Moolenaar 		vm->vm_ssize += btoc(grow_amount);
4634b21a0008SMarcel Moolenaar 
4635abd498aaSBruce M Simpson 	/*
4636abd498aaSBruce M Simpson 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
4637abd498aaSBruce M Simpson 	 */
463819bd0d9cSKonstantin Belousov 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
463954a3a114SMark Johnston 		rv = vm_map_wire_locked(map, grow_start,
464054a3a114SMark Johnston 		    grow_start + grow_amount,
4641212e02c8SKonstantin Belousov 		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
464254a3a114SMark Johnston 	}
464319bd0d9cSKonstantin Belousov 	vm_map_lock_downgrade(map);
4644abd498aaSBruce M Simpson 
46451ba5ad42SEdward Tomasz Napierala out:
4646afcc55f3SEdward Tomasz Napierala #ifdef RACCT
46474b5c9cf6SEdward Tomasz Napierala 	if (racct_enable && rv != KERN_SUCCESS) {
46481ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(p);
46491ba5ad42SEdward Tomasz Napierala 		error = racct_set(p, RACCT_VMEM, map->size);
46501ba5ad42SEdward Tomasz Napierala 		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
46517e19eda4SAndrey Zonov 		if (!old_mlock) {
46527e19eda4SAndrey Zonov 			error = racct_set(p, RACCT_MEMLOCK,
46533ac7d297SAndrey Zonov 			    ptoa(pmap_wired_count(map->pmap)));
46547e19eda4SAndrey Zonov 			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
46557e19eda4SAndrey Zonov 		}
46561ba5ad42SEdward Tomasz Napierala 		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
46571ba5ad42SEdward Tomasz Napierala 		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
46581ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(p);
46591ba5ad42SEdward Tomasz Napierala 	}
4660afcc55f3SEdward Tomasz Napierala #endif
46611ba5ad42SEdward Tomasz Napierala 
46620cddd8f0SMatthew Dillon 	return (rv);
466394f7e29aSAlan Cox }
466494f7e29aSAlan Cox 
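/*
 * Illustrative sketch (not compiled): vm_map_growstack() is not called
 * directly by the fault handler.  Instead, vm_fault() tags its lookup
 * with VM_PROT_FAULT_LOOKUP, and vm_map_lookup() below converts a hit
 * on a stack-gap guard entry into a growth attempt before retrying the
 * lookup.  The local names here are the caller's, and hypothetical.
 */
#if 0
	result = vm_map_lookup(&map, vaddr,
	    fault_type | VM_PROT_FAULT_LOOKUP,
	    &entry, &object, &pindex, &prot, &wired);
#endif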
4665df8bae1dSRodney W. Grimes /*
46665856e12eSJohn Dyson  * Unshare the specified VM space for exec.  If other processes are
46675856e12eSJohn Dyson  * sharing it, then create a new one.  The new vmspace starts out empty.
46685856e12eSJohn Dyson  */
466989b57fcfSKonstantin Belousov int
46703ebc1248SPeter Wemm vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
46711b40f8c0SMatthew Dillon {
46725856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
46735856e12eSJohn Dyson 	struct vmspace *newvmspace;
46745856e12eSJohn Dyson 
46757032434eSKonstantin Belousov 	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
46767032434eSKonstantin Belousov 	    ("vmspace_exec recursed"));
46776e00f3a3SKonstantin Belousov 	newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
467889b57fcfSKonstantin Belousov 	if (newvmspace == NULL)
467989b57fcfSKonstantin Belousov 		return (ENOMEM);
468051ab6c28SAlan Cox 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
46815856e12eSJohn Dyson 	/*
46825856e12eSJohn Dyson 	 * This code is written like this for prototype purposes.  The
46835856e12eSJohn Dyson 	 * goal is to avoid running down the vmspace here, but let the
46845856e12eSJohn Dyson 	 * other processes that are still using the vmspace finally
46855856e12eSJohn Dyson 	 * run it down.  Even though there is little or no chance of blocking
46865856e12eSJohn Dyson 	 * here, it is a good idea to keep this form for future mods.
46875856e12eSJohn Dyson 	 */
468857051fdcSTor Egge 	PROC_VMSPACE_LOCK(p);
46895856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
469057051fdcSTor Egge 	PROC_VMSPACE_UNLOCK(p);
46916617724cSJeff Roberson 	if (p == curthread->td_proc)
4692b40ce416SJulian Elischer 		pmap_activate(curthread);
46937032434eSKonstantin Belousov 	curthread->td_pflags |= TDP_EXECVMSPC;
469489b57fcfSKonstantin Belousov 	return (0);
46955856e12eSJohn Dyson }
46965856e12eSJohn Dyson 
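/*
 * Illustrative sketch (not compiled): the counterpart cleanup expected
 * of the exec path.  TDP_EXECVMSPC signals that this thread swapped in
 * a fresh vmspace, so the old one must be freed once exec can no longer
 * fail back to it; "oldvmspace" is the caller's saved pointer.
 */
#if 0
	if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
		vmspace_free(oldvmspace);
		curthread->td_pflags &= ~TDP_EXECVMSPC;
	}
#endif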
46975856e12eSJohn Dyson /*
46985856e12eSJohn Dyson  * Unshare the specified VM space for forcing COW.  This
46995856e12eSJohn Dyson  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
47005856e12eSJohn Dyson  */
470189b57fcfSKonstantin Belousov int
47021b40f8c0SMatthew Dillon vmspace_unshare(struct proc *p)
47031b40f8c0SMatthew Dillon {
47045856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
47055856e12eSJohn Dyson 	struct vmspace *newvmspace;
47063364c323SKonstantin Belousov 	vm_ooffset_t fork_charge;
47075856e12eSJohn Dyson 
47085856e12eSJohn Dyson 	if (oldvmspace->vm_refcnt == 1)
470989b57fcfSKonstantin Belousov 		return (0);
47103364c323SKonstantin Belousov 	fork_charge = 0;
47113364c323SKonstantin Belousov 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
471289b57fcfSKonstantin Belousov 	if (newvmspace == NULL)
471389b57fcfSKonstantin Belousov 		return (ENOMEM);
4714ef694c1aSEdward Tomasz Napierala 	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
47153364c323SKonstantin Belousov 		vmspace_free(newvmspace);
47163364c323SKonstantin Belousov 		return (ENOMEM);
47173364c323SKonstantin Belousov 	}
471857051fdcSTor Egge 	PROC_VMSPACE_LOCK(p);
47195856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
472057051fdcSTor Egge 	PROC_VMSPACE_UNLOCK(p);
47216617724cSJeff Roberson 	if (p == curthread->td_proc)
4722b40ce416SJulian Elischer 		pmap_activate(curthread);
4723b56ef1c1SJohn Baldwin 	vmspace_free(oldvmspace);
472489b57fcfSKonstantin Belousov 	return (0);
47255856e12eSJohn Dyson }
47265856e12eSJohn Dyson 
47275856e12eSJohn Dyson /*
4728df8bae1dSRodney W. Grimes  *	vm_map_lookup:
4729df8bae1dSRodney W. Grimes  *
4730df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
4731df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
4732df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
4733df8bae1dSRodney W. Grimes  *	type specified.
4734df8bae1dSRodney W. Grimes  *
4735df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
4736df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
4737df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
4738df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
4739df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
4740df8bae1dSRodney W. Grimes  *
4741df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
4742df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
4743df8bae1dSRodney W. Grimes  *
4744df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
4745df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
4746df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
4747df8bae1dSRodney W. Grimes  *	remain the same.
4748df8bae1dSRodney W. Grimes  */
4749df8bae1dSRodney W. Grimes int
4750b9dcd593SBruce Evans vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
4751b9dcd593SBruce Evans 	      vm_offset_t vaddr,
475247221757SJohn Dyson 	      vm_prot_t fault_typea,
4753b9dcd593SBruce Evans 	      vm_map_entry_t *out_entry,	/* OUT */
4754b9dcd593SBruce Evans 	      vm_object_t *object,		/* OUT */
4755b9dcd593SBruce Evans 	      vm_pindex_t *pindex,		/* OUT */
4756b9dcd593SBruce Evans 	      vm_prot_t *out_prot,		/* OUT */
47572d8acc0fSJohn Dyson 	      boolean_t *wired)			/* OUT */
4758df8bae1dSRodney W. Grimes {
4759c0877f10SJohn Dyson 	vm_map_entry_t entry;
4760c0877f10SJohn Dyson 	vm_map_t map = *var_map;
4761c0877f10SJohn Dyson 	vm_prot_t prot;
4762a6f21d15SMark Johnston 	vm_prot_t fault_type;
47633364c323SKonstantin Belousov 	vm_object_t eobject;
47640cc74f14SAlan Cox 	vm_size_t size;
4765ef694c1aSEdward Tomasz Napierala 	struct ucred *cred;
4766df8bae1dSRodney W. Grimes 
476719bd0d9cSKonstantin Belousov RetryLookup:
4768df8bae1dSRodney W. Grimes 
4769df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
4770df8bae1dSRodney W. Grimes 
477119bd0d9cSKonstantin Belousov RetryLookupLocked:
4772df8bae1dSRodney W. Grimes 	/*
47734c3ef59eSAlan Cox 	 * Lookup the faulting address.
4774df8bae1dSRodney W. Grimes 	 */
4775095104acSAlan Cox 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4776095104acSAlan Cox 		vm_map_unlock_read(map);
4777095104acSAlan Cox 		return (KERN_INVALID_ADDRESS);
4778095104acSAlan Cox 	}
4779df8bae1dSRodney W. Grimes 
47804e94f402SAlan Cox 	entry = *out_entry;
4781b7b2aac2SJohn Dyson 
4782df8bae1dSRodney W. Grimes 	/*
4783df8bae1dSRodney W. Grimes 	 * Handle submaps.
4784df8bae1dSRodney W. Grimes 	 */
4785afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4786df8bae1dSRodney W. Grimes 		vm_map_t old_map = map;
4787df8bae1dSRodney W. Grimes 
4788df8bae1dSRodney W. Grimes 		*var_map = map = entry->object.sub_map;
4789df8bae1dSRodney W. Grimes 		vm_map_unlock_read(old_map);
4790df8bae1dSRodney W. Grimes 		goto RetryLookup;
4791df8bae1dSRodney W. Grimes 	}
4792a04c970aSJohn Dyson 
4793df8bae1dSRodney W. Grimes 	/*
47940d94caffSDavid Greenman 	 * Check whether this task is allowed to have this page.
4795df8bae1dSRodney W. Grimes 	 */
4796df8bae1dSRodney W. Grimes 	prot = entry->protection;
479719bd0d9cSKonstantin Belousov 	if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
479819bd0d9cSKonstantin Belousov 		fault_typea &= ~VM_PROT_FAULT_LOOKUP;
479919bd0d9cSKonstantin Belousov 		if (prot == VM_PROT_NONE && map != kernel_map &&
480019bd0d9cSKonstantin Belousov 		    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
480119bd0d9cSKonstantin Belousov 		    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
480219bd0d9cSKonstantin Belousov 		    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
480319bd0d9cSKonstantin Belousov 		    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
480419bd0d9cSKonstantin Belousov 			goto RetryLookupLocked;
480519bd0d9cSKonstantin Belousov 	}
4806a6f21d15SMark Johnston 	fault_type = fault_typea & VM_PROT_ALL;
48072db65ab4SAlan Cox 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4808095104acSAlan Cox 		vm_map_unlock_read(map);
4809095104acSAlan Cox 		return (KERN_PROTECTION_FAILURE);
481047221757SJohn Dyson 	}
4811b8db9776SKonstantin Belousov 	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4812b8db9776SKonstantin Belousov 	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4813b8db9776SKonstantin Belousov 	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4814b8db9776SKonstantin Belousov 	    ("entry %p flags %x", entry, entry->eflags));
48155b3e0257SDag-Erling Smørgrav 	if ((fault_typea & VM_PROT_COPY) != 0 &&
48165b3e0257SDag-Erling Smørgrav 	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
48175b3e0257SDag-Erling Smørgrav 	    (entry->eflags & MAP_ENTRY_COW) == 0) {
48185b3e0257SDag-Erling Smørgrav 		vm_map_unlock_read(map);
48195b3e0257SDag-Erling Smørgrav 		return (KERN_PROTECTION_FAILURE);
48205b3e0257SDag-Erling Smørgrav 	}
4821df8bae1dSRodney W. Grimes 
4822df8bae1dSRodney W. Grimes 	/*
48230d94caffSDavid Greenman 	 * If this page is not pageable, we have to get it for all possible
48240d94caffSDavid Greenman 	 * accesses.
4825df8bae1dSRodney W. Grimes 	 */
482605f0fdd2SPoul-Henning Kamp 	*wired = (entry->wired_count != 0);
482705f0fdd2SPoul-Henning Kamp 	if (*wired)
4828a6d42a0dSAlan Cox 		fault_type = entry->protection;
48293364c323SKonstantin Belousov 	size = entry->end - entry->start;
483067388836SKonstantin Belousov 
4831df8bae1dSRodney W. Grimes 	/*
4832df8bae1dSRodney W. Grimes 	 * If the entry was copy-on-write, we either ...
4833df8bae1dSRodney W. Grimes 	 */
4834afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4835df8bae1dSRodney W. Grimes 		/*
48360d94caffSDavid Greenman 		 * If we want to write the page, we may as well handle that
4837ad5fca3bSAlan Cox 		 * now since we've got the map locked.
4838df8bae1dSRodney W. Grimes 		 *
48390d94caffSDavid Greenman 		 * If we don't need to write the page, we just demote the
48400d94caffSDavid Greenman 		 * permissions allowed.
4841df8bae1dSRodney W. Grimes 		 */
4842a6d42a0dSAlan Cox 		if ((fault_type & VM_PROT_WRITE) != 0 ||
4843a6d42a0dSAlan Cox 		    (fault_typea & VM_PROT_COPY) != 0) {
4844df8bae1dSRodney W. Grimes 			/*
48450d94caffSDavid Greenman 			 * Make a new object, and place it in the object
48460d94caffSDavid Greenman 			 * chain.  Note that no new references have appeared
4847ad5fca3bSAlan Cox 			 * -- one just moved from the map to the new
48480d94caffSDavid Greenman 			 * object.
4849df8bae1dSRodney W. Grimes 			 */
485025adb370SBrian Feldman 			if (vm_map_lock_upgrade(map))
4851df8bae1dSRodney W. Grimes 				goto RetryLookup;
48529917e010SAlan Cox 
4853ef694c1aSEdward Tomasz Napierala 			if (entry->cred == NULL) {
48543364c323SKonstantin Belousov 				/*
48553364c323SKonstantin Belousov 				 * The debugger owner is charged for
48563364c323SKonstantin Belousov 				 * the memory.
48573364c323SKonstantin Belousov 				 */
4858ef694c1aSEdward Tomasz Napierala 				cred = curthread->td_ucred;
4859ef694c1aSEdward Tomasz Napierala 				crhold(cred);
4860ef694c1aSEdward Tomasz Napierala 				if (!swap_reserve_by_cred(size, cred)) {
4861ef694c1aSEdward Tomasz Napierala 					crfree(cred);
48623364c323SKonstantin Belousov 					vm_map_unlock(map);
48633364c323SKonstantin Belousov 					return (KERN_RESOURCE_SHORTAGE);
48643364c323SKonstantin Belousov 				}
4865ef694c1aSEdward Tomasz Napierala 				entry->cred = cred;
48663364c323SKonstantin Belousov 			}
48673364c323SKonstantin Belousov 			eobject = entry->object.vm_object;
486867388836SKonstantin Belousov 			vm_object_shadow(&entry->object.vm_object,
486967388836SKonstantin Belousov 			    &entry->offset, size, entry->cred, false);
487067388836SKonstantin Belousov 			if (eobject == entry->object.vm_object) {
48713364c323SKonstantin Belousov 				/*
48723364c323SKonstantin Belousov 				 * The object was not shadowed.
48733364c323SKonstantin Belousov 				 */
4874ef694c1aSEdward Tomasz Napierala 				swap_release_by_cred(size, entry->cred);
4875ef694c1aSEdward Tomasz Napierala 				crfree(entry->cred);
48763364c323SKonstantin Belousov 			}
487767388836SKonstantin Belousov 			entry->cred = NULL;
487867388836SKonstantin Belousov 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
48799917e010SAlan Cox 
48809b09b6c7SMatthew Dillon 			vm_map_lock_downgrade(map);
48810d94caffSDavid Greenman 		} else {
4882df8bae1dSRodney W. Grimes 			/*
48830d94caffSDavid Greenman 			 * We're attempting to read a copy-on-write page --
48840d94caffSDavid Greenman 			 * don't allow writes.
4885df8bae1dSRodney W. Grimes 			 */
48862d8acc0fSJohn Dyson 			prot &= ~VM_PROT_WRITE;
4887df8bae1dSRodney W. Grimes 		}
4888df8bae1dSRodney W. Grimes 	}
48892d8acc0fSJohn Dyson 
4890df8bae1dSRodney W. Grimes 	/*
4891df8bae1dSRodney W. Grimes 	 * Create an object if necessary.
4892df8bae1dSRodney W. Grimes 	 */
489367388836SKonstantin Belousov 	if (entry->object.vm_object == NULL && !map->system_map) {
489425adb370SBrian Feldman 		if (vm_map_lock_upgrade(map))
4895df8bae1dSRodney W. Grimes 			goto RetryLookup;
489667388836SKonstantin Belousov 		entry->object.vm_object = vm_object_allocate_anon(atop(size),
489767388836SKonstantin Belousov 		    NULL, entry->cred, entry->cred != NULL ? size : 0);
4898df8bae1dSRodney W. Grimes 		entry->offset = 0;
4899ef694c1aSEdward Tomasz Napierala 		entry->cred = NULL;
49009b09b6c7SMatthew Dillon 		vm_map_lock_downgrade(map);
4901df8bae1dSRodney W. Grimes 	}
4902b5b40fa6SJohn Dyson 
4903df8bae1dSRodney W. Grimes 	/*
49040d94caffSDavid Greenman 	 * Return the object/offset from this entry.  If the entry was
49050d94caffSDavid Greenman 	 * copy-on-write or empty, it has been fixed up.
4906df8bae1dSRodney W. Grimes 	 */
490710d9120cSKonstantin Belousov 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4908df8bae1dSRodney W. Grimes 	*object = entry->object.vm_object;
4909df8bae1dSRodney W. Grimes 
4910df8bae1dSRodney W. Grimes 	*out_prot = prot;
4911df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
4912df8bae1dSRodney W. Grimes }
4913df8bae1dSRodney W. Grimes 
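/*
 * Illustrative sketch (not compiled): the pairing contract described
 * above.  The map argument is in/out, so the map written back by
 * vm_map_lookup() (possibly a submap) is the one that must be passed
 * to vm_map_lookup_done().
 */
#if 0
	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
	    &object, &pindex, &prot, &wired);
	if (result != KERN_SUCCESS)
		return (result);
	/* ... use object + pindex while the map stays read-locked ... */
	vm_map_lookup_done(map, entry);
#endif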
4914df8bae1dSRodney W. Grimes /*
491519dc5607STor Egge  *	vm_map_lookup_locked:
491619dc5607STor Egge  *
491719dc5607STor Egge  *	Lookup the faulting address.  A version of vm_map_lookup that returns
491819dc5607STor Egge  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
491919dc5607STor Egge  */
492019dc5607STor Egge int
492119dc5607STor Egge vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
492219dc5607STor Egge 		     vm_offset_t vaddr,
492319dc5607STor Egge 		     vm_prot_t fault_typea,
492419dc5607STor Egge 		     vm_map_entry_t *out_entry,	/* OUT */
492519dc5607STor Egge 		     vm_object_t *object,	/* OUT */
492619dc5607STor Egge 		     vm_pindex_t *pindex,	/* OUT */
492719dc5607STor Egge 		     vm_prot_t *out_prot,	/* OUT */
492819dc5607STor Egge 		     boolean_t *wired)		/* OUT */
492919dc5607STor Egge {
493019dc5607STor Egge 	vm_map_entry_t entry;
493119dc5607STor Egge 	vm_map_t map = *var_map;
493219dc5607STor Egge 	vm_prot_t prot;
493319dc5607STor Egge 	vm_prot_t fault_type = fault_typea;
493419dc5607STor Egge 
493519dc5607STor Egge 	/*
49364c3ef59eSAlan Cox 	 * Lookup the faulting address.
493719dc5607STor Egge 	 */
493819dc5607STor Egge 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
493919dc5607STor Egge 		return (KERN_INVALID_ADDRESS);
494019dc5607STor Egge 
494119dc5607STor Egge 	entry = *out_entry;
494219dc5607STor Egge 
494319dc5607STor Egge 	/*
494419dc5607STor Egge 	 * Fail if the entry refers to a submap.
494519dc5607STor Egge 	 */
494619dc5607STor Egge 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
494719dc5607STor Egge 		return (KERN_FAILURE);
494819dc5607STor Egge 
494919dc5607STor Egge 	/*
495019dc5607STor Egge 	 * Check whether this task is allowed to have this page.
495119dc5607STor Egge 	 */
495219dc5607STor Egge 	prot = entry->protection;
495319dc5607STor Egge 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
495419dc5607STor Egge 	if ((fault_type & prot) != fault_type)
495519dc5607STor Egge 		return (KERN_PROTECTION_FAILURE);
495619dc5607STor Egge 
495719dc5607STor Egge 	/*
495819dc5607STor Egge 	 * If this page is not pageable, we have to get it for all possible
495919dc5607STor Egge 	 * accesses.
496019dc5607STor Egge 	 */
496119dc5607STor Egge 	*wired = (entry->wired_count != 0);
496219dc5607STor Egge 	if (*wired)
4963a6d42a0dSAlan Cox 		fault_type = entry->protection;
496419dc5607STor Egge 
496519dc5607STor Egge 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
496619dc5607STor Egge 		/*
496719dc5607STor Egge 		 * Fail if the entry was copy-on-write for a write fault.
496819dc5607STor Egge 		 */
496919dc5607STor Egge 		if (fault_type & VM_PROT_WRITE)
497019dc5607STor Egge 			return (KERN_FAILURE);
497119dc5607STor Egge 		/*
497219dc5607STor Egge 		 * We're attempting to read a copy-on-write page --
497319dc5607STor Egge 		 * don't allow writes.
497419dc5607STor Egge 		 */
497519dc5607STor Egge 		prot &= ~VM_PROT_WRITE;
497619dc5607STor Egge 	}
497719dc5607STor Egge 
497819dc5607STor Egge 	/*
497919dc5607STor Egge 	 * Fail if an object should be created.
498019dc5607STor Egge 	 */
498119dc5607STor Egge 	if (entry->object.vm_object == NULL && !map->system_map)
498219dc5607STor Egge 		return (KERN_FAILURE);
498319dc5607STor Egge 
498419dc5607STor Egge 	/*
498519dc5607STor Egge 	 * Return the object/offset from this entry.  If the entry was
498619dc5607STor Egge 	 * copy-on-write or empty, it has been fixed up.
498719dc5607STor Egge 	 */
498810d9120cSKonstantin Belousov 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
498919dc5607STor Egge 	*object = entry->object.vm_object;
499019dc5607STor Egge 
499119dc5607STor Egge 	*out_prot = prot;
499219dc5607STor Egge 	return (KERN_SUCCESS);
499319dc5607STor Egge }
499419dc5607STor Egge 
499519dc5607STor Egge /*
4996df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
4997df8bae1dSRodney W. Grimes  *
4998df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
4999df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
5000df8bae1dSRodney W. Grimes  */
50010d94caffSDavid Greenman void
50021b40f8c0SMatthew Dillon vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
5003df8bae1dSRodney W. Grimes {
5004df8bae1dSRodney W. Grimes 	/*
5005df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
5006df8bae1dSRodney W. Grimes 	 */
5007df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
5008df8bae1dSRodney W. Grimes }
5009df8bae1dSRodney W. Grimes 
501019ea042eSKonstantin Belousov vm_offset_t
501119ea042eSKonstantin Belousov vm_map_max_KBI(const struct vm_map *map)
501219ea042eSKonstantin Belousov {
501319ea042eSKonstantin Belousov 
5014f0165b1cSKonstantin Belousov 	return (vm_map_max(map));
501519ea042eSKonstantin Belousov }
501619ea042eSKonstantin Belousov 
501719ea042eSKonstantin Belousov vm_offset_t
501819ea042eSKonstantin Belousov vm_map_min_KBI(const struct vm_map *map)
501919ea042eSKonstantin Belousov {
502019ea042eSKonstantin Belousov 
5021f0165b1cSKonstantin Belousov 	return (vm_map_min(map));
502219ea042eSKonstantin Belousov }
502319ea042eSKonstantin Belousov 
502419ea042eSKonstantin Belousov pmap_t
502519ea042eSKonstantin Belousov vm_map_pmap_KBI(vm_map_t map)
502619ea042eSKonstantin Belousov {
502719ea042eSKonstantin Belousov 
502819ea042eSKonstantin Belousov 	return (map->pmap);
502919ea042eSKonstantin Belousov }
503019ea042eSKonstantin Belousov 
5031721899b1SDoug Moore #ifdef INVARIANTS
5032721899b1SDoug Moore static void
5033461587dcSDoug Moore _vm_map_assert_consistent(vm_map_t map, int check)
5034721899b1SDoug Moore {
5035721899b1SDoug Moore 	vm_map_entry_t entry, prev;
5036c1ad5342SDoug Moore 	vm_map_entry_t cur, header, lbound, ubound;
5037721899b1SDoug Moore 	vm_size_t max_left, max_right;
5038721899b1SDoug Moore 
503985b7bedbSDoug Moore #ifdef DIAGNOSTIC
504085b7bedbSDoug Moore 	++map->nupdates;
504185b7bedbSDoug Moore #endif
5042461587dcSDoug Moore 	if (enable_vmmap_check != check)
5043721899b1SDoug Moore 		return;
5044721899b1SDoug Moore 
5045c1ad5342SDoug Moore 	header = prev = &map->header;
5046721899b1SDoug Moore 	VM_MAP_ENTRY_FOREACH(entry, map) {
5047721899b1SDoug Moore 		KASSERT(prev->end <= entry->start,
5048721899b1SDoug Moore 		    ("map %p prev->end = %jx, start = %jx", map,
5049721899b1SDoug Moore 		    (uintmax_t)prev->end, (uintmax_t)entry->start));
5050721899b1SDoug Moore 		KASSERT(entry->start < entry->end,
5051721899b1SDoug Moore 		    ("map %p start = %jx, end = %jx", map,
5052721899b1SDoug Moore 		    (uintmax_t)entry->start, (uintmax_t)entry->end));
5053c1ad5342SDoug Moore 		KASSERT(entry->left == header ||
5054721899b1SDoug Moore 		    entry->left->start < entry->start,
5055721899b1SDoug Moore 		    ("map %p left->start = %jx, start = %jx", map,
5056721899b1SDoug Moore 		    (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5057c1ad5342SDoug Moore 		KASSERT(entry->right == header ||
5058721899b1SDoug Moore 		    entry->start < entry->right->start,
5059721899b1SDoug Moore 		    ("map %p start = %jx, right->start = %jx", map,
5060721899b1SDoug Moore 		    (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5061c1ad5342SDoug Moore 		cur = map->root;
5062c1ad5342SDoug Moore 		lbound = ubound = header;
5063c1ad5342SDoug Moore 		for (;;) {
5064c1ad5342SDoug Moore 			if (entry->start < cur->start) {
5065c1ad5342SDoug Moore 				ubound = cur;
5066c1ad5342SDoug Moore 				cur = cur->left;
5067c1ad5342SDoug Moore 				KASSERT(cur != lbound,
5068c1ad5342SDoug Moore 				    ("map %p cannot find %jx",
5069c0829bb1SMark Johnston 				    map, (uintmax_t)entry->start));
5070c1ad5342SDoug Moore 			} else if (cur->end <= entry->start) {
5071c1ad5342SDoug Moore 				lbound = cur;
5072c1ad5342SDoug Moore 				cur = cur->right;
5073c1ad5342SDoug Moore 				KASSERT(cur != ubound,
5074c1ad5342SDoug Moore 				    ("map %p cannot find %jx",
5075c0829bb1SMark Johnston 				    map, (uintmax_t)entry->start));
5076c1ad5342SDoug Moore 			} else {
5077c1ad5342SDoug Moore 				KASSERT(cur == entry,
5078c1ad5342SDoug Moore 				    ("map %p cannot find %jx",
5079c0829bb1SMark Johnston 				    map, (uintmax_t)entry->start));
5080c1ad5342SDoug Moore 				break;
5081c1ad5342SDoug Moore 			}
5082c1ad5342SDoug Moore 		}
5083c1ad5342SDoug Moore 		max_left = vm_map_entry_max_free_left(entry, lbound);
5084c1ad5342SDoug Moore 		max_right = vm_map_entry_max_free_right(entry, ubound);
5085c1ad5342SDoug Moore 		KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5086721899b1SDoug Moore 		    ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
5087721899b1SDoug Moore 		    (uintmax_t)entry->max_free,
5088721899b1SDoug Moore 		    (uintmax_t)max_left, (uintmax_t)max_right));
5089721899b1SDoug Moore 		prev = entry;
5090721899b1SDoug Moore 	}
5091721899b1SDoug Moore 	KASSERT(prev->end <= entry->start,
5092721899b1SDoug Moore 	    ("map %p prev->end = %jx, start = %jx", map,
5093721899b1SDoug Moore 	    (uintmax_t)prev->end, (uintmax_t)entry->start));
5094721899b1SDoug Moore }
5095721899b1SDoug Moore #endif
5096721899b1SDoug Moore 
5097c7c34a24SBruce Evans #include "opt_ddb.h"
5098c3cb3e12SDavid Greenman #ifdef DDB
5099c7c34a24SBruce Evans #include <sys/kernel.h>
5100c7c34a24SBruce Evans 
5101c7c34a24SBruce Evans #include <ddb/ddb.h>
5102c7c34a24SBruce Evans 
51032ebcd458SAttilio Rao static void
51042ebcd458SAttilio Rao vm_map_print(vm_map_t map)
5105df8bae1dSRodney W. Grimes {
510677131528SDoug Moore 	vm_map_entry_t entry, prev;
5107c7c34a24SBruce Evans 
5108e5f251d2SAlan Cox 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
5109e5f251d2SAlan Cox 	    (void *)map,
5110101eeb7fSBruce Evans 	    (void *)map->pmap, map->nentries, map->timestamp);
5111df8bae1dSRodney W. Grimes 
5112c7c34a24SBruce Evans 	db_indent += 2;
5113721899b1SDoug Moore 	prev = &map->header;
5114721899b1SDoug Moore 	VM_MAP_ENTRY_FOREACH(entry, map) {
511519bd0d9cSKonstantin Belousov 		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
511619bd0d9cSKonstantin Belousov 		    (void *)entry, (void *)entry->start, (void *)entry->end,
511719bd0d9cSKonstantin Belousov 		    entry->eflags);
5118e5f251d2SAlan Cox 		{
5119df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
5120df8bae1dSRodney W. Grimes 			{"share", "copy", "none", "donate_copy"};
51210d94caffSDavid Greenman 
512295e5e988SJohn Dyson 			db_iprintf(" prot=%x/%x/%s",
5123df8bae1dSRodney W. Grimes 			    entry->protection,
5124df8bae1dSRodney W. Grimes 			    entry->max_protection,
512577131528SDoug Moore 			    inheritance_name[(int)(unsigned char)
512677131528SDoug Moore 			    entry->inheritance]);
5127df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
512895e5e988SJohn Dyson 				db_printf(", wired");
5129df8bae1dSRodney W. Grimes 		}
51309fdfe602SMatthew Dillon 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5131cd034a5bSMaxime Henrion 			db_printf(", share=%p, offset=0x%jx\n",
51329fdfe602SMatthew Dillon 			    (void *)entry->object.sub_map,
5133cd034a5bSMaxime Henrion 			    (uintmax_t)entry->offset);
513477131528SDoug Moore 			if (prev == &map->header ||
513577131528SDoug Moore 			    prev->object.sub_map !=
513677131528SDoug Moore 				entry->object.sub_map) {
5137c7c34a24SBruce Evans 				db_indent += 2;
51382ebcd458SAttilio Rao 				vm_map_print((vm_map_t)entry->object.sub_map);
5139c7c34a24SBruce Evans 				db_indent -= 2;
5140df8bae1dSRodney W. Grimes 			}
51410d94caffSDavid Greenman 		} else {
5142ef694c1aSEdward Tomasz Napierala 			if (entry->cred != NULL)
5143ef694c1aSEdward Tomasz Napierala 				db_printf(", ruid %d", entry->cred->cr_ruid);
5144cd034a5bSMaxime Henrion 			db_printf(", object=%p, offset=0x%jx",
5145101eeb7fSBruce Evans 			    (void *)entry->object.vm_object,
5146cd034a5bSMaxime Henrion 			    (uintmax_t)entry->offset);
5147ef694c1aSEdward Tomasz Napierala 			if (entry->object.vm_object && entry->object.vm_object->cred)
5148ef694c1aSEdward Tomasz Napierala 				db_printf(", obj ruid %d charge %jx",
5149ef694c1aSEdward Tomasz Napierala 				    entry->object.vm_object->cred->cr_ruid,
51503364c323SKonstantin Belousov 				    (uintmax_t)entry->object.vm_object->charge);
5151afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
5152c7c34a24SBruce Evans 				db_printf(", copy (%s)",
5153afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5154c7c34a24SBruce Evans 			db_printf("\n");
5155df8bae1dSRodney W. Grimes 
515677131528SDoug Moore 			if (prev == &map->header ||
515777131528SDoug Moore 			    prev->object.vm_object !=
515877131528SDoug Moore 				entry->object.vm_object) {
5159c7c34a24SBruce Evans 				db_indent += 2;
5160101eeb7fSBruce Evans 				vm_object_print((db_expr_t)(intptr_t)
5161101eeb7fSBruce Evans 						entry->object.vm_object,
516244bbc3b7SKonstantin Belousov 						0, 0, (char *)0);
5163c7c34a24SBruce Evans 				db_indent -= 2;
5164df8bae1dSRodney W. Grimes 			}
5165df8bae1dSRodney W. Grimes 		}
5166721899b1SDoug Moore 		prev = entry;
5167df8bae1dSRodney W. Grimes 	}
5168c7c34a24SBruce Evans 	db_indent -= 2;
5169df8bae1dSRodney W. Grimes }
517095e5e988SJohn Dyson 
51712ebcd458SAttilio Rao DB_SHOW_COMMAND(map, map)
51722ebcd458SAttilio Rao {
51732ebcd458SAttilio Rao 
51742ebcd458SAttilio Rao 	if (!have_addr) {
51752ebcd458SAttilio Rao 		db_printf("usage: show map <addr>\n");
51762ebcd458SAttilio Rao 		return;
51772ebcd458SAttilio Rao 	}
51782ebcd458SAttilio Rao 	vm_map_print((vm_map_t)addr);
51792ebcd458SAttilio Rao }
518095e5e988SJohn Dyson 
518195e5e988SJohn Dyson DB_SHOW_COMMAND(procvm, procvm)
518295e5e988SJohn Dyson {
518395e5e988SJohn Dyson 	struct proc *p;
518495e5e988SJohn Dyson 
518595e5e988SJohn Dyson 	if (have_addr) {
5186a9546a6bSJohn Baldwin 		p = db_lookup_proc(addr);
518795e5e988SJohn Dyson 	} else {
518895e5e988SJohn Dyson 		p = curproc;
518995e5e988SJohn Dyson 	}
519095e5e988SJohn Dyson 
5191ac1e407bSBruce Evans 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
5192ac1e407bSBruce Evans 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
5193b1028ad1SLuoqi Chen 	    (void *)vmspace_pmap(p->p_vmspace));
519495e5e988SJohn Dyson 
51952ebcd458SAttilio Rao 	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
519695e5e988SJohn Dyson }
519795e5e988SJohn Dyson 
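/*
 * Typical ddb usage of the commands above (addresses illustrative):
 *
 *	db> show map 0xfffff80003a1c000
 *	db> show procvm 0xfffff800039e0530
 */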
5198c7c34a24SBruce Evans #endif /* DDB */