1df8bae1dSRodney W. Grimes /* 2df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 3df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 4df8bae1dSRodney W. Grimes * 5df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 6df8bae1dSRodney W. Grimes * The Mach Operating System project at Carnegie-Mellon University. 7df8bae1dSRodney W. Grimes * 8df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 9df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 10df8bae1dSRodney W. Grimes * are met: 11df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 12df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 13df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 15df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 16df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 17df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 18df8bae1dSRodney W. Grimes * without specific prior written permission. 19df8bae1dSRodney W. Grimes * 20df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25df8bae1dSRodney W. 
Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30df8bae1dSRodney W. Grimes * SUCH DAMAGE. 31df8bae1dSRodney W. Grimes * 323c4dd356SDavid Greenman * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 33df8bae1dSRodney W. Grimes * 34df8bae1dSRodney W. Grimes * 35df8bae1dSRodney W. Grimes * Copyright (c) 1987, 1990 Carnegie-Mellon University. 36df8bae1dSRodney W. Grimes * All rights reserved. 37df8bae1dSRodney W. Grimes * 38df8bae1dSRodney W. Grimes * Authors: Avadis Tevanian, Jr., Michael Wayne Young 39df8bae1dSRodney W. Grimes * 40df8bae1dSRodney W. Grimes * Permission to use, copy, modify and distribute this software and 41df8bae1dSRodney W. Grimes * its documentation is hereby granted, provided that both the copyright 42df8bae1dSRodney W. Grimes * notice and this permission notice appear in all copies of the 43df8bae1dSRodney W. Grimes * software, derivative works or modified versions, and any portions 44df8bae1dSRodney W. Grimes * thereof, and that both notices appear in supporting documentation. 45df8bae1dSRodney W. Grimes * 46df8bae1dSRodney W. Grimes * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 47df8bae1dSRodney W. Grimes * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 48df8bae1dSRodney W. Grimes * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 49df8bae1dSRodney W. Grimes * 50df8bae1dSRodney W. Grimes * Carnegie Mellon requests users of this software to return to 51df8bae1dSRodney W. Grimes * 52df8bae1dSRodney W. 
Grimes * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 53df8bae1dSRodney W. Grimes * School of Computer Science 54df8bae1dSRodney W. Grimes * Carnegie Mellon University 55df8bae1dSRodney W. Grimes * Pittsburgh PA 15213-3890 56df8bae1dSRodney W. Grimes * 57df8bae1dSRodney W. Grimes * any improvements or extensions that they make and grant Carnegie the 58df8bae1dSRodney W. Grimes * rights to redistribute these changes. 59df8bae1dSRodney W. Grimes */ 60df8bae1dSRodney W. Grimes 61df8bae1dSRodney W. Grimes /* 62df8bae1dSRodney W. Grimes * Virtual memory mapping module. 63df8bae1dSRodney W. Grimes */ 64df8bae1dSRodney W. Grimes 65874651b1SDavid E. O'Brien #include <sys/cdefs.h> 66874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$"); 67874651b1SDavid E. O'Brien 68df8bae1dSRodney W. Grimes #include <sys/param.h> 69df8bae1dSRodney W. Grimes #include <sys/systm.h> 7061d80e90SJohn Baldwin #include <sys/ktr.h> 71fb919e4dSMark Murray #include <sys/lock.h> 72fb919e4dSMark Murray #include <sys/mutex.h> 73b5e8ce9fSBruce Evans #include <sys/proc.h> 74efeaf95aSDavid Greenman #include <sys/vmmeter.h> 75867a482dSJohn Dyson #include <sys/mman.h> 761efb74fbSJohn Dyson #include <sys/vnode.h> 772267af78SJulian Elischer #include <sys/resourcevar.h> 783fde38dfSMike Silbersack #include <sys/file.h> 7905ba50f5SJake Burkholder #include <sys/sysent.h> 803db161e0SMatthew Dillon #include <sys/shm.h> 81df8bae1dSRodney W. Grimes 82df8bae1dSRodney W. Grimes #include <vm/vm.h> 83efeaf95aSDavid Greenman #include <vm/vm_param.h> 84efeaf95aSDavid Greenman #include <vm/pmap.h> 85efeaf95aSDavid Greenman #include <vm/vm_map.h> 86df8bae1dSRodney W. Grimes #include <vm/vm_page.h> 87df8bae1dSRodney W. Grimes #include <vm/vm_object.h> 8847221757SJohn Dyson #include <vm/vm_pager.h> 8926f9a767SRodney W. 
Grimes #include <vm/vm_kern.h> 90efeaf95aSDavid Greenman #include <vm/vm_extern.h> 9121cd6e62SSeigo Tanimura #include <vm/swap_pager.h> 92670d17b5SJeff Roberson #include <vm/uma.h> 93df8bae1dSRodney W. Grimes 94df8bae1dSRodney W. Grimes /* 95df8bae1dSRodney W. Grimes * Virtual memory maps provide for the mapping, protection, 96df8bae1dSRodney W. Grimes * and sharing of virtual memory objects. In addition, 97df8bae1dSRodney W. Grimes * this module provides for an efficient virtual copy of 98df8bae1dSRodney W. Grimes * memory from one map to another. 99df8bae1dSRodney W. Grimes * 100df8bae1dSRodney W. Grimes * Synchronization is required prior to most operations. 101df8bae1dSRodney W. Grimes * 102df8bae1dSRodney W. Grimes * Maps consist of an ordered doubly-linked list of simple 103df8bae1dSRodney W. Grimes * entries; a single hint is used to speed up lookups. 104df8bae1dSRodney W. Grimes * 105956f3135SPhilippe Charnier * Since portions of maps are specified by start/end addresses, 106df8bae1dSRodney W. Grimes * which may not align with existing map entries, all 107df8bae1dSRodney W. Grimes * routines merely "clip" entries to these start/end values. 108df8bae1dSRodney W. Grimes * [That is, an entry is split into two, bordering at a 109df8bae1dSRodney W. Grimes * start or end value.] Note that these clippings may not 110df8bae1dSRodney W. Grimes * always be necessary (as the two resulting entries are then 111df8bae1dSRodney W. Grimes * not changed); however, the clipping is done for convenience. 112df8bae1dSRodney W. Grimes * 113df8bae1dSRodney W. Grimes * As mentioned above, virtual copy operations are performed 114ad5fca3bSAlan Cox * by copying VM object references from one map to 115df8bae1dSRodney W. Grimes * another, and then marking both regions as copy-on-write. 116df8bae1dSRodney W. Grimes */ 117df8bae1dSRodney W. Grimes 118df8bae1dSRodney W. Grimes /* 119df8bae1dSRodney W. Grimes * vm_map_startup: 120df8bae1dSRodney W. Grimes * 121df8bae1dSRodney W. 
Grimes * Initialize the vm_map module. Must be called before 122df8bae1dSRodney W. Grimes * any other vm_map routines. 123df8bae1dSRodney W. Grimes * 124df8bae1dSRodney W. Grimes * Map and entry structures are allocated from the general 125df8bae1dSRodney W. Grimes * purpose memory pool with some exceptions: 126df8bae1dSRodney W. Grimes * 127df8bae1dSRodney W. Grimes * - The kernel map and kmem submap are allocated statically. 128df8bae1dSRodney W. Grimes * - Kernel map entries are allocated out of a static pool. 129df8bae1dSRodney W. Grimes * 130df8bae1dSRodney W. Grimes * These restrictions are necessary since malloc() uses the 131df8bae1dSRodney W. Grimes * maps and requires map entries. 132df8bae1dSRodney W. Grimes */ 133df8bae1dSRodney W. Grimes 1343a92e5d5SAlan Cox static struct mtx map_sleep_mtx; 1358355f576SJeff Roberson static uma_zone_t mapentzone; 1368355f576SJeff Roberson static uma_zone_t kmapentzone; 1378355f576SJeff Roberson static uma_zone_t mapzone; 1388355f576SJeff Roberson static uma_zone_t vmspace_zone; 1398355f576SJeff Roberson static struct vm_object kmapentobj; 140b23f72e9SBrian Feldman static int vmspace_zinit(void *mem, int size, int flags); 1418355f576SJeff Roberson static void vmspace_zfini(void *mem, int size); 142b23f72e9SBrian Feldman static int vm_map_zinit(void *mem, int ize, int flags); 1438355f576SJeff Roberson static void vm_map_zfini(void *mem, int size); 1448355f576SJeff Roberson static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max); 1451fc43fd1SAlan Cox 1468355f576SJeff Roberson #ifdef INVARIANTS 1478355f576SJeff Roberson static void vm_map_zdtor(void *mem, int size, void *arg); 1488355f576SJeff Roberson static void vmspace_zdtor(void *mem, int size, void *arg); 1498355f576SJeff Roberson #endif 150b18bfc3dSJohn Dyson 1510d94caffSDavid Greenman void 1521b40f8c0SMatthew Dillon vm_map_startup(void) 153df8bae1dSRodney W. 
Grimes { 1543a92e5d5SAlan Cox mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); 1558355f576SJeff Roberson mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL, 1568355f576SJeff Roberson #ifdef INVARIANTS 1578355f576SJeff Roberson vm_map_zdtor, 1588355f576SJeff Roberson #else 1598355f576SJeff Roberson NULL, 1608355f576SJeff Roberson #endif 1618355f576SJeff Roberson vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 1628355f576SJeff Roberson uma_prealloc(mapzone, MAX_KMAP); 163670d17b5SJeff Roberson kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 16418aa2de5SJeff Roberson NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 16518aa2de5SJeff Roberson UMA_ZONE_MTXCLASS | UMA_ZONE_VM); 166670d17b5SJeff Roberson uma_prealloc(kmapentzone, MAX_KMAPENT); 167670d17b5SJeff Roberson mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 168670d17b5SJeff Roberson NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1698355f576SJeff Roberson uma_prealloc(mapentzone, MAX_MAPENT); 170df8bae1dSRodney W. Grimes } 171df8bae1dSRodney W. 
Grimes 1728355f576SJeff Roberson static void 1738355f576SJeff Roberson vmspace_zfini(void *mem, int size) 1748355f576SJeff Roberson { 1758355f576SJeff Roberson struct vmspace *vm; 1768355f576SJeff Roberson 1778355f576SJeff Roberson vm = (struct vmspace *)mem; 1783f25cbddSPeter Wemm pmap_release(vmspace_pmap(vm)); 1798355f576SJeff Roberson vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map)); 1808355f576SJeff Roberson } 1818355f576SJeff Roberson 182b23f72e9SBrian Feldman static int 183b23f72e9SBrian Feldman vmspace_zinit(void *mem, int size, int flags) 1848355f576SJeff Roberson { 1858355f576SJeff Roberson struct vmspace *vm; 1868355f576SJeff Roberson 1878355f576SJeff Roberson vm = (struct vmspace *)mem; 1888355f576SJeff Roberson 189b23f72e9SBrian Feldman (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags); 1903f25cbddSPeter Wemm pmap_pinit(vmspace_pmap(vm)); 191b23f72e9SBrian Feldman return (0); 1928355f576SJeff Roberson } 1938355f576SJeff Roberson 1948355f576SJeff Roberson static void 1958355f576SJeff Roberson vm_map_zfini(void *mem, int size) 1968355f576SJeff Roberson { 1978355f576SJeff Roberson vm_map_t map; 1988355f576SJeff Roberson 1998355f576SJeff Roberson map = (vm_map_t)mem; 20036daaecdSAlan Cox mtx_destroy(&map->system_mtx); 20112c64974SMaxime Henrion sx_destroy(&map->lock); 2028355f576SJeff Roberson } 2038355f576SJeff Roberson 204b23f72e9SBrian Feldman static int 205b23f72e9SBrian Feldman vm_map_zinit(void *mem, int size, int flags) 2068355f576SJeff Roberson { 2078355f576SJeff Roberson vm_map_t map; 2088355f576SJeff Roberson 2098355f576SJeff Roberson map = (vm_map_t)mem; 2108355f576SJeff Roberson map->nentries = 0; 2118355f576SJeff Roberson map->size = 0; 2128355f576SJeff Roberson map->infork = 0; 213d923c598SAlan Cox mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 21412c64974SMaxime Henrion sx_init(&map->lock, "user map"); 215b23f72e9SBrian Feldman return (0); 2168355f576SJeff Roberson } 2178355f576SJeff Roberson 2188355f576SJeff 
Roberson #ifdef INVARIANTS 2198355f576SJeff Roberson static void 2208355f576SJeff Roberson vmspace_zdtor(void *mem, int size, void *arg) 2218355f576SJeff Roberson { 2228355f576SJeff Roberson struct vmspace *vm; 2238355f576SJeff Roberson 2248355f576SJeff Roberson vm = (struct vmspace *)mem; 2258355f576SJeff Roberson 2268355f576SJeff Roberson vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg); 2278355f576SJeff Roberson } 2288355f576SJeff Roberson static void 2298355f576SJeff Roberson vm_map_zdtor(void *mem, int size, void *arg) 2308355f576SJeff Roberson { 2318355f576SJeff Roberson vm_map_t map; 2328355f576SJeff Roberson 2338355f576SJeff Roberson map = (vm_map_t)mem; 2348355f576SJeff Roberson KASSERT(map->nentries == 0, 2358355f576SJeff Roberson ("map %p nentries == %d on free.", 2368355f576SJeff Roberson map, map->nentries)); 2378355f576SJeff Roberson KASSERT(map->size == 0, 2388355f576SJeff Roberson ("map %p size == %lu on free.", 2399eb6e519SJeff Roberson map, (unsigned long)map->size)); 2408355f576SJeff Roberson KASSERT(map->infork == 0, 2418355f576SJeff Roberson ("map %p infork == %d on free.", 2428355f576SJeff Roberson map, map->infork)); 2438355f576SJeff Roberson } 2448355f576SJeff Roberson #endif /* INVARIANTS */ 2458355f576SJeff Roberson 246df8bae1dSRodney W. Grimes /* 247df8bae1dSRodney W. Grimes * Allocate a vmspace structure, including a vm_map and pmap, 248df8bae1dSRodney W. Grimes * and initialize those structures. The refcnt is set to 1. 249df8bae1dSRodney W. Grimes */ 250df8bae1dSRodney W. Grimes struct vmspace * 2512d8acc0fSJohn Dyson vmspace_alloc(min, max) 252df8bae1dSRodney W. Grimes vm_offset_t min, max; 253df8bae1dSRodney W. 
Grimes { 254c0877f10SJohn Dyson struct vmspace *vm; 2550d94caffSDavid Greenman 256a163d034SWarner Losh vm = uma_zalloc(vmspace_zone, M_WAITOK); 25721c641b2SJohn Baldwin CTR1(KTR_VM, "vmspace_alloc: %p", vm); 2588355f576SJeff Roberson _vm_map_init(&vm->vm_map, min, max); 259b1028ad1SLuoqi Chen vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ 260df8bae1dSRodney W. Grimes vm->vm_refcnt = 1; 2612d8acc0fSJohn Dyson vm->vm_shm = NULL; 26251ab6c28SAlan Cox vm->vm_swrss = 0; 26351ab6c28SAlan Cox vm->vm_tsize = 0; 26451ab6c28SAlan Cox vm->vm_dsize = 0; 26551ab6c28SAlan Cox vm->vm_ssize = 0; 26651ab6c28SAlan Cox vm->vm_taddr = 0; 26751ab6c28SAlan Cox vm->vm_daddr = 0; 26851ab6c28SAlan Cox vm->vm_maxsaddr = 0; 269389d2b6eSMatthew Dillon vm->vm_exitingcnt = 0; 270df8bae1dSRodney W. Grimes return (vm); 271df8bae1dSRodney W. Grimes } 272df8bae1dSRodney W. Grimes 273df8bae1dSRodney W. Grimes void 2741b40f8c0SMatthew Dillon vm_init2(void) 2751b40f8c0SMatthew Dillon { 2769e7c1bceSPeter Wemm uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count, 2773fde38dfSMike Silbersack (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 + 2783fde38dfSMike Silbersack maxproc * 2 + maxfiles); 2798355f576SJeff Roberson vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, 2808355f576SJeff Roberson #ifdef INVARIANTS 2818355f576SJeff Roberson vmspace_zdtor, 2828355f576SJeff Roberson #else 2838355f576SJeff Roberson NULL, 2848355f576SJeff Roberson #endif 2858355f576SJeff Roberson vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 286ba9be04cSJohn Dyson pmap_init2(); 2873075778bSJohn Dyson } 2883075778bSJohn Dyson 289582ec34cSAlfred Perlstein static __inline void 290582ec34cSAlfred Perlstein vmspace_dofree(struct vmspace *vm) 291df8bae1dSRodney W. 
Grimes { 29221c641b2SJohn Baldwin CTR1(KTR_VM, "vmspace_free: %p", vm); 2933db161e0SMatthew Dillon 2943db161e0SMatthew Dillon /* 2953db161e0SMatthew Dillon * Make sure any SysV shm is freed, it might not have been in 2963db161e0SMatthew Dillon * exit1(). 2973db161e0SMatthew Dillon */ 2983db161e0SMatthew Dillon shmexit(vm); 2993db161e0SMatthew Dillon 30030dcfc09SJohn Dyson /* 301df8bae1dSRodney W. Grimes * Lock the map, to wait out all other references to it. 3020d94caffSDavid Greenman * Delete all of the mappings and pages they hold, then call 3030d94caffSDavid Greenman * the pmap module to reclaim anything left. 304df8bae1dSRodney W. Grimes */ 305df8bae1dSRodney W. Grimes vm_map_lock(&vm->vm_map); 306df8bae1dSRodney W. Grimes (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, 307df8bae1dSRodney W. Grimes vm->vm_map.max_offset); 308a1f6d91cSDavid Greenman vm_map_unlock(&vm->vm_map); 3098355f576SJeff Roberson 3108355f576SJeff Roberson uma_zfree(vmspace_zone, vm); 311df8bae1dSRodney W. 
Grimes } 312582ec34cSAlfred Perlstein 313582ec34cSAlfred Perlstein void 314582ec34cSAlfred Perlstein vmspace_free(struct vmspace *vm) 315582ec34cSAlfred Perlstein { 3161a276a3fSAlan Cox int refcnt; 317582ec34cSAlfred Perlstein 318582ec34cSAlfred Perlstein if (vm->vm_refcnt == 0) 319582ec34cSAlfred Perlstein panic("vmspace_free: attempt to free already freed vmspace"); 320582ec34cSAlfred Perlstein 3211a276a3fSAlan Cox do 3221a276a3fSAlan Cox refcnt = vm->vm_refcnt; 3231a276a3fSAlan Cox while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1)); 3241a276a3fSAlan Cox if (refcnt == 1 && vm->vm_exitingcnt == 0) 325582ec34cSAlfred Perlstein vmspace_dofree(vm); 326582ec34cSAlfred Perlstein } 327582ec34cSAlfred Perlstein 328582ec34cSAlfred Perlstein void 329582ec34cSAlfred Perlstein vmspace_exitfree(struct proc *p) 330582ec34cSAlfred Perlstein { 331334f7061SPeter Wemm struct vmspace *vm; 3321a276a3fSAlan Cox int exitingcnt; 333582ec34cSAlfred Perlstein 334334f7061SPeter Wemm vm = p->p_vmspace; 335334f7061SPeter Wemm p->p_vmspace = NULL; 336389d2b6eSMatthew Dillon 337389d2b6eSMatthew Dillon /* 338389d2b6eSMatthew Dillon * cleanup by parent process wait()ing on exiting child. vm_refcnt 339389d2b6eSMatthew Dillon * may not be 0 (e.g. fork() and child exits without exec()ing). 340389d2b6eSMatthew Dillon * exitingcnt may increment above 0 and drop back down to zero 341389d2b6eSMatthew Dillon * several times while vm_refcnt is held non-zero. vm_refcnt 342389d2b6eSMatthew Dillon * may also increment above 0 and drop back down to zero several 343389d2b6eSMatthew Dillon * times while vm_exitingcnt is held non-zero. 344389d2b6eSMatthew Dillon * 345389d2b6eSMatthew Dillon * The last wait on the exiting child's vmspace will clean up 346389d2b6eSMatthew Dillon * the remainder of the vmspace. 
347389d2b6eSMatthew Dillon */ 3481a276a3fSAlan Cox do 3491a276a3fSAlan Cox exitingcnt = vm->vm_exitingcnt; 3501a276a3fSAlan Cox while (!atomic_cmpset_int(&vm->vm_exitingcnt, exitingcnt, 3511a276a3fSAlan Cox exitingcnt - 1)); 3521a276a3fSAlan Cox if (vm->vm_refcnt == 0 && exitingcnt == 1) 353334f7061SPeter Wemm vmspace_dofree(vm); 354334f7061SPeter Wemm } 355df8bae1dSRodney W. Grimes 3561b40f8c0SMatthew Dillon void 357780b1c09SAlan Cox _vm_map_lock(vm_map_t map, const char *file, int line) 3581b40f8c0SMatthew Dillon { 359bc91c510SAlan Cox 36093bc4879SAlan Cox if (map->system_map) 36136daaecdSAlan Cox _mtx_lock_flags(&map->system_mtx, 0, file, line); 36212c64974SMaxime Henrion else 36312c64974SMaxime Henrion _sx_xlock(&map->lock, file, line); 3641b40f8c0SMatthew Dillon map->timestamp++; 3651b40f8c0SMatthew Dillon } 3661b40f8c0SMatthew Dillon 3671b40f8c0SMatthew Dillon void 368780b1c09SAlan Cox _vm_map_unlock(vm_map_t map, const char *file, int line) 3690e0af8ecSBrian Feldman { 370bc91c510SAlan Cox 37136daaecdSAlan Cox if (map->system_map) 37236daaecdSAlan Cox _mtx_unlock_flags(&map->system_mtx, 0, file, line); 37336daaecdSAlan Cox else 37412c64974SMaxime Henrion _sx_xunlock(&map->lock, file, line); 3750e0af8ecSBrian Feldman } 3760e0af8ecSBrian Feldman 3770e0af8ecSBrian Feldman void 378780b1c09SAlan Cox _vm_map_lock_read(vm_map_t map, const char *file, int line) 3790e0af8ecSBrian Feldman { 380bc91c510SAlan Cox 38193bc4879SAlan Cox if (map->system_map) 38236daaecdSAlan Cox _mtx_lock_flags(&map->system_mtx, 0, file, line); 38312c64974SMaxime Henrion else 38412c64974SMaxime Henrion _sx_xlock(&map->lock, file, line); 38536daaecdSAlan Cox } 3860e0af8ecSBrian Feldman 3870e0af8ecSBrian Feldman void 388780b1c09SAlan Cox _vm_map_unlock_read(vm_map_t map, const char *file, int line) 3890e0af8ecSBrian Feldman { 390bc91c510SAlan Cox 39136daaecdSAlan Cox if (map->system_map) 39236daaecdSAlan Cox _mtx_unlock_flags(&map->system_mtx, 0, file, line); 39336daaecdSAlan Cox else 
39412c64974SMaxime Henrion _sx_xunlock(&map->lock, file, line); 39525adb370SBrian Feldman } 39625adb370SBrian Feldman 397d974f03cSAlan Cox int 398780b1c09SAlan Cox _vm_map_trylock(vm_map_t map, const char *file, int line) 399d974f03cSAlan Cox { 40025adb370SBrian Feldman int error; 40125adb370SBrian Feldman 40236daaecdSAlan Cox error = map->system_map ? 40336daaecdSAlan Cox !_mtx_trylock(&map->system_mtx, 0, file, line) : 40412c64974SMaxime Henrion !_sx_try_xlock(&map->lock, file, line); 4053a92e5d5SAlan Cox if (error == 0) 4063a92e5d5SAlan Cox map->timestamp++; 407bc91c510SAlan Cox return (error == 0); 4080e0af8ecSBrian Feldman } 4090e0af8ecSBrian Feldman 4100e0af8ecSBrian Feldman int 41172d97679SDavid Schultz _vm_map_trylock_read(vm_map_t map, const char *file, int line) 41272d97679SDavid Schultz { 41372d97679SDavid Schultz int error; 41472d97679SDavid Schultz 41572d97679SDavid Schultz error = map->system_map ? 41672d97679SDavid Schultz !_mtx_trylock(&map->system_mtx, 0, file, line) : 41712c64974SMaxime Henrion !_sx_try_xlock(&map->lock, file, line); 41872d97679SDavid Schultz return (error == 0); 41972d97679SDavid Schultz } 42072d97679SDavid Schultz 42172d97679SDavid Schultz int 422780b1c09SAlan Cox _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 4230e0af8ecSBrian Feldman { 424bc91c510SAlan Cox 42536daaecdSAlan Cox #ifdef INVARIANTS 42612c64974SMaxime Henrion if (map->system_map) { 42736daaecdSAlan Cox _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 42836daaecdSAlan Cox } else 42912c64974SMaxime Henrion _sx_assert(&map->lock, SX_XLOCKED, file, line); 43012c64974SMaxime Henrion #endif 431bc91c510SAlan Cox map->timestamp++; 432bc91c510SAlan Cox return (0); 4330e0af8ecSBrian Feldman } 4340e0af8ecSBrian Feldman 4350e0af8ecSBrian Feldman void 436780b1c09SAlan Cox _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 4371b40f8c0SMatthew Dillon { 438bc91c510SAlan Cox 43936daaecdSAlan Cox #ifdef INVARIANTS 44012c64974SMaxime Henrion if 
(map->system_map) { 44136daaecdSAlan Cox _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 44236daaecdSAlan Cox } else 44312c64974SMaxime Henrion _sx_assert(&map->lock, SX_XLOCKED, file, line); 44412c64974SMaxime Henrion #endif 44525adb370SBrian Feldman } 44625adb370SBrian Feldman 447acd9a301SAlan Cox /* 448acd9a301SAlan Cox * vm_map_unlock_and_wait: 449acd9a301SAlan Cox */ 4509688f931SAlan Cox int 451acd9a301SAlan Cox vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait) 452acd9a301SAlan Cox { 453acd9a301SAlan Cox 4543a92e5d5SAlan Cox mtx_lock(&map_sleep_mtx); 455acd9a301SAlan Cox vm_map_unlock(map); 4563a92e5d5SAlan Cox return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0)); 457acd9a301SAlan Cox } 458acd9a301SAlan Cox 459acd9a301SAlan Cox /* 460acd9a301SAlan Cox * vm_map_wakeup: 461acd9a301SAlan Cox */ 4629688f931SAlan Cox void 463acd9a301SAlan Cox vm_map_wakeup(vm_map_t map) 464acd9a301SAlan Cox { 465acd9a301SAlan Cox 466b49ecb86SAlan Cox /* 4673a92e5d5SAlan Cox * Acquire and release map_sleep_mtx to prevent a wakeup() 4683a92e5d5SAlan Cox * from being performed (and lost) between the vm_map_unlock() 4693a92e5d5SAlan Cox * and the msleep() in vm_map_unlock_and_wait(). 
470b49ecb86SAlan Cox */ 4713a92e5d5SAlan Cox mtx_lock(&map_sleep_mtx); 4723a92e5d5SAlan Cox mtx_unlock(&map_sleep_mtx); 473acd9a301SAlan Cox wakeup(&map->root); 474acd9a301SAlan Cox } 475acd9a301SAlan Cox 4761b40f8c0SMatthew Dillon long 4771b40f8c0SMatthew Dillon vmspace_resident_count(struct vmspace *vmspace) 4781b40f8c0SMatthew Dillon { 4791b40f8c0SMatthew Dillon return pmap_resident_count(vmspace_pmap(vmspace)); 4801b40f8c0SMatthew Dillon } 4811b40f8c0SMatthew Dillon 4822bc7dd56SBruce M Simpson long 4832bc7dd56SBruce M Simpson vmspace_wired_count(struct vmspace *vmspace) 4842bc7dd56SBruce M Simpson { 4852bc7dd56SBruce M Simpson return pmap_wired_count(vmspace_pmap(vmspace)); 4862bc7dd56SBruce M Simpson } 4872bc7dd56SBruce M Simpson 488ff2b5645SMatthew Dillon /* 489df8bae1dSRodney W. Grimes * vm_map_create: 490df8bae1dSRodney W. Grimes * 491df8bae1dSRodney W. Grimes * Creates and returns a new empty VM map with 492df8bae1dSRodney W. Grimes * the given physical map structure, and having 493df8bae1dSRodney W. Grimes * the given lower and upper address bounds. 494df8bae1dSRodney W. Grimes */ 4950d94caffSDavid Greenman vm_map_t 4961b40f8c0SMatthew Dillon vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) 497df8bae1dSRodney W. Grimes { 498c0877f10SJohn Dyson vm_map_t result; 499df8bae1dSRodney W. Grimes 500a163d034SWarner Losh result = uma_zalloc(mapzone, M_WAITOK); 50121c641b2SJohn Baldwin CTR1(KTR_VM, "vm_map_create: %p", result); 5028355f576SJeff Roberson _vm_map_init(result, min, max); 503df8bae1dSRodney W. Grimes result->pmap = pmap; 504df8bae1dSRodney W. Grimes return (result); 505df8bae1dSRodney W. Grimes } 506df8bae1dSRodney W. Grimes 507df8bae1dSRodney W. Grimes /* 508df8bae1dSRodney W. Grimes * Initialize an existing vm_map structure 509df8bae1dSRodney W. Grimes * such as that in the vmspace structure. 510df8bae1dSRodney W. Grimes * The pmap is set elsewhere. 511df8bae1dSRodney W. 
Grimes */ 5128355f576SJeff Roberson static void 5138355f576SJeff Roberson _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 514df8bae1dSRodney W. Grimes { 51521c641b2SJohn Baldwin 516df8bae1dSRodney W. Grimes map->header.next = map->header.prev = &map->header; 5179688f931SAlan Cox map->needs_wakeup = FALSE; 5183075778bSJohn Dyson map->system_map = 0; 519df8bae1dSRodney W. Grimes map->min_offset = min; 520df8bae1dSRodney W. Grimes map->max_offset = max; 521df8bae1dSRodney W. Grimes map->first_free = &map->header; 522af7cd0c5SBrian Feldman map->flags = 0; 5234e94f402SAlan Cox map->root = NULL; 524df8bae1dSRodney W. Grimes map->timestamp = 0; 525df8bae1dSRodney W. Grimes } 526df8bae1dSRodney W. Grimes 527a18b1f1dSJason Evans void 5288355f576SJeff Roberson vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 529a18b1f1dSJason Evans { 5308355f576SJeff Roberson _vm_map_init(map, min, max); 531d923c598SAlan Cox mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 53212c64974SMaxime Henrion sx_init(&map->lock, "user map"); 533a18b1f1dSJason Evans } 534a18b1f1dSJason Evans 535df8bae1dSRodney W. Grimes /* 536b18bfc3dSJohn Dyson * vm_map_entry_dispose: [ internal use only ] 537b18bfc3dSJohn Dyson * 538b18bfc3dSJohn Dyson * Inverse of vm_map_entry_create. 539b18bfc3dSJohn Dyson */ 54062487bb4SJohn Dyson static void 5411b40f8c0SMatthew Dillon vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 542b18bfc3dSJohn Dyson { 5432b4a2c27SAlan Cox uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 544b18bfc3dSJohn Dyson } 545b18bfc3dSJohn Dyson 546b18bfc3dSJohn Dyson /* 547df8bae1dSRodney W. Grimes * vm_map_entry_create: [ internal use only ] 548df8bae1dSRodney W. Grimes * 549df8bae1dSRodney W. Grimes * Allocates a VM map entry for insertion. 550b28cb1caSAlfred Perlstein * No entry fields are filled in. 551df8bae1dSRodney W. 
Grimes */ 552f708ef1bSPoul-Henning Kamp static vm_map_entry_t 5531b40f8c0SMatthew Dillon vm_map_entry_create(vm_map_t map) 554df8bae1dSRodney W. Grimes { 5551f6889a1SMatthew Dillon vm_map_entry_t new_entry; 5561f6889a1SMatthew Dillon 5572b4a2c27SAlan Cox if (map->system_map) 5582b4a2c27SAlan Cox new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 5592b4a2c27SAlan Cox else 560a163d034SWarner Losh new_entry = uma_zalloc(mapentzone, M_WAITOK); 5611f6889a1SMatthew Dillon if (new_entry == NULL) 5621f6889a1SMatthew Dillon panic("vm_map_entry_create: kernel resources exhausted"); 5631f6889a1SMatthew Dillon return (new_entry); 564df8bae1dSRodney W. Grimes } 565df8bae1dSRodney W. Grimes 566df8bae1dSRodney W. Grimes /* 567794316a8SAlan Cox * vm_map_entry_set_behavior: 568794316a8SAlan Cox * 569794316a8SAlan Cox * Set the expected access behavior, either normal, random, or 570794316a8SAlan Cox * sequential. 571794316a8SAlan Cox */ 572794316a8SAlan Cox static __inline void 573794316a8SAlan Cox vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 574794316a8SAlan Cox { 575794316a8SAlan Cox entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 576794316a8SAlan Cox (behavior & MAP_ENTRY_BEHAV_MASK); 577794316a8SAlan Cox } 578794316a8SAlan Cox 579794316a8SAlan Cox /* 5804e94f402SAlan Cox * vm_map_entry_splay: 5814e94f402SAlan Cox * 5824e94f402SAlan Cox * Implements Sleator and Tarjan's top-down splay algorithm. Returns 5834e94f402SAlan Cox * the vm_map_entry containing the given address. If, however, that 5844e94f402SAlan Cox * address is not found in the vm_map, returns a vm_map_entry that is 5854e94f402SAlan Cox * adjacent to the address, coming before or after it. 
5864e94f402SAlan Cox */ 5874e94f402SAlan Cox static vm_map_entry_t 5884e94f402SAlan Cox vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root) 5894e94f402SAlan Cox { 5904e94f402SAlan Cox struct vm_map_entry dummy; 5914e94f402SAlan Cox vm_map_entry_t lefttreemax, righttreemin, y; 5924e94f402SAlan Cox 5934e94f402SAlan Cox if (root == NULL) 5944e94f402SAlan Cox return (root); 5954e94f402SAlan Cox lefttreemax = righttreemin = &dummy; 59661c075b6SAlan Cox for (;; root = y) { 5974e94f402SAlan Cox if (address < root->start) { 59861c075b6SAlan Cox if ((y = root->left) == NULL) 5994e94f402SAlan Cox break; 60061c075b6SAlan Cox if (address < y->start) { 6014e94f402SAlan Cox /* Rotate right. */ 6024e94f402SAlan Cox root->left = y->right; 6034e94f402SAlan Cox y->right = root; 6044e94f402SAlan Cox root = y; 60561c075b6SAlan Cox if ((y = root->left) == NULL) 6064e94f402SAlan Cox break; 6074e94f402SAlan Cox } 6084e94f402SAlan Cox /* Link into the new root's right tree. */ 6094e94f402SAlan Cox righttreemin->left = root; 6104e94f402SAlan Cox righttreemin = root; 6114e94f402SAlan Cox } else if (address >= root->end) { 61261c075b6SAlan Cox if ((y = root->right) == NULL) 6134e94f402SAlan Cox break; 61461c075b6SAlan Cox if (address >= y->end) { 6154e94f402SAlan Cox /* Rotate left. */ 6164e94f402SAlan Cox root->right = y->left; 6174e94f402SAlan Cox y->left = root; 6184e94f402SAlan Cox root = y; 61961c075b6SAlan Cox if ((y = root->right) == NULL) 6204e94f402SAlan Cox break; 6214e94f402SAlan Cox } 6224e94f402SAlan Cox /* Link into the new root's left tree. */ 6234e94f402SAlan Cox lefttreemax->right = root; 6244e94f402SAlan Cox lefttreemax = root; 6254e94f402SAlan Cox } else 6264e94f402SAlan Cox break; 6274e94f402SAlan Cox } 6284e94f402SAlan Cox /* Assemble the new root. 
*/ 6294e94f402SAlan Cox lefttreemax->right = root->left; 6304e94f402SAlan Cox righttreemin->left = root->right; 6314e94f402SAlan Cox root->left = dummy.right; 6324e94f402SAlan Cox root->right = dummy.left; 6334e94f402SAlan Cox return (root); 6344e94f402SAlan Cox } 6354e94f402SAlan Cox 6364e94f402SAlan Cox /* 637df8bae1dSRodney W. Grimes * vm_map_entry_{un,}link: 638df8bae1dSRodney W. Grimes * 639df8bae1dSRodney W. Grimes * Insert/remove entries from maps. 640df8bae1dSRodney W. Grimes */ 6414e94f402SAlan Cox static void 64299c81ca9SAlan Cox vm_map_entry_link(vm_map_t map, 64399c81ca9SAlan Cox vm_map_entry_t after_where, 64499c81ca9SAlan Cox vm_map_entry_t entry) 64599c81ca9SAlan Cox { 64621c641b2SJohn Baldwin 64721c641b2SJohn Baldwin CTR4(KTR_VM, 64821c641b2SJohn Baldwin "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 64921c641b2SJohn Baldwin map->nentries, entry, after_where); 65099c81ca9SAlan Cox map->nentries++; 65199c81ca9SAlan Cox entry->prev = after_where; 65299c81ca9SAlan Cox entry->next = after_where->next; 65399c81ca9SAlan Cox entry->next->prev = entry; 65499c81ca9SAlan Cox after_where->next = entry; 6554e94f402SAlan Cox 6564e94f402SAlan Cox if (after_where != &map->header) { 6574e94f402SAlan Cox if (after_where != map->root) 6584e94f402SAlan Cox vm_map_entry_splay(after_where->start, map->root); 6594e94f402SAlan Cox entry->right = after_where->right; 6604e94f402SAlan Cox entry->left = after_where; 6614e94f402SAlan Cox after_where->right = NULL; 6624e94f402SAlan Cox } else { 6634e94f402SAlan Cox entry->right = map->root; 6644e94f402SAlan Cox entry->left = NULL; 6654e94f402SAlan Cox } 6664e94f402SAlan Cox map->root = entry; 667df8bae1dSRodney W. 
Grimes } 66899c81ca9SAlan Cox 6694e94f402SAlan Cox static void 67099c81ca9SAlan Cox vm_map_entry_unlink(vm_map_t map, 67199c81ca9SAlan Cox vm_map_entry_t entry) 67299c81ca9SAlan Cox { 6734e94f402SAlan Cox vm_map_entry_t next, prev, root; 67499c81ca9SAlan Cox 6754e94f402SAlan Cox if (entry != map->root) 6764e94f402SAlan Cox vm_map_entry_splay(entry->start, map->root); 6774e94f402SAlan Cox if (entry->left == NULL) 6784e94f402SAlan Cox root = entry->right; 6794e94f402SAlan Cox else { 6804e94f402SAlan Cox root = vm_map_entry_splay(entry->start, entry->left); 6814e94f402SAlan Cox root->right = entry->right; 6824e94f402SAlan Cox } 6834e94f402SAlan Cox map->root = root; 6844e94f402SAlan Cox 6854e94f402SAlan Cox prev = entry->prev; 6864e94f402SAlan Cox next = entry->next; 68799c81ca9SAlan Cox next->prev = prev; 68899c81ca9SAlan Cox prev->next = next; 68999c81ca9SAlan Cox map->nentries--; 69021c641b2SJohn Baldwin CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 69121c641b2SJohn Baldwin map->nentries, entry); 692df8bae1dSRodney W. Grimes } 693df8bae1dSRodney W. Grimes 694df8bae1dSRodney W. Grimes /* 695df8bae1dSRodney W. Grimes * vm_map_lookup_entry: [ internal use only ] 696df8bae1dSRodney W. Grimes * 697df8bae1dSRodney W. Grimes * Finds the map entry containing (or 698df8bae1dSRodney W. Grimes * immediately preceding) the specified address 699df8bae1dSRodney W. Grimes * in the given map; the entry is returned 700df8bae1dSRodney W. Grimes * in the "entry" parameter. The boolean 701df8bae1dSRodney W. Grimes * result indicates whether the address is 702df8bae1dSRodney W. Grimes * actually contained in the map. 703df8bae1dSRodney W. Grimes */ 7040d94caffSDavid Greenman boolean_t 7051b40f8c0SMatthew Dillon vm_map_lookup_entry( 7061b40f8c0SMatthew Dillon vm_map_t map, 7071b40f8c0SMatthew Dillon vm_offset_t address, 7081b40f8c0SMatthew Dillon vm_map_entry_t *entry) /* OUT */ 709df8bae1dSRodney W. 
Grimes { 710c0877f10SJohn Dyson vm_map_entry_t cur; 711df8bae1dSRodney W. Grimes 7124e94f402SAlan Cox cur = vm_map_entry_splay(address, map->root); 7134e94f402SAlan Cox if (cur == NULL) 7144e94f402SAlan Cox *entry = &map->header; 7154e94f402SAlan Cox else { 7164e94f402SAlan Cox map->root = cur; 717df8bae1dSRodney W. Grimes 718df8bae1dSRodney W. Grimes if (address >= cur->start) { 719df8bae1dSRodney W. Grimes *entry = cur; 7204e94f402SAlan Cox if (cur->end > address) 721df8bae1dSRodney W. Grimes return (TRUE); 7224e94f402SAlan Cox } else 723df8bae1dSRodney W. Grimes *entry = cur->prev; 7244e94f402SAlan Cox } 725df8bae1dSRodney W. Grimes return (FALSE); 726df8bae1dSRodney W. Grimes } 727df8bae1dSRodney W. Grimes 728df8bae1dSRodney W. Grimes /* 72930dcfc09SJohn Dyson * vm_map_insert: 73030dcfc09SJohn Dyson * 73130dcfc09SJohn Dyson * Inserts the given whole VM object into the target 73230dcfc09SJohn Dyson * map at the specified address range. The object's 73330dcfc09SJohn Dyson * size should match that of the address range. 73430dcfc09SJohn Dyson * 73530dcfc09SJohn Dyson * Requires that the map be locked, and leaves it so. 7362aaeadf8SMatthew Dillon * 7372aaeadf8SMatthew Dillon * If object is non-NULL, ref count must be bumped by caller 7382aaeadf8SMatthew Dillon * prior to making call to account for the new entry. 73930dcfc09SJohn Dyson */ 74030dcfc09SJohn Dyson int 741b9dcd593SBruce Evans vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 742b9dcd593SBruce Evans vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 743b9dcd593SBruce Evans int cow) 74430dcfc09SJohn Dyson { 745c0877f10SJohn Dyson vm_map_entry_t new_entry; 746c0877f10SJohn Dyson vm_map_entry_t prev_entry; 74730dcfc09SJohn Dyson vm_map_entry_t temp_entry; 7489730a5daSPaul Saab vm_eflags_t protoeflags; 74930dcfc09SJohn Dyson 75030dcfc09SJohn Dyson /* 75130dcfc09SJohn Dyson * Check that the start and end points are not bogus. 
75230dcfc09SJohn Dyson */ 75330dcfc09SJohn Dyson if ((start < map->min_offset) || (end > map->max_offset) || 75430dcfc09SJohn Dyson (start >= end)) 75530dcfc09SJohn Dyson return (KERN_INVALID_ADDRESS); 75630dcfc09SJohn Dyson 75730dcfc09SJohn Dyson /* 75830dcfc09SJohn Dyson * Find the entry prior to the proposed starting address; if it's part 75930dcfc09SJohn Dyson * of an existing entry, this range is bogus. 76030dcfc09SJohn Dyson */ 76130dcfc09SJohn Dyson if (vm_map_lookup_entry(map, start, &temp_entry)) 76230dcfc09SJohn Dyson return (KERN_NO_SPACE); 76330dcfc09SJohn Dyson 76430dcfc09SJohn Dyson prev_entry = temp_entry; 76530dcfc09SJohn Dyson 76630dcfc09SJohn Dyson /* 76730dcfc09SJohn Dyson * Assert that the next entry doesn't overlap the end point. 76830dcfc09SJohn Dyson */ 76930dcfc09SJohn Dyson if ((prev_entry->next != &map->header) && 77030dcfc09SJohn Dyson (prev_entry->next->start < end)) 77130dcfc09SJohn Dyson return (KERN_NO_SPACE); 77230dcfc09SJohn Dyson 773afa07f7eSJohn Dyson protoeflags = 0; 774afa07f7eSJohn Dyson 775afa07f7eSJohn Dyson if (cow & MAP_COPY_ON_WRITE) 776e5f13bddSAlan Cox protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 777afa07f7eSJohn Dyson 7784e045f93SAlan Cox if (cow & MAP_NOFAULT) { 779afa07f7eSJohn Dyson protoeflags |= MAP_ENTRY_NOFAULT; 780afa07f7eSJohn Dyson 7814e045f93SAlan Cox KASSERT(object == NULL, 7824e045f93SAlan Cox ("vm_map_insert: paradoxical MAP_NOFAULT request")); 7834e045f93SAlan Cox } 7844f79d873SMatthew Dillon if (cow & MAP_DISABLE_SYNCER) 7854f79d873SMatthew Dillon protoeflags |= MAP_ENTRY_NOSYNC; 7869730a5daSPaul Saab if (cow & MAP_DISABLE_COREDUMP) 7879730a5daSPaul Saab protoeflags |= MAP_ENTRY_NOCOREDUMP; 7884f79d873SMatthew Dillon 7891d284e00SAlan Cox if (object != NULL) { 79030dcfc09SJohn Dyson /* 7911d284e00SAlan Cox * OBJ_ONEMAPPING must be cleared unless this mapping 7921d284e00SAlan Cox * is trivially proven to be the only mapping for any 7931d284e00SAlan Cox * of the object's pages. 
(Object granularity 7941d284e00SAlan Cox * reference counting is insufficient to recognize 7951d284e00SAlan Cox * aliases with precision.) 79630dcfc09SJohn Dyson */ 7971d284e00SAlan Cox VM_OBJECT_LOCK(object); 7981d284e00SAlan Cox if (object->ref_count > 1 || object->shadow_count != 0) 7992aaeadf8SMatthew Dillon vm_object_clear_flag(object, OBJ_ONEMAPPING); 8001d284e00SAlan Cox VM_OBJECT_UNLOCK(object); 8014e045f93SAlan Cox } 8024e045f93SAlan Cox else if ((prev_entry != &map->header) && 8034e045f93SAlan Cox (prev_entry->eflags == protoeflags) && 8048cc7e047SJohn Dyson (prev_entry->end == start) && 8054e045f93SAlan Cox (prev_entry->wired_count == 0) && 8064e045f93SAlan Cox ((prev_entry->object.vm_object == NULL) || 8078cc7e047SJohn Dyson vm_object_coalesce(prev_entry->object.vm_object, 80857a21abaSAlan Cox prev_entry->offset, 8098cc7e047SJohn Dyson (vm_size_t)(prev_entry->end - prev_entry->start), 810cdc2c291SJohn Dyson (vm_size_t)(end - prev_entry->end)))) { 81130dcfc09SJohn Dyson /* 8122aaeadf8SMatthew Dillon * We were able to extend the object. Determine if we 8132aaeadf8SMatthew Dillon * can extend the previous map entry to include the 8142aaeadf8SMatthew Dillon * new range as well. 81530dcfc09SJohn Dyson */ 8168cc7e047SJohn Dyson if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 8178cc7e047SJohn Dyson (prev_entry->protection == prot) && 8188cc7e047SJohn Dyson (prev_entry->max_protection == max)) { 81930dcfc09SJohn Dyson map->size += (end - prev_entry->end); 82030dcfc09SJohn Dyson prev_entry->end = end; 8214e71e795SMatthew Dillon vm_map_simplify_entry(map, prev_entry); 82230dcfc09SJohn Dyson return (KERN_SUCCESS); 82330dcfc09SJohn Dyson } 8248cc7e047SJohn Dyson 8252aaeadf8SMatthew Dillon /* 8262aaeadf8SMatthew Dillon * If we can extend the object but cannot extend the 8272aaeadf8SMatthew Dillon * map entry, we have to create a new map entry. 
We 8282aaeadf8SMatthew Dillon * must bump the ref count on the extended object to 8294e71e795SMatthew Dillon * account for it. object may be NULL. 8302aaeadf8SMatthew Dillon */ 8312aaeadf8SMatthew Dillon object = prev_entry->object.vm_object; 8322aaeadf8SMatthew Dillon offset = prev_entry->offset + 8332aaeadf8SMatthew Dillon (prev_entry->end - prev_entry->start); 8348cc7e047SJohn Dyson vm_object_reference(object); 835b18bfc3dSJohn Dyson } 8362aaeadf8SMatthew Dillon 8372aaeadf8SMatthew Dillon /* 8382aaeadf8SMatthew Dillon * NOTE: if conditionals fail, object can be NULL here. This occurs 8392aaeadf8SMatthew Dillon * in things like the buffer map where we manage kva but do not manage 8402aaeadf8SMatthew Dillon * backing objects. 8412aaeadf8SMatthew Dillon */ 8428cc7e047SJohn Dyson 84330dcfc09SJohn Dyson /* 84430dcfc09SJohn Dyson * Create a new entry 84530dcfc09SJohn Dyson */ 84630dcfc09SJohn Dyson new_entry = vm_map_entry_create(map); 84730dcfc09SJohn Dyson new_entry->start = start; 84830dcfc09SJohn Dyson new_entry->end = end; 84930dcfc09SJohn Dyson 850afa07f7eSJohn Dyson new_entry->eflags = protoeflags; 85130dcfc09SJohn Dyson new_entry->object.vm_object = object; 85230dcfc09SJohn Dyson new_entry->offset = offset; 8532267af78SJulian Elischer new_entry->avail_ssize = 0; 8542267af78SJulian Elischer 85530dcfc09SJohn Dyson new_entry->inheritance = VM_INHERIT_DEFAULT; 85630dcfc09SJohn Dyson new_entry->protection = prot; 85730dcfc09SJohn Dyson new_entry->max_protection = max; 85830dcfc09SJohn Dyson new_entry->wired_count = 0; 859e5f251d2SAlan Cox 86030dcfc09SJohn Dyson /* 86130dcfc09SJohn Dyson * Insert the new entry into the list 86230dcfc09SJohn Dyson */ 86330dcfc09SJohn Dyson vm_map_entry_link(map, prev_entry, new_entry); 86430dcfc09SJohn Dyson map->size += new_entry->end - new_entry->start; 86530dcfc09SJohn Dyson 86630dcfc09SJohn Dyson /* 86730dcfc09SJohn Dyson * Update the free space hint 86830dcfc09SJohn Dyson */ 86967bf6868SJohn Dyson if ((map->first_free == 
prev_entry) && 8704f79d873SMatthew Dillon (prev_entry->end >= new_entry->start)) { 87130dcfc09SJohn Dyson map->first_free = new_entry; 8724f79d873SMatthew Dillon } 87330dcfc09SJohn Dyson 8741a484d28SMatthew Dillon #if 0 8751a484d28SMatthew Dillon /* 8761a484d28SMatthew Dillon * Temporarily removed to avoid MAP_STACK panic, due to 8771a484d28SMatthew Dillon * MAP_STACK being a huge hack. Will be added back in 8781a484d28SMatthew Dillon * when MAP_STACK (and the user stack mapping) is fixed. 8791a484d28SMatthew Dillon */ 8804e71e795SMatthew Dillon /* 8814e71e795SMatthew Dillon * It may be possible to simplify the entry 8824e71e795SMatthew Dillon */ 8834e71e795SMatthew Dillon vm_map_simplify_entry(map, new_entry); 8841a484d28SMatthew Dillon #endif 8854e71e795SMatthew Dillon 8864f79d873SMatthew Dillon if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 8874da4d293SAlan Cox vm_map_pmap_enter(map, start, prot, 888e972780aSAlan Cox object, OFF_TO_IDX(offset), end - start, 889e972780aSAlan Cox cow & MAP_PREFAULT_PARTIAL); 8904f79d873SMatthew Dillon } 891e972780aSAlan Cox 89230dcfc09SJohn Dyson return (KERN_SUCCESS); 89330dcfc09SJohn Dyson } 89430dcfc09SJohn Dyson 89530dcfc09SJohn Dyson /* 896df8bae1dSRodney W. Grimes * Find sufficient space for `length' bytes in the given map, starting at 897df8bae1dSRodney W. Grimes * `start'. The map must be locked. Returns 0 on success, 1 on no space. 898df8bae1dSRodney W. Grimes */ 899df8bae1dSRodney W. Grimes int 9001b40f8c0SMatthew Dillon vm_map_findspace( 9011b40f8c0SMatthew Dillon vm_map_t map, 9021b40f8c0SMatthew Dillon vm_offset_t start, 9031b40f8c0SMatthew Dillon vm_size_t length, 9041b40f8c0SMatthew Dillon vm_offset_t *addr) 905df8bae1dSRodney W. Grimes { 906c0877f10SJohn Dyson vm_map_entry_t entry, next; 907c0877f10SJohn Dyson vm_offset_t end; 908df8bae1dSRodney W. Grimes 909df8bae1dSRodney W. Grimes if (start < map->min_offset) 910df8bae1dSRodney W. Grimes start = map->min_offset; 911df8bae1dSRodney W. 
Grimes if (start > map->max_offset) 912df8bae1dSRodney W. Grimes return (1); 913df8bae1dSRodney W. Grimes 914df8bae1dSRodney W. Grimes /* 9150d94caffSDavid Greenman * Look for the first possible address; if there's already something 9160d94caffSDavid Greenman * at this address, we have to start after it. 917df8bae1dSRodney W. Grimes */ 918df8bae1dSRodney W. Grimes if (start == map->min_offset) { 91967bf6868SJohn Dyson if ((entry = map->first_free) != &map->header) 920df8bae1dSRodney W. Grimes start = entry->end; 921df8bae1dSRodney W. Grimes } else { 922df8bae1dSRodney W. Grimes vm_map_entry_t tmp; 9230d94caffSDavid Greenman 924df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &tmp)) 925df8bae1dSRodney W. Grimes start = tmp->end; 926df8bae1dSRodney W. Grimes entry = tmp; 927df8bae1dSRodney W. Grimes } 928df8bae1dSRodney W. Grimes 929df8bae1dSRodney W. Grimes /* 9300d94caffSDavid Greenman * Look through the rest of the map, trying to fit a new region in the 9310d94caffSDavid Greenman * gap between existing regions, or after the very last region. 932df8bae1dSRodney W. Grimes */ 933df8bae1dSRodney W. Grimes for (;; start = (entry = next)->end) { 934df8bae1dSRodney W. Grimes /* 935df8bae1dSRodney W. Grimes * Find the end of the proposed new region. Be sure we didn't 936df8bae1dSRodney W. Grimes * go beyond the end of the map, or wrap around the address; 937df8bae1dSRodney W. Grimes * if so, we lose. Otherwise, if this is the last entry, or 938df8bae1dSRodney W. Grimes * if the proposed new region fits before the next entry, we 939df8bae1dSRodney W. Grimes * win. 940df8bae1dSRodney W. Grimes */ 941df8bae1dSRodney W. Grimes end = start + length; 942df8bae1dSRodney W. Grimes if (end > map->max_offset || end < start) 943df8bae1dSRodney W. Grimes return (1); 944df8bae1dSRodney W. Grimes next = entry->next; 945df8bae1dSRodney W. Grimes if (next == &map->header || next->start >= end) 946df8bae1dSRodney W. Grimes break; 947df8bae1dSRodney W. 
Grimes } 948df8bae1dSRodney W. Grimes *addr = start; 94999448ed1SJohn Dyson if (map == kernel_map) { 95099448ed1SJohn Dyson vm_offset_t ksize; 95199448ed1SJohn Dyson if ((ksize = round_page(start + length)) > kernel_vm_end) { 95299448ed1SJohn Dyson pmap_growkernel(ksize); 95399448ed1SJohn Dyson } 95499448ed1SJohn Dyson } 955df8bae1dSRodney W. Grimes return (0); 956df8bae1dSRodney W. Grimes } 957df8bae1dSRodney W. Grimes 958df8bae1dSRodney W. Grimes /* 959df8bae1dSRodney W. Grimes * vm_map_find finds an unallocated region in the target address 960df8bae1dSRodney W. Grimes * map with the given length. The search is defined to be 961df8bae1dSRodney W. Grimes * first-fit from the specified address; the region found is 962df8bae1dSRodney W. Grimes * returned in the same parameter. 963df8bae1dSRodney W. Grimes * 9642aaeadf8SMatthew Dillon * If object is non-NULL, ref count must be bumped by caller 9652aaeadf8SMatthew Dillon * prior to making call to account for the new entry. 966df8bae1dSRodney W. Grimes */ 967df8bae1dSRodney W. Grimes int 968b9dcd593SBruce Evans vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 969b9dcd593SBruce Evans vm_offset_t *addr, /* IN/OUT */ 970b9dcd593SBruce Evans vm_size_t length, boolean_t find_space, vm_prot_t prot, 971b9dcd593SBruce Evans vm_prot_t max, int cow) 972df8bae1dSRodney W. Grimes { 973c0877f10SJohn Dyson vm_offset_t start; 9748d6e8edeSDavid Greenman int result, s = 0; 975df8bae1dSRodney W. Grimes 976df8bae1dSRodney W. Grimes start = *addr; 9778d6e8edeSDavid Greenman 97808442f8aSBosko Milekic if (map == kmem_map) 979b18bfc3dSJohn Dyson s = splvm(); 9808d6e8edeSDavid Greenman 981bea41bcfSDavid Greenman vm_map_lock(map); 982df8bae1dSRodney W. Grimes if (find_space) { 983df8bae1dSRodney W. Grimes if (vm_map_findspace(map, start, length, addr)) { 984df8bae1dSRodney W. Grimes vm_map_unlock(map); 98508442f8aSBosko Milekic if (map == kmem_map) 9868d6e8edeSDavid Greenman splx(s); 987df8bae1dSRodney W. 
Grimes return (KERN_NO_SPACE); 988df8bae1dSRodney W. Grimes } 989df8bae1dSRodney W. Grimes start = *addr; 990df8bae1dSRodney W. Grimes } 991bd7e5f99SJohn Dyson result = vm_map_insert(map, object, offset, 992bd7e5f99SJohn Dyson start, start + length, prot, max, cow); 993df8bae1dSRodney W. Grimes vm_map_unlock(map); 9948d6e8edeSDavid Greenman 99508442f8aSBosko Milekic if (map == kmem_map) 9968d6e8edeSDavid Greenman splx(s); 9978d6e8edeSDavid Greenman 998df8bae1dSRodney W. Grimes return (result); 999df8bae1dSRodney W. Grimes } 1000df8bae1dSRodney W. Grimes 1001df8bae1dSRodney W. Grimes /* 1002b7b2aac2SJohn Dyson * vm_map_simplify_entry: 100367bf6868SJohn Dyson * 10044e71e795SMatthew Dillon * Simplify the given map entry by merging with either neighbor. This 10054e71e795SMatthew Dillon * routine also has the ability to merge with both neighbors. 10064e71e795SMatthew Dillon * 10074e71e795SMatthew Dillon * The map must be locked. 10084e71e795SMatthew Dillon * 10094e71e795SMatthew Dillon * This routine guarentees that the passed entry remains valid (though 10104e71e795SMatthew Dillon * possibly extended). When merging, this routine may delete one or 10114e71e795SMatthew Dillon * both neighbors. 1012df8bae1dSRodney W. Grimes */ 1013b7b2aac2SJohn Dyson void 10141b40f8c0SMatthew Dillon vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 1015df8bae1dSRodney W. Grimes { 1016308c24baSJohn Dyson vm_map_entry_t next, prev; 1017b7b2aac2SJohn Dyson vm_size_t prevsize, esize; 1018df8bae1dSRodney W. Grimes 1019acd9a301SAlan Cox if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) 1020df8bae1dSRodney W. 
Grimes return; 1021308c24baSJohn Dyson 1022308c24baSJohn Dyson prev = entry->prev; 1023308c24baSJohn Dyson if (prev != &map->header) { 102467bf6868SJohn Dyson prevsize = prev->end - prev->start; 102567bf6868SJohn Dyson if ( (prev->end == entry->start) && 102667bf6868SJohn Dyson (prev->object.vm_object == entry->object.vm_object) && 102795e5e988SJohn Dyson (!prev->object.vm_object || 102867bf6868SJohn Dyson (prev->offset + prevsize == entry->offset)) && 1029afa07f7eSJohn Dyson (prev->eflags == entry->eflags) && 103067bf6868SJohn Dyson (prev->protection == entry->protection) && 103167bf6868SJohn Dyson (prev->max_protection == entry->max_protection) && 103267bf6868SJohn Dyson (prev->inheritance == entry->inheritance) && 1033b7b2aac2SJohn Dyson (prev->wired_count == entry->wired_count)) { 1034308c24baSJohn Dyson if (map->first_free == prev) 1035308c24baSJohn Dyson map->first_free = entry; 1036308c24baSJohn Dyson vm_map_entry_unlink(map, prev); 1037308c24baSJohn Dyson entry->start = prev->start; 1038308c24baSJohn Dyson entry->offset = prev->offset; 1039b18bfc3dSJohn Dyson if (prev->object.vm_object) 1040308c24baSJohn Dyson vm_object_deallocate(prev->object.vm_object); 1041308c24baSJohn Dyson vm_map_entry_dispose(map, prev); 1042308c24baSJohn Dyson } 1043308c24baSJohn Dyson } 1044de5f6a77SJohn Dyson 1045de5f6a77SJohn Dyson next = entry->next; 1046308c24baSJohn Dyson if (next != &map->header) { 104767bf6868SJohn Dyson esize = entry->end - entry->start; 104867bf6868SJohn Dyson if ((entry->end == next->start) && 104967bf6868SJohn Dyson (next->object.vm_object == entry->object.vm_object) && 105067bf6868SJohn Dyson (!entry->object.vm_object || 105167bf6868SJohn Dyson (entry->offset + esize == next->offset)) && 1052afa07f7eSJohn Dyson (next->eflags == entry->eflags) && 105367bf6868SJohn Dyson (next->protection == entry->protection) && 105467bf6868SJohn Dyson (next->max_protection == entry->max_protection) && 105567bf6868SJohn Dyson (next->inheritance == entry->inheritance) && 
1056b7b2aac2SJohn Dyson (next->wired_count == entry->wired_count)) { 1057308c24baSJohn Dyson if (map->first_free == next) 1058308c24baSJohn Dyson map->first_free = entry; 1059de5f6a77SJohn Dyson vm_map_entry_unlink(map, next); 1060de5f6a77SJohn Dyson entry->end = next->end; 1061b18bfc3dSJohn Dyson if (next->object.vm_object) 1062de5f6a77SJohn Dyson vm_object_deallocate(next->object.vm_object); 1063de5f6a77SJohn Dyson vm_map_entry_dispose(map, next); 1064df8bae1dSRodney W. Grimes } 1065df8bae1dSRodney W. Grimes } 1066de5f6a77SJohn Dyson } 1067df8bae1dSRodney W. Grimes /* 1068df8bae1dSRodney W. Grimes * vm_map_clip_start: [ internal use only ] 1069df8bae1dSRodney W. Grimes * 1070df8bae1dSRodney W. Grimes * Asserts that the given entry begins at or after 1071df8bae1dSRodney W. Grimes * the specified address; if necessary, 1072df8bae1dSRodney W. Grimes * it splits the entry into two. 1073df8bae1dSRodney W. Grimes */ 1074df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \ 1075df8bae1dSRodney W. Grimes { \ 1076df8bae1dSRodney W. Grimes if (startaddr > entry->start) \ 1077df8bae1dSRodney W. Grimes _vm_map_clip_start(map, entry, startaddr); \ 1078df8bae1dSRodney W. Grimes } 1079df8bae1dSRodney W. Grimes 1080df8bae1dSRodney W. Grimes /* 1081df8bae1dSRodney W. Grimes * This routine is called only when it is known that 1082df8bae1dSRodney W. Grimes * the entry must be split. 1083df8bae1dSRodney W. Grimes */ 10840d94caffSDavid Greenman static void 10851b40f8c0SMatthew Dillon _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1086df8bae1dSRodney W. Grimes { 1087c0877f10SJohn Dyson vm_map_entry_t new_entry; 1088df8bae1dSRodney W. Grimes 1089df8bae1dSRodney W. Grimes /* 10900d94caffSDavid Greenman * Split off the front portion -- note that we must insert the new 10910d94caffSDavid Greenman * entry BEFORE this one, so that this entry has the specified 10920d94caffSDavid Greenman * starting address. 1093df8bae1dSRodney W. 
Grimes */ 1094f32dbbeeSJohn Dyson vm_map_simplify_entry(map, entry); 1095f32dbbeeSJohn Dyson 109611cccda1SJohn Dyson /* 109711cccda1SJohn Dyson * If there is no object backing this entry, we might as well create 109811cccda1SJohn Dyson * one now. If we defer it, an object can get created after the map 109911cccda1SJohn Dyson * is clipped, and individual objects will be created for the split-up 110011cccda1SJohn Dyson * map. This is a bit of a hack, but is also about the best place to 110111cccda1SJohn Dyson * put this improvement. 110211cccda1SJohn Dyson */ 11034e71e795SMatthew Dillon if (entry->object.vm_object == NULL && !map->system_map) { 110411cccda1SJohn Dyson vm_object_t object; 110511cccda1SJohn Dyson object = vm_object_allocate(OBJT_DEFAULT, 1106c2e11a03SJohn Dyson atop(entry->end - entry->start)); 110711cccda1SJohn Dyson entry->object.vm_object = object; 110811cccda1SJohn Dyson entry->offset = 0; 110911cccda1SJohn Dyson } 111011cccda1SJohn Dyson 1111df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 1112df8bae1dSRodney W. Grimes *new_entry = *entry; 1113df8bae1dSRodney W. Grimes 1114df8bae1dSRodney W. Grimes new_entry->end = start; 1115df8bae1dSRodney W. Grimes entry->offset += (start - entry->start); 1116df8bae1dSRodney W. Grimes entry->start = start; 1117df8bae1dSRodney W. Grimes 1118df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry->prev, new_entry); 1119df8bae1dSRodney W. Grimes 11209fdfe602SMatthew Dillon if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1121df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 1122df8bae1dSRodney W. Grimes } 1123c0877f10SJohn Dyson } 1124df8bae1dSRodney W. Grimes 1125df8bae1dSRodney W. Grimes /* 1126df8bae1dSRodney W. Grimes * vm_map_clip_end: [ internal use only ] 1127df8bae1dSRodney W. Grimes * 1128df8bae1dSRodney W. Grimes * Asserts that the given entry ends at or before 1129df8bae1dSRodney W. Grimes * the specified address; if necessary, 1130df8bae1dSRodney W. 
Grimes * it splits the entry into two. 1131df8bae1dSRodney W. Grimes */ 1132df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \ 1133df8bae1dSRodney W. Grimes { \ 1134af045176SPoul-Henning Kamp if ((endaddr) < (entry->end)) \ 1135af045176SPoul-Henning Kamp _vm_map_clip_end((map), (entry), (endaddr)); \ 1136df8bae1dSRodney W. Grimes } 1137df8bae1dSRodney W. Grimes 1138df8bae1dSRodney W. Grimes /* 1139df8bae1dSRodney W. Grimes * This routine is called only when it is known that 1140df8bae1dSRodney W. Grimes * the entry must be split. 1141df8bae1dSRodney W. Grimes */ 11420d94caffSDavid Greenman static void 11431b40f8c0SMatthew Dillon _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1144df8bae1dSRodney W. Grimes { 1145c0877f10SJohn Dyson vm_map_entry_t new_entry; 1146df8bae1dSRodney W. Grimes 1147df8bae1dSRodney W. Grimes /* 114811cccda1SJohn Dyson * If there is no object backing this entry, we might as well create 114911cccda1SJohn Dyson * one now. If we defer it, an object can get created after the map 115011cccda1SJohn Dyson * is clipped, and individual objects will be created for the split-up 115111cccda1SJohn Dyson * map. This is a bit of a hack, but is also about the best place to 115211cccda1SJohn Dyson * put this improvement. 115311cccda1SJohn Dyson */ 11544e71e795SMatthew Dillon if (entry->object.vm_object == NULL && !map->system_map) { 115511cccda1SJohn Dyson vm_object_t object; 115611cccda1SJohn Dyson object = vm_object_allocate(OBJT_DEFAULT, 1157c2e11a03SJohn Dyson atop(entry->end - entry->start)); 115811cccda1SJohn Dyson entry->object.vm_object = object; 115911cccda1SJohn Dyson entry->offset = 0; 116011cccda1SJohn Dyson } 116111cccda1SJohn Dyson 116211cccda1SJohn Dyson /* 11630d94caffSDavid Greenman * Create a new entry and insert it AFTER the specified entry 1164df8bae1dSRodney W. Grimes */ 1165df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 1166df8bae1dSRodney W. 
Grimes *new_entry = *entry; 1167df8bae1dSRodney W. Grimes 1168df8bae1dSRodney W. Grimes new_entry->start = entry->end = end; 1169df8bae1dSRodney W. Grimes new_entry->offset += (end - entry->start); 1170df8bae1dSRodney W. Grimes 1171df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry, new_entry); 1172df8bae1dSRodney W. Grimes 11739fdfe602SMatthew Dillon if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1174df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 1175df8bae1dSRodney W. Grimes } 1176c0877f10SJohn Dyson } 1177df8bae1dSRodney W. Grimes 1178df8bae1dSRodney W. Grimes /* 1179df8bae1dSRodney W. Grimes * VM_MAP_RANGE_CHECK: [ internal use only ] 1180df8bae1dSRodney W. Grimes * 1181df8bae1dSRodney W. Grimes * Asserts that the starting and ending region 1182df8bae1dSRodney W. Grimes * addresses fall within the valid range of the map. 1183df8bae1dSRodney W. Grimes */ 1184df8bae1dSRodney W. Grimes #define VM_MAP_RANGE_CHECK(map, start, end) \ 1185df8bae1dSRodney W. Grimes { \ 1186df8bae1dSRodney W. Grimes if (start < vm_map_min(map)) \ 1187df8bae1dSRodney W. Grimes start = vm_map_min(map); \ 1188df8bae1dSRodney W. Grimes if (end > vm_map_max(map)) \ 1189df8bae1dSRodney W. Grimes end = vm_map_max(map); \ 1190df8bae1dSRodney W. Grimes if (start > end) \ 1191df8bae1dSRodney W. Grimes start = end; \ 1192df8bae1dSRodney W. Grimes } 1193df8bae1dSRodney W. Grimes 1194df8bae1dSRodney W. Grimes /* 1195df8bae1dSRodney W. Grimes * vm_map_submap: [ kernel use only ] 1196df8bae1dSRodney W. Grimes * 1197df8bae1dSRodney W. Grimes * Mark the given range as handled by a subordinate map. 1198df8bae1dSRodney W. Grimes * 1199df8bae1dSRodney W. Grimes * This range must have been created with vm_map_find, 1200df8bae1dSRodney W. Grimes * and no other operations may have been performed on this 1201df8bae1dSRodney W. Grimes * range prior to calling vm_map_submap. 1202df8bae1dSRodney W. Grimes * 1203df8bae1dSRodney W. 
Grimes * Only a limited number of operations can be performed 1204df8bae1dSRodney W. Grimes * within this rage after calling vm_map_submap: 1205df8bae1dSRodney W. Grimes * vm_fault 1206df8bae1dSRodney W. Grimes * [Don't try vm_map_copy!] 1207df8bae1dSRodney W. Grimes * 1208df8bae1dSRodney W. Grimes * To remove a submapping, one must first remove the 1209df8bae1dSRodney W. Grimes * range from the superior map, and then destroy the 1210df8bae1dSRodney W. Grimes * submap (if desired). [Better yet, don't try it.] 1211df8bae1dSRodney W. Grimes */ 1212df8bae1dSRodney W. Grimes int 12131b40f8c0SMatthew Dillon vm_map_submap( 12141b40f8c0SMatthew Dillon vm_map_t map, 12151b40f8c0SMatthew Dillon vm_offset_t start, 12161b40f8c0SMatthew Dillon vm_offset_t end, 12171b40f8c0SMatthew Dillon vm_map_t submap) 1218df8bae1dSRodney W. Grimes { 1219df8bae1dSRodney W. Grimes vm_map_entry_t entry; 1220c0877f10SJohn Dyson int result = KERN_INVALID_ARGUMENT; 1221df8bae1dSRodney W. Grimes 1222df8bae1dSRodney W. Grimes vm_map_lock(map); 1223df8bae1dSRodney W. Grimes 1224df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1225df8bae1dSRodney W. Grimes 1226df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &entry)) { 1227df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 12280d94caffSDavid Greenman } else 1229df8bae1dSRodney W. Grimes entry = entry->next; 1230df8bae1dSRodney W. Grimes 1231df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1232df8bae1dSRodney W. Grimes 1233df8bae1dSRodney W. Grimes if ((entry->start == start) && (entry->end == end) && 12349fdfe602SMatthew Dillon ((entry->eflags & MAP_ENTRY_COW) == 0) && 1235afa07f7eSJohn Dyson (entry->object.vm_object == NULL)) { 12362d8acc0fSJohn Dyson entry->object.sub_map = submap; 1237afa07f7eSJohn Dyson entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1238df8bae1dSRodney W. Grimes result = KERN_SUCCESS; 1239df8bae1dSRodney W. Grimes } 1240df8bae1dSRodney W. Grimes vm_map_unlock(map); 1241df8bae1dSRodney W. 
Grimes 1242df8bae1dSRodney W. Grimes return (result); 1243df8bae1dSRodney W. Grimes } 1244df8bae1dSRodney W. Grimes 1245df8bae1dSRodney W. Grimes /* 12461f78f902SAlan Cox * The maximum number of pages to map 12471f78f902SAlan Cox */ 12481f78f902SAlan Cox #define MAX_INIT_PT 96 12491f78f902SAlan Cox 12501f78f902SAlan Cox /* 12510551c08dSAlan Cox * vm_map_pmap_enter: 12520551c08dSAlan Cox * 12534da4d293SAlan Cox * Preload read-only mappings for the given object into the specified 12540551c08dSAlan Cox * map. This eliminates the soft faults on process startup and 12550551c08dSAlan Cox * immediately after an mmap(2). 12560551c08dSAlan Cox */ 12570551c08dSAlan Cox void 12584da4d293SAlan Cox vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 12590551c08dSAlan Cox vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 12600551c08dSAlan Cox { 12611f78f902SAlan Cox vm_offset_t tmpidx; 12621f78f902SAlan Cox int psize; 12631f78f902SAlan Cox vm_page_t p, mpte; 12640551c08dSAlan Cox 12654da4d293SAlan Cox if ((prot & VM_PROT_READ) == 0 || object == NULL) 12661f78f902SAlan Cox return; 12671f78f902SAlan Cox VM_OBJECT_LOCK(object); 12681f78f902SAlan Cox if (object->type == OBJT_DEVICE) { 12691f78f902SAlan Cox pmap_object_init_pt(map->pmap, addr, object, pindex, size); 12701f78f902SAlan Cox goto unlock_return; 12711f78f902SAlan Cox } 12721f78f902SAlan Cox 12731f78f902SAlan Cox psize = atop(size); 12741f78f902SAlan Cox 12751f78f902SAlan Cox if (object->type != OBJT_VNODE || 12761f78f902SAlan Cox ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && 12771f78f902SAlan Cox (object->resident_page_count > MAX_INIT_PT))) { 12781f78f902SAlan Cox goto unlock_return; 12791f78f902SAlan Cox } 12801f78f902SAlan Cox 12811f78f902SAlan Cox if (psize + pindex > object->size) { 12821f78f902SAlan Cox if (object->size < pindex) 12831f78f902SAlan Cox goto unlock_return; 12841f78f902SAlan Cox psize = object->size - pindex; 12851f78f902SAlan Cox } 12861f78f902SAlan Cox 
12871f78f902SAlan Cox mpte = NULL; 12881f78f902SAlan Cox 12891f78f902SAlan Cox if ((p = TAILQ_FIRST(&object->memq)) != NULL) { 12901f78f902SAlan Cox if (p->pindex < pindex) { 12911f78f902SAlan Cox p = vm_page_splay(pindex, object->root); 12921f78f902SAlan Cox if ((object->root = p)->pindex < pindex) 12931f78f902SAlan Cox p = TAILQ_NEXT(p, listq); 12941f78f902SAlan Cox } 12951f78f902SAlan Cox } 12961f78f902SAlan Cox /* 12971f78f902SAlan Cox * Assert: the variable p is either (1) the page with the 12981f78f902SAlan Cox * least pindex greater than or equal to the parameter pindex 12991f78f902SAlan Cox * or (2) NULL. 13001f78f902SAlan Cox */ 13011f78f902SAlan Cox for (; 13021f78f902SAlan Cox p != NULL && (tmpidx = p->pindex - pindex) < psize; 13031f78f902SAlan Cox p = TAILQ_NEXT(p, listq)) { 13041f78f902SAlan Cox /* 13051f78f902SAlan Cox * don't allow an madvise to blow away our really 13061f78f902SAlan Cox * free pages allocating pv entries. 13071f78f902SAlan Cox */ 13081f78f902SAlan Cox if ((flags & MAP_PREFAULT_MADVISE) && 13091f78f902SAlan Cox cnt.v_free_count < cnt.v_free_reserved) { 13101f78f902SAlan Cox break; 13111f78f902SAlan Cox } 13121f78f902SAlan Cox vm_page_lock_queues(); 13131f78f902SAlan Cox if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL && 13141f78f902SAlan Cox (p->busy == 0) && 13151f78f902SAlan Cox (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 13161f78f902SAlan Cox if ((p->queue - p->pc) == PQ_CACHE) 13171f78f902SAlan Cox vm_page_deactivate(p); 13181f78f902SAlan Cox vm_page_busy(p); 13191f78f902SAlan Cox vm_page_unlock_queues(); 13201f78f902SAlan Cox VM_OBJECT_UNLOCK(object); 13211f78f902SAlan Cox mpte = pmap_enter_quick(map->pmap, 13221f78f902SAlan Cox addr + ptoa(tmpidx), p, mpte); 13231f78f902SAlan Cox VM_OBJECT_LOCK(object); 13241f78f902SAlan Cox vm_page_lock_queues(); 13251f78f902SAlan Cox vm_page_wakeup(p); 13261f78f902SAlan Cox } 13271f78f902SAlan Cox vm_page_unlock_queues(); 13281f78f902SAlan Cox } 13291f78f902SAlan Cox unlock_return: 
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 *
 *	Returns KERN_INVALID_ARGUMENT if the range overlaps a submap,
 *	KERN_PROTECTION_FAILURE if new_prot exceeds any entry's
 *	max_protection, and KERN_SUCCESS otherwise.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/* Clip the first entry so changes start exactly at "start". */
	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 * Nothing is modified until the whole range has been validated,
	 * so a failure leaves the map untouched.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */
		if (current->protection != old_prot) {
			/*
			 * COW entries must never gain hardware write
			 * permission here; the write fault path grants it
			 * after the copy is made.
			 */
#define	MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
			 VM_PROT_ALL)
			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
		}
		vm_map_simplify_entry(map, current);
		current = current->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 *	vm_map_madvise:
 *
 *	This routine traverses a processes map handling the madvise
 *	system call.  Advisories are classified as either those effecting
 *	the vm_map_entry structure, or those effecting the underlying
 *	objects.
 *
 *	Returns 0 on success or KERN_INVALID_ARGUMENT for an unknown
 *	behavior value.
 */
int
vm_map_madvise(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */
	switch(behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		vm_map_lock_read(map);
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Locate starting entry and clip if necessary.
	 */
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		/* Only clip under the exclusive lock. */
		if (modify_map)
			vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	if (modify_map) {
		/*
		 * madvise behaviors that are implemented in the vm_map_entry.
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				break;
			}
			vm_map_simplify_entry(map, current);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pindex;
		int count;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			vm_offset_t useStart;

			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			pindex = OFF_TO_IDX(current->offset);
			count = atop(current->end - current->start);
			useStart = current->start;

			/* Trim the object range to [start, end). */
			if (current->start < start) {
				pindex += atop(start - current->start);
				count -= atop(start - current->start);
				useStart = start;
			}
			if (current->end > end)
				count -= atop(current->end - end);

			if (count <= 0)
				continue;

			vm_object_madvise(current->object.vm_object,
				pindex, count, behav);
			if (behav == MADV_WILLNEED) {
				/* Prefault the advised pages into the pmap. */
				vm_map_pmap_enter(map,
				    useStart,
				    current->protection,
				    current->object.vm_object,
				    pindex,
				    (count << PAGE_SHIFT),
				    MAP_PREFAULT_MADVISE
				);
			}
		}
		vm_map_unlock_read(map);
	}
	return (0);
}


/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 *
 *	Returns KERN_INVALID_ARGUMENT for an unknown inheritance value,
 *	KERN_SUCCESS otherwise.
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_inherit_t new_inheritance)
{
	vm_map_entry_t entry;
	vm_map_entry_t temp_entry;

	/* Validate before taking the map lock. */
	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);
		entry->inheritance = new_inheritance;
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 *	vm_map_unwire:
 *
 *	Implements both kernel and user unwiring (selected by
 *	VM_MAP_WIRE_USER in "flags"); VM_MAP_WIRE_HOLESOK permits
 *	unmapped gaps within [start, end).
 */
int
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_unwire;

	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	/*
	 * The timestamp detects concurrent map modification across the
	 * points below where the map lock is dropped.
	 */
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, user_unwire)) {
				/*
				 * Allow interruption of user unwiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * First_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		/*
		 * If system unwiring, require that the entry is system wired.
		 */
		if (!user_unwire &&
		    vm_map_entry_system_wired_count(entry) == 0) {
			end = entry->end;
			rv = KERN_INVALID_ARGUMENT;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	/*
	 * Pass 2: perform the actual unwiring over [start, end) and clear
	 * the in-transition marks set above, even on failure.
	 */
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_unwire: lookup failed"));
	}
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (rv == KERN_SUCCESS && (!user_unwire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
			if (user_unwire)
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			entry->wired_count--;
			if (entry->wired_count == 0) {
				/*
				 * Retain the map lock.
				 */
				vm_fault_unwire(map, entry->start, entry->end,
				    entry->object.vm_object != NULL &&
				    entry->object.vm_object->type == OBJT_DEVICE);
			}
		}
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
		    ("vm_map_unwire: in-transition flag missing"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}

/*
 *	vm_map_wire:
 *
 *	Implements both kernel and user wiring (selected by
 *	VM_MAP_WIRE_USER in "flags"); VM_MAP_WIRE_HOLESOK permits
 *	unmapped gaps within [start, end).
 */
int
vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_end, saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t fictitious, need_wakeup, result, user_wire;

	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, user_wire)) {
				/*
				 * Allow interruption of user wiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * first_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		/*
		 * If the entry is not yet wired, fault its pages in with
		 * the map lock dropped; otherwise just bump the wire count.
		 */
		if (entry->wired_count == 0) {
			entry->wired_count++;
			saved_start = entry->start;
			saved_end = entry->end;
			fictitious = entry->object.vm_object != NULL &&
			    entry->object.vm_object->type == OBJT_DEVICE;
			/*
			 * Release the map lock, relying on the in-transition
			 * mark.
			 */
			vm_map_unlock(map);
			rv = vm_fault_wire(map, saved_start, saved_end,
			    user_wire, fictitious);
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.  The entry
				 * may have been clipped, but NOT merged or
				 * deleted.
				 */
				result = vm_map_lookup_entry(map, saved_start,
				    &tmp_entry);
				KASSERT(result, ("vm_map_wire: lookup failed"));
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
				while (entry->end < saved_end) {
					if (rv != KERN_SUCCESS) {
						KASSERT(entry->wired_count == 1,
						    ("vm_map_wire: bad count"));
						entry->wired_count = -1;
					}
					entry = entry->next;
				}
			}
			last_timestamp = map->timestamp;
			if (rv != KERN_SUCCESS) {
				KASSERT(entry->wired_count == 1,
				    ("vm_map_wire: bad count"));
				/*
				 * Assign an out-of-range value to represent
				 * the failure to wire this entry.
				 */
				entry->wired_count = -1;
				end = entry->end;
				goto done;
			}
		} else if (!user_wire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
			entry->wired_count++;
		}
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	/*
	 * Pass 2: on success, set user-wired marks; on failure, roll back
	 * wire counts.  Always clear the in-transition marks set above.
	 */
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_wire: lookup failed"));
	}
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (rv == KERN_SUCCESS) {
			if (user_wire)
				entry->eflags |= MAP_ENTRY_USER_WIRED;
		} else if (entry->wired_count == -1) {
			/*
			 * Wiring failed on this entry.  Thus, unwiring is
			 * unnecessary.
			 */
			entry->wired_count = 0;
		} else {
			if (!user_wire ||
			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
				entry->wired_count--;
			if (entry->wired_count == 0) {
				/*
				 * Retain the map lock.
				 */
				vm_fault_unwire(map, entry->start, entry->end,
				    entry->object.vm_object != NULL &&
				    entry->object.vm_object->type == OBJT_DEVICE);
			}
		}
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
		    ("vm_map_wire: in-transition flag missing"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}

/*
 *	vm_map_sync
 *
 *	Push any dirty cached pages in the address range to their pager.
 *	If syncio is TRUE, dirty pages are written synchronously.
 *	If invalidate is TRUE, any cached pages are freed as well.
 *
 *	If the size of the region from start to end is zero, we are
 *	supposed to flush all modified pages within the region containing
 *	start.  Unfortunately, a region can be split or coalesced with
 *	neighboring regions, making it difficult to determine what the
 *	original region was.  Therefore, we approximate this requirement by
 *	flushing the current region containing start.
 *
 *	Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_sync(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t syncio,
	boolean_t invalidate)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	/* The map read lock is held for the duration of both passes. */
	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	} else if (start == end) {
		/* Zero-length request: flush the whole containing region. */
		start = entry->start;
		end = entry->end;
	}
	/*
	 * Make a first pass to check for user-wired memory and holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
			current->end != current->next->start)) {
			/* A hole in the range: fail before touching anything. */
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	if (invalidate) {
		/* Drop any existing translations so stale mappings go away. */
		mtx_lock(&Giant);
		pmap_remove(map->pmap, start, end);
		mtx_unlock(&Giant);
	}
	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			/*
			 * For a submap entry, resolve the flush to the
			 * underlying object of the submap's own entry.
			 */
			vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.sub_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		vm_object_sync(object, offset, size, syncio, invalidate);
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
	/*
	 * The final argument flags device-backed objects for
	 * vm_fault_unwire; NOTE(review): presumably it marks the pages as
	 * fictitious/unmanaged — confirm against vm_fault_unwire().
	 */
	vm_fault_unwire(map, entry->start, entry->end,
	    entry->object.vm_object != NULL &&
	    entry->object.vm_object->type == OBJT_DEVICE);
	entry->wired_count = 0;
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
	vm_object_t object;
	vm_pindex_t offidxstart, offidxend, count;

	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
	    (object = entry->object.vm_object) != NULL) {
		count = OFF_TO_IDX(entry->end - entry->start);
		offidxstart = OFF_TO_IDX(entry->offset);
		offidxend = offidxstart + count;
		VM_OBJECT_LOCK(object);
		/*
		 * When this mapping is effectively the only one (or the
		 * object is a kernel object), the backing pages can be
		 * released eagerly instead of waiting for the final
		 * reference to drop.
		 */
		if (object->ref_count != 1 &&
		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
		    object == kernel_object || object == kmem_object) &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			vm_object_collapse(object);
			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
			if (object->type == OBJT_SWAP)
				swap_pager_freespace(object, offidxstart, count);
			/* Shrink the object if we removed its tail. */
			if (offidxend >= object->size &&
			    offidxstart < object->size)
				object->size = offidxstart;
		}
		VM_OBJECT_UNLOCK(object);
		vm_object_deallocate(object);
	}

	vm_map_entry_dispose(map, entry);
}

/*
 * vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 *
 *	The map must be locked by the caller.
 */
int
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */
	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);
	}

	/*
	 * Save the free space hint
	 */
	if (entry == &map->header) {
		map->first_free = &map->header;
	} else if (map->first_free->start >= start) {
		map->first_free = entry->prev;
	}

	/*
	 * Step through all entries in this region
	 */
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;

		/*
		 * Wait for wiring or unwiring of an entry to complete.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
			unsigned int last_timestamp;
			vm_offset_t saved_start;
			vm_map_entry_t tmp_entry;

			saved_start = entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			last_timestamp = map->timestamp;
			/* Sleeping drops the map lock; reacquire below. */
			(void) vm_map_unlock_and_wait(map, FALSE);
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry))
					entry = tmp_entry->next;
				else {
					entry = tmp_entry;
					vm_map_clip_start(map, entry,
					    saved_start);
				}
			}
			continue;
		}
		vm_map_clip_end(map, entry, end);

		next = entry->next;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}

		if (!map->system_map)
			mtx_lock(&Giant);
		pmap_remove(map->pmap, entry->start, entry->end);
		if (!map->system_map)
			mtx_unlock(&Giant);

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int result, s = 0;

	/* kmem_map operations are additionally protected at splvm. */
	if (map == kmem_map)
		s = splvm();

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}

/*
 * vm_map_check_protection:
 *
 *	Assert that the target map allows the specified privilege on the
 *	entire address region given.  The entire region must be allocated.
 *
 *	WARNING!  This code does not and should not check whether the
 *	contents of the region is accessible.  For example a smaller file
 *	might be mapped into a larger address space.
 *
 *	NOTE!  This code is also called by munmap().
 *
 *	The map must be locked.  A read lock is sufficient.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
			vm_prot_t protection)
{
	vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry))
		return (FALSE);
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header)
			return (FALSE);
		/*
		 * No holes allowed!
		 */
		if (start < entry->start)
			return (FALSE);
		/*
		 * Check protection associated with entry.
		 */
		if ((entry->protection & protection) != protection)
			return (FALSE);
		/* go to next entry */
		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}

/*
 * vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(
	vm_map_t src_map,
	vm_map_t dst_map,
	vm_map_entry_t src_entry,
	vm_map_entry_t dst_entry)
{
	vm_object_t src_object;

	/* Submap entries are never copied. */
	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
			pmap_protect(src_map->pmap,
			    src_entry->start,
			    src_entry->end,
			    src_entry->protection & ~VM_PROT_WRITE);
		}

		/*
		 * Make a copy of the object.
		 */
		if ((src_object = src_entry->object.vm_object) != NULL) {
			VM_OBJECT_LOCK(src_object);
			if ((src_object->handle == NULL) &&
				(src_object->type == OBJT_DEFAULT ||
				 src_object->type == OBJT_SWAP)) {
				vm_object_collapse(src_object);
				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
					vm_object_split(src_entry);
					src_object = src_entry->object.vm_object;
				}
			}
			vm_object_reference_locked(src_object);
			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
			VM_OBJECT_UNLOCK(src_object);
			/* Both entries now share the object copy-on-write. */
			dst_entry->object.vm_object = src_object;
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->offset = src_entry->offset;
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}

/*
 * vmspace_map_entry_forked:
 * Update the newly-forked vmspace each time a map entry is inherited
 * or copied.  The values for vm_dsize and vm_tsize are approximate
 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
 */
static void
vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
    vm_map_entry_t entry)
{
	vm_size_t entrysize;
	vm_offset_t newend;

	entrysize = entry->end - entry->start;
	vm2->vm_map.size += entrysize;
	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
		/* Stack entries count toward the stack size. */
		vm2->vm_ssize += btoc(entrysize);
	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
		/* Entry begins inside the data segment; clamp to its end. */
		newend = MIN(entry->end,
		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
		vm2->vm_dsize += btoc(newend - entry->start);
	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
		/* Entry begins inside the text segment; clamp to its end. */
		newend = MIN(entry->end,
		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
		vm2->vm_tsize += btoc(newend - entry->start);
	}
}

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * XXX It might be worth coalescing the entries added to the new vmspace.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(struct vmspace *vm1)
{
	struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	vm_object_t object;

	GIANT_REQUIRED;

	vm_map_lock(old_map);
	/* Mark the map as mid-fork for the duration of the copy. */
	old_map->infork = 1;

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
	vm2->vm_taddr = vm1->vm_taddr;
	vm2->vm_daddr = vm1->vm_daddr;
	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
	new_map = &vm2->vm_map;	/* XXX */
	new_map->timestamp = 1;

	/* Do not inherit the MAP_WIREFUTURE property. */
	if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE)
		new_map->flags &= ~MAP_WIREFUTURE;

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
					atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = (vm_offset_t) 0;
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
					&old_entry->offset,
					atop(old_entry->end - old_entry->start));
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			VM_OBJECT_LOCK(object);
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
			VM_OBJECT_UNLOCK(object);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vmspace_map_entry_forked(vm1, vm2, new_entry);

			/*
			 * Update the physical map
			 */
			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vmspace_map_entry_forked(vm1, vm2, new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	old_map->infork = 0;
	vm_map_unlock(old_map);

	return (vm2);
}

/*
 * vm_map_stack:
 *
 *	Reserve max_ssize of address space for a stack at addrbos and map
 *	in its initial portion.  Returns a KERN_* status.
 */
int
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry;
	vm_offset_t bot, top;
	vm_size_t init_ssize;
	int orient, rv;
	rlim_t vmemlim;

	/*
	 * The stack orientation is piggybacked with the cow argument.
	 * Extract it into orient and mask the cow argument so that we
	 * don't pass it around further.
	 * NOTE: We explicitly allow bi-directional stacks.
	 */
	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
	cow &= ~orient;
	KASSERT(orient != 0, ("No stack grow direction"));

	if (addrbos < vm_map_min(map) || addrbos > map->max_offset)
		return (KERN_NO_SPACE);

	/* Initial mapping is the smaller of max_ssize and sgrowsiz. */
	init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;

	PROC_LOCK(curthread->td_proc);
	vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
	PROC_UNLOCK(curthread->td_proc);

	vm_map_lock(map);

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + init_ssize > vmemlim) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/*
	 * If we can't accommodate max_ssize in the current mapping, no go.
	 * However, we need to be aware that subsequent user mappings might
	 * map into the space we have reserved for stack, and currently this
	 * space is not protected.
	 *
	 * Hopefully we will at least detect this condition when we try to
	 * grow the stack.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < addrbos + max_ssize)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/*
	 * We initially map a stack of only init_ssize.  We will grow as
	 * needed later.  Depending on the orientation of the stack (i.e.
	 * the grow direction) we either map at the top of the range, the
	 * bottom of the range or in the middle.
	 *
	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
	 * and cow to be 0.  Possibly we should eliminate these as input
	 * parameters, and just pass these values here in the insert call.
	 */
	if (orient == MAP_STACK_GROWS_DOWN)
		bot = addrbos + max_ssize - init_ssize;
	else if (orient == MAP_STACK_GROWS_UP)
		bot = addrbos;
	else
		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
	top = bot + init_ssize;
	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);

	/* Now set the avail_ssize amount. */
	if (rv == KERN_SUCCESS) {
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, bot);
		new_entry = prev_entry->next;
		if (new_entry->end != top || new_entry->start != bot)
			panic("Bad entry start/end for new stack entry");

		/* Remember how much room is left for future growth. */
		new_entry->avail_ssize = max_ssize - init_ssize;
		if (orient & MAP_STACK_GROWS_DOWN)
			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
		if (orient & MAP_STACK_GROWS_UP)
			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
	}

	vm_map_unlock(map);
	return (rv);
}

/* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.  Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
259894f7e29aSAlan Cox */ 259994f7e29aSAlan Cox int 260094f7e29aSAlan Cox vm_map_growstack(struct proc *p, vm_offset_t addr) 260194f7e29aSAlan Cox { 2602b21a0008SMarcel Moolenaar vm_map_entry_t next_entry, prev_entry; 2603b21a0008SMarcel Moolenaar vm_map_entry_t new_entry, stack_entry; 260494f7e29aSAlan Cox struct vmspace *vm = p->p_vmspace; 260594f7e29aSAlan Cox vm_map_t map = &vm->vm_map; 260694f7e29aSAlan Cox vm_offset_t end; 2607b21a0008SMarcel Moolenaar size_t grow_amount, max_grow; 260891d5354aSJohn Baldwin rlim_t stacklim, vmemlim; 2609b21a0008SMarcel Moolenaar int is_procstack, rv; 261023955314SAlfred Perlstein 261194f7e29aSAlan Cox Retry: 261291d5354aSJohn Baldwin PROC_LOCK(p); 261391d5354aSJohn Baldwin stacklim = lim_cur(p, RLIMIT_STACK); 2614bfee999dSAlan Cox vmemlim = lim_cur(p, RLIMIT_VMEM); 261591d5354aSJohn Baldwin PROC_UNLOCK(p); 261691d5354aSJohn Baldwin 261794f7e29aSAlan Cox vm_map_lock_read(map); 261894f7e29aSAlan Cox 261994f7e29aSAlan Cox /* If addr is already in the entry range, no need to grow.*/ 262094f7e29aSAlan Cox if (vm_map_lookup_entry(map, addr, &prev_entry)) { 262194f7e29aSAlan Cox vm_map_unlock_read(map); 26220cddd8f0SMatthew Dillon return (KERN_SUCCESS); 262394f7e29aSAlan Cox } 262494f7e29aSAlan Cox 2625b21a0008SMarcel Moolenaar next_entry = prev_entry->next; 2626b21a0008SMarcel Moolenaar if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 2627b21a0008SMarcel Moolenaar /* 2628b21a0008SMarcel Moolenaar * This entry does not grow upwards. Since the address lies 2629b21a0008SMarcel Moolenaar * beyond this entry, the next entry (if one exists) has to 2630b21a0008SMarcel Moolenaar * be a downward growable entry. The entry list header is 2631b21a0008SMarcel Moolenaar * never a growable entry, so it suffices to check the flags. 
263294f7e29aSAlan Cox */ 2633b21a0008SMarcel Moolenaar if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { 263494f7e29aSAlan Cox vm_map_unlock_read(map); 26350cddd8f0SMatthew Dillon return (KERN_SUCCESS); 263694f7e29aSAlan Cox } 2637b21a0008SMarcel Moolenaar stack_entry = next_entry; 2638b21a0008SMarcel Moolenaar } else { 2639b21a0008SMarcel Moolenaar /* 2640b21a0008SMarcel Moolenaar * This entry grows upward. If the next entry does not at 2641b21a0008SMarcel Moolenaar * least grow downwards, this is the entry we need to grow. 2642b21a0008SMarcel Moolenaar * otherwise we have two possible choices and we have to 2643b21a0008SMarcel Moolenaar * select one. 2644b21a0008SMarcel Moolenaar */ 2645b21a0008SMarcel Moolenaar if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { 2646b21a0008SMarcel Moolenaar /* 2647b21a0008SMarcel Moolenaar * We have two choices; grow the entry closest to 2648b21a0008SMarcel Moolenaar * the address to minimize the amount of growth. 2649b21a0008SMarcel Moolenaar */ 2650b21a0008SMarcel Moolenaar if (addr - prev_entry->end <= next_entry->start - addr) 2651b21a0008SMarcel Moolenaar stack_entry = prev_entry; 2652b21a0008SMarcel Moolenaar else 2653b21a0008SMarcel Moolenaar stack_entry = next_entry; 2654b21a0008SMarcel Moolenaar } else 2655b21a0008SMarcel Moolenaar stack_entry = prev_entry; 2656b21a0008SMarcel Moolenaar } 265794f7e29aSAlan Cox 2658b21a0008SMarcel Moolenaar if (stack_entry == next_entry) { 2659b21a0008SMarcel Moolenaar KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); 2660b21a0008SMarcel Moolenaar KASSERT(addr < stack_entry->start, ("foo")); 2661b21a0008SMarcel Moolenaar end = (prev_entry != &map->header) ? 
prev_entry->end : 2662b21a0008SMarcel Moolenaar stack_entry->start - stack_entry->avail_ssize; 266394f7e29aSAlan Cox grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE); 2664b21a0008SMarcel Moolenaar max_grow = stack_entry->start - end; 2665b21a0008SMarcel Moolenaar } else { 2666b21a0008SMarcel Moolenaar KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo")); 266708667f6dSMarcel Moolenaar KASSERT(addr >= stack_entry->end, ("foo")); 2668b21a0008SMarcel Moolenaar end = (next_entry != &map->header) ? next_entry->start : 2669b21a0008SMarcel Moolenaar stack_entry->end + stack_entry->avail_ssize; 2670fd75d710SMarcel Moolenaar grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE); 2671b21a0008SMarcel Moolenaar max_grow = end - stack_entry->end; 2672b21a0008SMarcel Moolenaar } 2673b21a0008SMarcel Moolenaar 267494f7e29aSAlan Cox if (grow_amount > stack_entry->avail_ssize) { 267594f7e29aSAlan Cox vm_map_unlock_read(map); 26760cddd8f0SMatthew Dillon return (KERN_NO_SPACE); 267794f7e29aSAlan Cox } 267894f7e29aSAlan Cox 2679b21a0008SMarcel Moolenaar /* 2680b21a0008SMarcel Moolenaar * If there is no longer enough space between the entries nogo, and 2681b21a0008SMarcel Moolenaar * adjust the available space. Note: this should only happen if the 2682b21a0008SMarcel Moolenaar * user has mapped into the stack area after the stack was created, 2683b21a0008SMarcel Moolenaar * and is probably an error. 268494f7e29aSAlan Cox * 2685b21a0008SMarcel Moolenaar * This also effectively destroys any guard page the user might have 2686b21a0008SMarcel Moolenaar * intended by limiting the stack size. 
268794f7e29aSAlan Cox */ 2688b21a0008SMarcel Moolenaar if (grow_amount > max_grow) { 268925adb370SBrian Feldman if (vm_map_lock_upgrade(map)) 269094f7e29aSAlan Cox goto Retry; 269194f7e29aSAlan Cox 2692b21a0008SMarcel Moolenaar stack_entry->avail_ssize = max_grow; 269394f7e29aSAlan Cox 269494f7e29aSAlan Cox vm_map_unlock(map); 26950cddd8f0SMatthew Dillon return (KERN_NO_SPACE); 269694f7e29aSAlan Cox } 269794f7e29aSAlan Cox 2698b21a0008SMarcel Moolenaar is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0; 269994f7e29aSAlan Cox 2700b21a0008SMarcel Moolenaar /* 2701b21a0008SMarcel Moolenaar * If this is the main process stack, see if we're over the stack 2702b21a0008SMarcel Moolenaar * limit. 270394f7e29aSAlan Cox */ 270491d5354aSJohn Baldwin if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 270594f7e29aSAlan Cox vm_map_unlock_read(map); 27060cddd8f0SMatthew Dillon return (KERN_NO_SPACE); 270794f7e29aSAlan Cox } 270894f7e29aSAlan Cox 270994f7e29aSAlan Cox /* Round up the grow amount modulo SGROWSIZ */ 2710cbc89bfbSPaul Saab grow_amount = roundup (grow_amount, sgrowsiz); 2711b21a0008SMarcel Moolenaar if (grow_amount > stack_entry->avail_ssize) 271294f7e29aSAlan Cox grow_amount = stack_entry->avail_ssize; 271391d5354aSJohn Baldwin if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 271491d5354aSJohn Baldwin grow_amount = stacklim - ctob(vm->vm_ssize); 271594f7e29aSAlan Cox } 271694f7e29aSAlan Cox 2717a69ac174SMatthew Dillon /* If we would blow our VMEM resource limit, no go */ 271891d5354aSJohn Baldwin if (map->size + grow_amount > vmemlim) { 2719a69ac174SMatthew Dillon vm_map_unlock_read(map); 2720a69ac174SMatthew Dillon return (KERN_NO_SPACE); 2721a69ac174SMatthew Dillon } 2722a69ac174SMatthew Dillon 272325adb370SBrian Feldman if (vm_map_lock_upgrade(map)) 272494f7e29aSAlan Cox goto Retry; 272594f7e29aSAlan Cox 2726b21a0008SMarcel Moolenaar if (stack_entry == next_entry) { 2727b21a0008SMarcel Moolenaar /* 
2728b21a0008SMarcel Moolenaar * Growing downward. 2729b21a0008SMarcel Moolenaar */ 273094f7e29aSAlan Cox /* Get the preliminary new entry start value */ 273194f7e29aSAlan Cox addr = stack_entry->start - grow_amount; 273294f7e29aSAlan Cox 2733b21a0008SMarcel Moolenaar /* 2734b21a0008SMarcel Moolenaar * If this puts us into the previous entry, cut back our 2735b21a0008SMarcel Moolenaar * growth to the available space. Also, see the note above. 273694f7e29aSAlan Cox */ 273794f7e29aSAlan Cox if (addr < end) { 2738b21a0008SMarcel Moolenaar stack_entry->avail_ssize = max_grow; 273994f7e29aSAlan Cox addr = end; 274094f7e29aSAlan Cox } 274194f7e29aSAlan Cox 274294f7e29aSAlan Cox rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 274305ba50f5SJake Burkholder p->p_sysent->sv_stackprot, VM_PROT_ALL, 0); 274494f7e29aSAlan Cox 274594f7e29aSAlan Cox /* Adjust the available stack space by the amount we grew. */ 274694f7e29aSAlan Cox if (rv == KERN_SUCCESS) { 274729b45e9eSAlan Cox if (prev_entry != &map->header) 274829b45e9eSAlan Cox vm_map_clip_end(map, prev_entry, addr); 2749b21a0008SMarcel Moolenaar new_entry = prev_entry->next; 2750b21a0008SMarcel Moolenaar KASSERT(new_entry == stack_entry->prev, ("foo")); 2751b21a0008SMarcel Moolenaar KASSERT(new_entry->end == stack_entry->start, ("foo")); 2752b21a0008SMarcel Moolenaar KASSERT(new_entry->start == addr, ("foo")); 2753b21a0008SMarcel Moolenaar grow_amount = new_entry->end - new_entry->start; 2754b21a0008SMarcel Moolenaar new_entry->avail_ssize = stack_entry->avail_ssize - 2755b21a0008SMarcel Moolenaar grow_amount; 2756b21a0008SMarcel Moolenaar stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; 2757b21a0008SMarcel Moolenaar new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 275894f7e29aSAlan Cox } 2759b21a0008SMarcel Moolenaar } else { 2760b21a0008SMarcel Moolenaar /* 2761b21a0008SMarcel Moolenaar * Growing upward. 
2762b21a0008SMarcel Moolenaar */ 2763b21a0008SMarcel Moolenaar addr = stack_entry->end + grow_amount; 2764b21a0008SMarcel Moolenaar 2765b21a0008SMarcel Moolenaar /* 2766b21a0008SMarcel Moolenaar * If this puts us into the next entry, cut back our growth 2767b21a0008SMarcel Moolenaar * to the available space. Also, see the note above. 2768b21a0008SMarcel Moolenaar */ 2769b21a0008SMarcel Moolenaar if (addr > end) { 2770b21a0008SMarcel Moolenaar stack_entry->avail_ssize = end - stack_entry->end; 2771b21a0008SMarcel Moolenaar addr = end; 277294f7e29aSAlan Cox } 277394f7e29aSAlan Cox 2774b21a0008SMarcel Moolenaar grow_amount = addr - stack_entry->end; 2775b21a0008SMarcel Moolenaar 2776b21a0008SMarcel Moolenaar /* Grow the underlying object if applicable. */ 2777b21a0008SMarcel Moolenaar if (stack_entry->object.vm_object == NULL || 2778b21a0008SMarcel Moolenaar vm_object_coalesce(stack_entry->object.vm_object, 277957a21abaSAlan Cox stack_entry->offset, 2780b21a0008SMarcel Moolenaar (vm_size_t)(stack_entry->end - stack_entry->start), 2781b21a0008SMarcel Moolenaar (vm_size_t)grow_amount)) { 278208667f6dSMarcel Moolenaar map->size += (addr - stack_entry->end); 2783b21a0008SMarcel Moolenaar /* Update the current entry. 
*/ 2784b21a0008SMarcel Moolenaar stack_entry->end = addr; 2785199c91abSMarcel Moolenaar stack_entry->avail_ssize -= grow_amount; 2786b21a0008SMarcel Moolenaar rv = KERN_SUCCESS; 2787b21a0008SMarcel Moolenaar 2788b21a0008SMarcel Moolenaar if (next_entry != &map->header) 2789b21a0008SMarcel Moolenaar vm_map_clip_start(map, next_entry, addr); 2790b21a0008SMarcel Moolenaar } else 2791b21a0008SMarcel Moolenaar rv = KERN_FAILURE; 2792b21a0008SMarcel Moolenaar } 2793b21a0008SMarcel Moolenaar 2794b21a0008SMarcel Moolenaar if (rv == KERN_SUCCESS && is_procstack) 2795b21a0008SMarcel Moolenaar vm->vm_ssize += btoc(grow_amount); 2796b21a0008SMarcel Moolenaar 279794f7e29aSAlan Cox vm_map_unlock(map); 2798b21a0008SMarcel Moolenaar 2799abd498aaSBruce M Simpson /* 2800abd498aaSBruce M Simpson * Heed the MAP_WIREFUTURE flag if it was set for this process. 2801abd498aaSBruce M Simpson */ 2802b21a0008SMarcel Moolenaar if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) { 2803b21a0008SMarcel Moolenaar vm_map_wire(map, 2804b21a0008SMarcel Moolenaar (stack_entry == next_entry) ? addr : addr - grow_amount, 2805b21a0008SMarcel Moolenaar (stack_entry == next_entry) ? stack_entry->start : addr, 2806b21a0008SMarcel Moolenaar (p->p_flag & P_SYSTEM) 2807b21a0008SMarcel Moolenaar ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES 2808b21a0008SMarcel Moolenaar : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 2809b21a0008SMarcel Moolenaar } 2810abd498aaSBruce M Simpson 28110cddd8f0SMatthew Dillon return (rv); 281294f7e29aSAlan Cox } 281394f7e29aSAlan Cox 2814df8bae1dSRodney W. Grimes /* 28155856e12eSJohn Dyson * Unshare the specified VM space for exec. If other processes are 28165856e12eSJohn Dyson * mapped to it, then create a new one. The new vmspace is null. 
 */
void
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	GIANT_REQUIRED;
	newvmspace = vmspace_alloc(minuser, maxuser);
	/* Carry the swap-resident set size over to the new vmspace. */
	newvmspace->vm_swrss = oldvmspace->vm_swrss;
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but let the
	 * other process's that are still using the vmspace to finally
	 * run it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future mods.
	 */
	p->p_vmspace = newvmspace;
	if (p == curthread->td_proc)		/* XXXKSE ? */
		pmap_activate(curthread);
	/* Release this process's reference to the old vmspace. */
	vmspace_free(oldvmspace);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
void
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	GIANT_REQUIRED;
	/* Not shared with anyone else: nothing to do. */
	if (oldvmspace->vm_refcnt == 1)
		return;
	/* Fork a private copy-on-write copy and switch to it. */
	newvmspace = vmspace_fork(oldvmspace);
	p->p_vmspace = newvmspace;
	if (p == curthread->td_proc)		/* XXXKSE ? */
		pmap_activate(curthread);
	/* Release this process's reference to the old vmspace. */
	vmspace_free(oldvmspace);
}

/*
 *	vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;

	/*
	 * Restarted whenever the lookup must begin anew: after descending
	 * into a submap, or after a lock upgrade fails below.
	 */
RetryLookup:;
	/*
	 * Lookup the faulting address.
	 */

	vm_map_lock_read(map);
	/* Local early-exit helper: drop the read lock and return `why'. */
#define	RETURN(why) \
		{ \
		vm_map_unlock_read(map); \
		return (why); \
		}

	/*
	 * If the map has an interesting hint, try it before calling full
	 * blown lookup routine.
	 */
	entry = map->root;
	*out_entry = entry;
	if (entry == NULL ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, out_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = *out_entry;
	}

	/*
	 * Handle submaps.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		/* Descend into the submap and redo the lookup there. */
		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 * Note the special case for MAP_ENTRY_COW
	 * pages with an override. This is to implement a forced
	 * COW for debuggers.
	 */
	if (fault_type & VM_PROT_OVERRIDE_WRITE)
		prot = entry->max_protection;
	else
		prot = entry->protection;
	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type) {
		RETURN(KERN_PROTECTION_FAILURE);
	}
	/* Disallow plain writes to user-wired COW entries without override. */
	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE) &&
	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
		RETURN(KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If the entry was copy-on-write, we either ...
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */
			if (vm_map_lock_upgrade(map))
				goto RetryLookup;

			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    atop(entry->end - entry->start));
			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

			vm_map_lock_downgrade(map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (vm_map_lock_upgrade(map))
			goto RetryLookup;
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->offset = 0;
		vm_map_lock_downgrade(map);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */
	*out_prot = prot;
	return (KERN_SUCCESS);

#undef	RETURN
}

/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
	/*
	 * Unlock the main-level map
	 */
	vm_map_unlock_read(map);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 *	vm_map_print:	[ debug ]
 *
 *	DDB command: dump a vm_map and its entries, recursing into
 *	submaps and printing backing objects when `full' is requested.
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
	/* Running line count across nested invocations (DDB paging aid). */
	static int nlines;
	/* XXX convert args. */
	vm_map_t map = (vm_map_t)addr;
	boolean_t full = have_addr;

	vm_map_entry_t entry;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map,
	    (void *)map->pmap, map->nentries, map->timestamp);
	nlines++;

	if (!full && db_indent)
		return;

	db_indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		db_iprintf("map entry %p: start=%p, end=%p\n",
		    (void *)entry, (void *)entry->start, (void *)entry->end);
		nlines++;
		{
			static char *inheritance_name[4] =
			{"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
			db_printf(", share=%p, offset=0x%jx\n",
			    (void *)entry->object.sub_map,
			    (uintmax_t)entry->offset);
			nlines++;
			/*
			 * Only recurse when this submap differs from the
			 * previous entry's, to avoid re-printing a submap
			 * shared by consecutive entries.
			 */
			if ((entry->prev == &map->header) ||
			    (entry->prev->object.sub_map !=
				entry->object.sub_map)) {
				db_indent += 2;
				vm_map_print((db_expr_t)(intptr_t)
				    entry->object.sub_map,
				    full, 0, (char *)0);
				db_indent -= 2;
			}
		} else {
			db_printf(", object=%p, offset=0x%jx",
			    (void *)entry->object.vm_object,
			    (uintmax_t)entry->offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");
			nlines++;

			/* Same dedup logic as above, for backing objects. */
			if ((entry->prev == &map->header) ||
			    (entry->prev->object.vm_object !=
				entry->object.vm_object)) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
				    entry->object.vm_object,
				    full, 0, (char *)0);
				nlines += 4;
				db_indent -= 2;
			}
		}
	}
	db_indent -= 2;
	if (db_indent == 0)
		nlines = 0;
}


/*
 *	DDB command: print the vmspace/map/pmap pointers of a process
 *	(given by address, default curproc), then dump its map in full.
 */
DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = (struct proc *) addr;
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	    (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */