/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
static void vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
	{						\
	if (start < vm_map_min(map))			\
		start = vm_map_min(map);		\
	if (end > vm_map_max(map))			\
		end = vm_map_max(map);			\
	if (start > end)				\
		start = end;				\
	}
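
/*
 * Illustrative sketch (not part of the build): despite its name,
 * VM_MAP_RANGE_CHECK does not report an error; it silently clamps the
 * request to the map's bounds.  Assuming a map with
 * vm_map_min() == 0x1000 and vm_map_max() == 0x8000:
 *
 *	vm_offset_t start = 0x0500, end = 0x9000;
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	// start == 0x1000, end == 0x8000
 *
 * If the clamped start would exceed the clamped end, start is pulled
 * down to end, so callers see a zero-length range rather than an
 * inverted one.
 */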

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */
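
/*
 * Illustrative note (not part of the build): vm_map_zinit and
 * vmspace_zinit are passed to uma_zcreate() as the zone's item
 * initializer ("uminit"), which runs only when memory is first
 * imported into the zone, not on every allocation.  Combined with
 * UMA_ZONE_NOFREE, the embedded mutex and sx lock are therefore
 * initialized once and remain valid across alloc/free cycles, e.g.:
 *
 *	map = uma_zalloc(mapzone, M_WAITOK);	// locks already set up
 *	...
 *	uma_zfree(mapzone, map);		// locks stay initialized
 */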

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);

	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

	if (pinit == NULL)
		pinit = &pmap_pinit;

	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

static void
vmspace_container_reset(struct proc *p)
{

#ifdef RACCT
	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
#endif
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}
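
/*
 * Illustrative note (not part of the build): vmspace_free() relies on
 * atomic_fetchadd_int() returning the value *before* the decrement, so
 * exactly one caller observes the 1 -> 0 transition and performs the
 * teardown; no lock is needed for the common reference drop:
 *
 *	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
 *		vmspace_dofree(vm);	// we held the last reference
 */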

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
	vmspace_container_reset(p);
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}
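
/*
 * Illustrative usage sketch (not part of the build): a consumer that
 * wants to inspect another process's address space must take a
 * reference first, since the owner may exit at any time:
 *
 *	struct vmspace *vm;
 *
 *	if ((vm = vmspace_acquire_ref(p)) == NULL)
 *		return (ESRCH);		// vmspace already torn down
 *	map = &vm->vm_map;
 *	...				// safe to use map here
 *	vmspace_free(vm);		// drop the reference when done
 */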

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}
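
/*
 * Illustrative note (not part of the build): entries queued on
 * td_map_def_user are freed only after the map lock is dropped,
 * because releasing a map entry can recurse into the VM (vnode
 * writecount updates, object deallocation) and must not be done with
 * the map locked.  Every user-map unlock path therefore follows the
 * same two-step pattern:
 *
 *	sx_xunlock_(&map->lock, file, line);	// release the lock first
 *	vm_map_process_deferred();		// then run deferred frees
 */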
int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}
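
/*
 * Illustrative usage sketch (not part of the build): because a failed
 * upgrade drops the lock entirely, callers must be prepared to restart
 * their lookup from scratch rather than assume their entry pointers
 * are still valid:
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		// Lock was lost and the map may have changed.
 *		vm_map_lock(map);
 *		goto retry_lookup;	// hypothetical restart label
 *	}
 *	// Write lock held here; cached entries are still valid.
 */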

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}
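
/*
 * Illustrative usage sketch (not part of the build): a typical sleeper
 * loops until the condition it is waiting on clears, holding the map
 * lock at each test:
 *
 *	while (in_transition_condition)	{	// hypothetical predicate
 *		map->needs_wakeup = TRUE;
 *		(void)vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);		// reacquire before retest
 *	}
 *
 * The map_sleep_mtx handshake inside both functions guarantees that a
 * vm_map_wakeup() issued between the unlock and the msleep() is not
 * lost.
 */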

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}
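
/*
 * Illustrative usage sketch (not part of the build): a new map is just
 * a pmap paired with an address range; the bounds below are made-up
 * values for illustration only:
 *
 *	vm_map_t map;
 *
 *	map = vm_map_create(kernel_pmap, 0xc0000000, 0xd0000000);
 *	// map is returned unlocked and initially contains no entries
 */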

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}
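
/*
 * Illustrative note (not part of the build): adj_free is the gap
 * between an entry and its successor in address order, and max_free is
 * the largest adj_free anywhere in the entry's subtree.  For example,
 * given two entries whose successors start 0x1000 and 0x4000 bytes
 * past their respective ends:
 *
 *	e1: end == 0x2000, next->start == 0x3000  =>  adj_free == 0x1000
 *	e2: end == 0x5000, next->start == 0x9000  =>  adj_free == 0x4000
 *
 * If e2 is in e1's subtree, then e1->max_free == 0x4000, which lets a
 * first-fit search skip entire subtrees that cannot satisfy a request.
 */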

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns:	the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}
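
	/*
	 * Illustrative diagram (not part of the algorithm): the "rotate
	 * right" case above, with root r and left child y, reshapes
	 *
	 *	      r                y
	 *	     / \              / \
	 *	    y   C    ==>     A   r
	 *	   / \                  / \
	 *	  A   B                B   C
	 *
	 * after which descent continues into A and y is pushed onto
	 * rlist; only the demoted r has max_free recomputed here, y's
	 * recomputation being deferred to Pass Two.
	 */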

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}
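
/*
 * Illustrative usage sketch (not part of the build): since splaying
 * restructures the tree, callers always reassign the root:
 *
 *	map->root = vm_map_entry_splay(addr, map->root);
 *	// map->root now contains addr, or is an entry adjacent to it
 *
 * Repeated lookups of nearby addresses are cheap because the relevant
 * entries migrate toward the root (O(log n) amortized per operation).
 */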

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(after_where == &map->header ||
	    after_where->end <= entry->start,
	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
	KASSERT(after_where->next == &map->header ||
	    entry->end <= after_where->next->start,
	    ("vm_map_entry_link: new end %jx next start %jx overlap",
	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ? map->max_offset :
		    entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}
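
/*
 * Illustrative usage sketch (not part of the build): any in-place
 * change to an entry's end must be followed by this fixup, since the
 * entry's adj_free (and max_free along its path to the root) are now
 * stale.  This is exactly the pattern vm_map_insert() uses when
 * extending a coalescible previous entry:
 *
 *	prev_entry->end = end;			// grow the entry in place
 *	vm_map_entry_resize_free(map, prev_entry);
 */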

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}
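
/*
 * Illustrative usage sketch (not part of the build): the boolean
 * result and the OUT parameter are used together; even when the
 * address is not mapped, *entry points at the predecessor, which is
 * exactly what an insertion needs:
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// addr lies inside "entry"
 *	} else {
 *		// addr is unmapped; "entry" precedes it (possibly
 *		// &map->header) and a new entry would be linked after it
 *	}
 */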

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry, temp_entry;
	vm_eflags_t protoeflags;
	struct ucred *cred;
	vm_inherit_t inheritance;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT((object != kmem_object && object != kernel_object) ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kmem or kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
116230dcfc09SJohn Dyson */ 116330dcfc09SJohn Dyson if ((prev_entry->next != &map->header) && 116430dcfc09SJohn Dyson (prev_entry->next->start < end)) 116530dcfc09SJohn Dyson return (KERN_NO_SPACE); 116630dcfc09SJohn Dyson 1167afa07f7eSJohn Dyson protoeflags = 0; 1168afa07f7eSJohn Dyson if (cow & MAP_COPY_ON_WRITE) 1169e5f13bddSAlan Cox protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY; 117033314db0SAlan Cox if (cow & MAP_NOFAULT) 1171afa07f7eSJohn Dyson protoeflags |= MAP_ENTRY_NOFAULT; 11724f79d873SMatthew Dillon if (cow & MAP_DISABLE_SYNCER) 11734f79d873SMatthew Dillon protoeflags |= MAP_ENTRY_NOSYNC; 11749730a5daSPaul Saab if (cow & MAP_DISABLE_COREDUMP) 11759730a5daSPaul Saab protoeflags |= MAP_ENTRY_NOCOREDUMP; 1176712efe66SAlan Cox if (cow & MAP_STACK_GROWS_DOWN) 1177712efe66SAlan Cox protoeflags |= MAP_ENTRY_GROWS_DOWN; 1178712efe66SAlan Cox if (cow & MAP_STACK_GROWS_UP) 1179712efe66SAlan Cox protoeflags |= MAP_ENTRY_GROWS_UP; 118084110e7eSKonstantin Belousov if (cow & MAP_VN_WRITECOUNT) 118184110e7eSKonstantin Belousov protoeflags |= MAP_ENTRY_VN_WRITECNT; 11828211bd45SKonstantin Belousov if (cow & MAP_INHERIT_SHARE) 11838211bd45SKonstantin Belousov inheritance = VM_INHERIT_SHARE; 11848211bd45SKonstantin Belousov else 11858211bd45SKonstantin Belousov inheritance = VM_INHERIT_DEFAULT; 11864f79d873SMatthew Dillon 1187ef694c1aSEdward Tomasz Napierala cred = NULL; 11883364c323SKonstantin Belousov if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT)) 11893364c323SKonstantin Belousov goto charged; 11903364c323SKonstantin Belousov if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) && 11913364c323SKonstantin Belousov ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) { 11923364c323SKonstantin Belousov if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) 11933364c323SKonstantin Belousov return (KERN_RESOURCE_SHORTAGE); 119441c22744SKonstantin Belousov KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) || 1195ef694c1aSEdward Tomasz Napierala object->cred == NULL, 11963364c323SKonstantin Belousov ("OVERCOMMIT: vm_map_insert o %p", object)); 1197ef694c1aSEdward Tomasz Napierala cred = curthread->td_ucred; 11983364c323SKonstantin Belousov } 11993364c323SKonstantin Belousov 12003364c323SKonstantin Belousov charged: 1201f8616ebfSAlan Cox /* Expand the kernel pmap, if necessary. */ 1202f8616ebfSAlan Cox if (map == kernel_map && end > kernel_vm_end) 1203f8616ebfSAlan Cox pmap_growkernel(end); 12041d284e00SAlan Cox if (object != NULL) { 120530dcfc09SJohn Dyson /* 12061d284e00SAlan Cox * OBJ_ONEMAPPING must be cleared unless this mapping 12071d284e00SAlan Cox * is trivially proven to be the only mapping for any 12081d284e00SAlan Cox * of the object's pages. (Object granularity 12091d284e00SAlan Cox * reference counting is insufficient to recognize 12101d284e00SAlan Cox * aliases with precision.) 
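 * For instance, an object referenced by two entries that map
 * disjoint ranges has no page mapped twice, yet ref_count alone
 * cannot prove that, so the flag is cleared conservatively.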
121130dcfc09SJohn Dyson */ 121289f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 12131d284e00SAlan Cox if (object->ref_count > 1 || object->shadow_count != 0) 12142aaeadf8SMatthew Dillon vm_object_clear_flag(object, OBJ_ONEMAPPING); 121589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 12164e045f93SAlan Cox } 12174e045f93SAlan Cox else if ((prev_entry != &map->header) && 12184e045f93SAlan Cox (prev_entry->eflags == protoeflags) && 1219b5f8c226SKonstantin Belousov (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 && 12208cc7e047SJohn Dyson (prev_entry->end == start) && 12214e045f93SAlan Cox (prev_entry->wired_count == 0) && 1222ef694c1aSEdward Tomasz Napierala (prev_entry->cred == cred || 12233364c323SKonstantin Belousov (prev_entry->object.vm_object != NULL && 1224ef694c1aSEdward Tomasz Napierala (prev_entry->object.vm_object->cred == cred))) && 12258cc7e047SJohn Dyson vm_object_coalesce(prev_entry->object.vm_object, 122657a21abaSAlan Cox prev_entry->offset, 12278cc7e047SJohn Dyson (vm_size_t)(prev_entry->end - prev_entry->start), 122860169c88SAlan Cox (vm_size_t)(end - prev_entry->end), cred != NULL && 122960169c88SAlan Cox (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) { 123030dcfc09SJohn Dyson /* 12312aaeadf8SMatthew Dillon * We were able to extend the object. Determine if we 12322aaeadf8SMatthew Dillon * can extend the previous map entry to include the 12332aaeadf8SMatthew Dillon * new range as well. 123430dcfc09SJohn Dyson */ 12358211bd45SKonstantin Belousov if ((prev_entry->inheritance == inheritance) && 12368cc7e047SJohn Dyson (prev_entry->protection == prot) && 12378cc7e047SJohn Dyson (prev_entry->max_protection == max)) { 123830dcfc09SJohn Dyson map->size += (end - prev_entry->end); 123930dcfc09SJohn Dyson prev_entry->end = end; 12400164e057SAlan Cox vm_map_entry_resize_free(map, prev_entry); 12414e71e795SMatthew Dillon vm_map_simplify_entry(map, prev_entry); 124230dcfc09SJohn Dyson return (KERN_SUCCESS); 124330dcfc09SJohn Dyson } 12448cc7e047SJohn Dyson 12452aaeadf8SMatthew Dillon /* 12462aaeadf8SMatthew Dillon * If we can extend the object but cannot extend the 12472aaeadf8SMatthew Dillon * map entry, we have to create a new map entry. We 12482aaeadf8SMatthew Dillon * must bump the ref count on the extended object to 12494e71e795SMatthew Dillon * account for it. object may be NULL. 12502aaeadf8SMatthew Dillon */ 12512aaeadf8SMatthew Dillon object = prev_entry->object.vm_object; 12522aaeadf8SMatthew Dillon offset = prev_entry->offset + 12532aaeadf8SMatthew Dillon (prev_entry->end - prev_entry->start); 12548cc7e047SJohn Dyson vm_object_reference(object); 1255ef694c1aSEdward Tomasz Napierala if (cred != NULL && object != NULL && object->cred != NULL && 12563364c323SKonstantin Belousov !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 12573364c323SKonstantin Belousov /* Object already accounts for this uid. 
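 * (The swap charge was taken against object->cred when the
 * object was charged, so cred is cleared here to keep the new
 * entry from being charged for the same range twice.)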
*/ 1258ef694c1aSEdward Tomasz Napierala cred = NULL; 12593364c323SKonstantin Belousov } 1260b18bfc3dSJohn Dyson } 126160169c88SAlan Cox if (cred != NULL) 126260169c88SAlan Cox crhold(cred); 12632aaeadf8SMatthew Dillon 12642aaeadf8SMatthew Dillon /* 126530dcfc09SJohn Dyson * Create a new entry 126630dcfc09SJohn Dyson */ 126730dcfc09SJohn Dyson new_entry = vm_map_entry_create(map); 126830dcfc09SJohn Dyson new_entry->start = start; 126930dcfc09SJohn Dyson new_entry->end = end; 1270ef694c1aSEdward Tomasz Napierala new_entry->cred = NULL; 127130dcfc09SJohn Dyson 1272afa07f7eSJohn Dyson new_entry->eflags = protoeflags; 127330dcfc09SJohn Dyson new_entry->object.vm_object = object; 127430dcfc09SJohn Dyson new_entry->offset = offset; 12752267af78SJulian Elischer new_entry->avail_ssize = 0; 12762267af78SJulian Elischer 12778211bd45SKonstantin Belousov new_entry->inheritance = inheritance; 127830dcfc09SJohn Dyson new_entry->protection = prot; 127930dcfc09SJohn Dyson new_entry->max_protection = max; 128030dcfc09SJohn Dyson new_entry->wired_count = 0; 1281997ac690SKonstantin Belousov new_entry->wiring_thread = NULL; 128213458803SAlan Cox new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; 128313458803SAlan Cox new_entry->next_read = OFF_TO_IDX(offset); 1284e5f251d2SAlan Cox 1285ef694c1aSEdward Tomasz Napierala KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), 12863364c323SKonstantin Belousov ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry)); 1287ef694c1aSEdward Tomasz Napierala new_entry->cred = cred; 12883364c323SKonstantin Belousov 128930dcfc09SJohn Dyson /* 129030dcfc09SJohn Dyson * Insert the new entry into the list 129130dcfc09SJohn Dyson */ 129230dcfc09SJohn Dyson vm_map_entry_link(map, prev_entry, new_entry); 129330dcfc09SJohn Dyson map->size += new_entry->end - new_entry->start; 129430dcfc09SJohn Dyson 12951a484d28SMatthew Dillon /* 1296eaaf9f7fSAlan Cox * Try to coalesce the new entry with both the previous and next 1297eaaf9f7fSAlan Cox * entries in the list. Previously, we only attempted to coalesce 1298eaaf9f7fSAlan Cox * with the previous entry when object is NULL. Here, we handle the 1299eaaf9f7fSAlan Cox * other cases, which are less common. 13001a484d28SMatthew Dillon */ 13014e71e795SMatthew Dillon vm_map_simplify_entry(map, new_entry); 13024e71e795SMatthew Dillon 13034f79d873SMatthew Dillon if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 13044da4d293SAlan Cox vm_map_pmap_enter(map, start, prot, 1305e972780aSAlan Cox object, OFF_TO_IDX(offset), end - start, 1306e972780aSAlan Cox cow & MAP_PREFAULT_PARTIAL); 13074f79d873SMatthew Dillon } 1308e972780aSAlan Cox 130930dcfc09SJohn Dyson return (KERN_SUCCESS); 131030dcfc09SJohn Dyson } 131130dcfc09SJohn Dyson 131230dcfc09SJohn Dyson /* 13130164e057SAlan Cox * vm_map_findspace: 13140164e057SAlan Cox * 13150164e057SAlan Cox * Find the first fit (lowest VM address) for "length" free bytes 13160164e057SAlan Cox * beginning at address >= start in the given map. 13170164e057SAlan Cox * 13180164e057SAlan Cox * In a vm_map_entry, "adj_free" is the amount of free space 13190164e057SAlan Cox * adjacent (higher address) to this entry, and "max_free" is the 13200164e057SAlan Cox * maximum amount of contiguous free space in its subtree. This 13210164e057SAlan Cox * allows finding a free region in one path down the tree, so 13220164e057SAlan Cox * O(log n) amortized with splay trees. 13230164e057SAlan Cox * 13240164e057SAlan Cox * Requires that the map be locked, and leaves it so.
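 *
 * Worked example (hypothetical addresses): if an entry ends at
 * 0x3000 and its successor starts at 0x6000, the entry's
 * adj_free is 0x3000 bytes; and since max_free at a node is the
 * largest adj_free anywhere in that node's subtree, a subtree
 * whose max_free is less than "length" can be skipped without
 * descending into it.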
13250164e057SAlan Cox * 13260164e057SAlan Cox * Returns: 0 on success, and starting address in *addr, 13270164e057SAlan Cox * 1 if insufficient space. 1328df8bae1dSRodney W. Grimes */ 1329df8bae1dSRodney W. Grimes int 13300164e057SAlan Cox vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 13310164e057SAlan Cox vm_offset_t *addr) /* OUT */ 1332df8bae1dSRodney W. Grimes { 13330164e057SAlan Cox vm_map_entry_t entry; 1334f8616ebfSAlan Cox vm_offset_t st; 1335df8bae1dSRodney W. Grimes 1336986b43f8SAlan Cox /* 1337986b43f8SAlan Cox * Request must fit within min/max VM address and must avoid 1338986b43f8SAlan Cox * address wrap. 1339986b43f8SAlan Cox */ 1340df8bae1dSRodney W. Grimes if (start < map->min_offset) 1341df8bae1dSRodney W. Grimes start = map->min_offset; 1342986b43f8SAlan Cox if (start + length > map->max_offset || start + length < start) 1343df8bae1dSRodney W. Grimes return (1); 1344df8bae1dSRodney W. Grimes 13450164e057SAlan Cox /* Empty tree means wide open address space. */ 13460164e057SAlan Cox if (map->root == NULL) { 1347df8bae1dSRodney W. Grimes *addr = start; 1348f8616ebfSAlan Cox return (0); 134999448ed1SJohn Dyson } 13500164e057SAlan Cox 13510164e057SAlan Cox /* 13520164e057SAlan Cox * After splay, if start comes before the root node, then there 13530164e057SAlan Cox * must be a gap from start to the root. 13540164e057SAlan Cox */ 13550164e057SAlan Cox map->root = vm_map_entry_splay(start, map->root); 13560164e057SAlan Cox if (start + length <= map->root->start) { 13570164e057SAlan Cox *addr = start; 1358f8616ebfSAlan Cox return (0); 13590164e057SAlan Cox } 13600164e057SAlan Cox 13610164e057SAlan Cox /* 13620164e057SAlan Cox * Root is the last node that might begin its gap before 1363986b43f8SAlan Cox * start, and this is the last comparison where address 1364986b43f8SAlan Cox * wrap might be a problem. 13650164e057SAlan Cox */ 13660164e057SAlan Cox st = (start > map->root->end) ? start : map->root->end; 1367986b43f8SAlan Cox if (length <= map->root->end + map->root->adj_free - st) { 13680164e057SAlan Cox *addr = st; 1369f8616ebfSAlan Cox return (0); 13700164e057SAlan Cox } 13710164e057SAlan Cox 13720164e057SAlan Cox /* With max_free, can immediately tell if no solution. */ 13730164e057SAlan Cox entry = map->root->right; 13740164e057SAlan Cox if (entry == NULL || length > entry->max_free) 13750164e057SAlan Cox return (1); 13760164e057SAlan Cox 13770164e057SAlan Cox /* 13780164e057SAlan Cox * Search the right subtree in the order: left subtree, root, 13790164e057SAlan Cox * right subtree (first fit). The previous splay implies that 13800164e057SAlan Cox * all regions in the right subtree have addresses > start. 13810164e057SAlan Cox */ 13820164e057SAlan Cox while (entry != NULL) { 13830164e057SAlan Cox if (entry->left != NULL && entry->left->max_free >= length) 13840164e057SAlan Cox entry = entry->left; 13850164e057SAlan Cox else if (entry->adj_free >= length) { 13860164e057SAlan Cox *addr = entry->end; 1387f8616ebfSAlan Cox return (0); 13880164e057SAlan Cox } else 13890164e057SAlan Cox entry = entry->right; 13900164e057SAlan Cox } 13910164e057SAlan Cox 13920164e057SAlan Cox /* Can't get here, so panic if we do. */ 13930164e057SAlan Cox panic("vm_map_findspace: max_free corrupt"); 1394df8bae1dSRodney W. Grimes } 1395df8bae1dSRodney W.
Grimes 1396d239bd3cSKonstantin Belousov int 1397d239bd3cSKonstantin Belousov vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1398b8ca4ef2SAlan Cox vm_offset_t start, vm_size_t length, vm_prot_t prot, 1399d239bd3cSKonstantin Belousov vm_prot_t max, int cow) 1400d239bd3cSKonstantin Belousov { 1401b8ca4ef2SAlan Cox vm_offset_t end; 1402d239bd3cSKonstantin Belousov int result; 1403d239bd3cSKonstantin Belousov 1404d239bd3cSKonstantin Belousov end = start + length; 14054648ba0aSKonstantin Belousov KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 14064648ba0aSKonstantin Belousov object == NULL, 14074648ba0aSKonstantin Belousov ("vm_map_fixed: non-NULL backing object for stack")); 1408897d81a0SKonstantin Belousov vm_map_lock(map); 1409d239bd3cSKonstantin Belousov VM_MAP_RANGE_CHECK(map, start, end); 141011c42bccSKonstantin Belousov if ((cow & MAP_CHECK_EXCL) == 0) 141111c42bccSKonstantin Belousov vm_map_delete(map, start, end); 14124648ba0aSKonstantin Belousov if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 14134648ba0aSKonstantin Belousov result = vm_map_stack_locked(map, start, length, sgrowsiz, 14144648ba0aSKonstantin Belousov prot, max, cow); 14154648ba0aSKonstantin Belousov } else { 14164648ba0aSKonstantin Belousov result = vm_map_insert(map, object, offset, start, end, 14174648ba0aSKonstantin Belousov prot, max, cow); 14184648ba0aSKonstantin Belousov } 1419d239bd3cSKonstantin Belousov vm_map_unlock(map); 1420d239bd3cSKonstantin Belousov return (result); 1421d239bd3cSKonstantin Belousov } 1422d239bd3cSKonstantin Belousov 1423df8bae1dSRodney W. Grimes /* 1424df8bae1dSRodney W. Grimes * vm_map_find finds an unallocated region in the target address 1425df8bae1dSRodney W. Grimes * map with the given length. The search is defined to be 1426df8bae1dSRodney W. Grimes * first-fit from the specified address; the region found is 1427df8bae1dSRodney W. Grimes * returned in the same parameter. 1428df8bae1dSRodney W. Grimes * 14292aaeadf8SMatthew Dillon * If object is non-NULL, ref count must be bumped by caller 14302aaeadf8SMatthew Dillon * prior to making the call to account for the new entry. 1431df8bae1dSRodney W. Grimes */ 1432df8bae1dSRodney W. Grimes int 1433b9dcd593SBruce Evans vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1434b9dcd593SBruce Evans vm_offset_t *addr, /* IN/OUT */ 1435edb572a3SJohn Baldwin vm_size_t length, vm_offset_t max_addr, int find_space, 1436edb572a3SJohn Baldwin vm_prot_t prot, vm_prot_t max, int cow) 1437df8bae1dSRodney W. Grimes { 14385aa60b6fSJohn Baldwin vm_offset_t alignment, initial_addr, start; 14396eaee3feSAlan Cox int result; 1440df8bae1dSRodney W.
Grimes 14414648ba0aSKonstantin Belousov KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 14424648ba0aSKonstantin Belousov object == NULL, 14434648ba0aSKonstantin Belousov ("vm_map_find: non-NULL backing object for stack")); 1444ff74a3faSJohn Baldwin if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL || 1445ff74a3faSJohn Baldwin (object->flags & OBJ_COLORED) == 0)) 1446ff74a3faSJohn Baldwin find_space = VMFS_ANY_SPACE; 14475aa60b6fSJohn Baldwin if (find_space >> 8 != 0) { 14485aa60b6fSJohn Baldwin KASSERT((find_space & 0xff) == 0, ("bad VMFS flags")); 14495aa60b6fSJohn Baldwin alignment = (vm_offset_t)1 << (find_space >> 8); 14505aa60b6fSJohn Baldwin } else 14515aa60b6fSJohn Baldwin alignment = 0; 1452ff74a3faSJohn Baldwin initial_addr = *addr; 1453ff74a3faSJohn Baldwin again: 1454ff74a3faSJohn Baldwin start = initial_addr; 1455bea41bcfSDavid Greenman vm_map_lock(map); 145626c538ffSAlan Cox do { 145726c538ffSAlan Cox if (find_space != VMFS_NO_SPACE) { 1458edb572a3SJohn Baldwin if (vm_map_findspace(map, start, length, addr) || 1459edb572a3SJohn Baldwin (max_addr != 0 && *addr + length > max_addr)) { 1460df8bae1dSRodney W. Grimes vm_map_unlock(map); 1461ff74a3faSJohn Baldwin if (find_space == VMFS_OPTIMAL_SPACE) { 1462ff74a3faSJohn Baldwin find_space = VMFS_ANY_SPACE; 1463ff74a3faSJohn Baldwin goto again; 1464ff74a3faSJohn Baldwin } 1465df8bae1dSRodney W. Grimes return (KERN_NO_SPACE); 1466df8bae1dSRodney W. Grimes } 1467ca596a25SJuli Mallett switch (find_space) { 14685aa60b6fSJohn Baldwin case VMFS_SUPER_SPACE: 1469ff74a3faSJohn Baldwin case VMFS_OPTIMAL_SPACE: 147026c538ffSAlan Cox pmap_align_superpage(object, offset, addr, 147126c538ffSAlan Cox length); 1472ca596a25SJuli Mallett break; 14735aa60b6fSJohn Baldwin case VMFS_ANY_SPACE: 14745aa60b6fSJohn Baldwin break; 1475ca596a25SJuli Mallett default: 14765aa60b6fSJohn Baldwin if ((*addr & (alignment - 1)) != 0) { 14775aa60b6fSJohn Baldwin *addr &= ~(alignment - 1); 14785aa60b6fSJohn Baldwin *addr += alignment; 14795aa60b6fSJohn Baldwin } 1480ca596a25SJuli Mallett break; 1481ca596a25SJuli Mallett } 1482ca596a25SJuli Mallett 1483df8bae1dSRodney W. Grimes start = *addr; 1484df8bae1dSRodney W. Grimes } 14854648ba0aSKonstantin Belousov if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 14864648ba0aSKonstantin Belousov result = vm_map_stack_locked(map, start, length, 14874648ba0aSKonstantin Belousov sgrowsiz, prot, max, cow); 14884648ba0aSKonstantin Belousov } else { 14894648ba0aSKonstantin Belousov result = vm_map_insert(map, object, offset, start, 14904648ba0aSKonstantin Belousov start + length, prot, max, cow); 14914648ba0aSKonstantin Belousov } 14925aa60b6fSJohn Baldwin } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE && 14935aa60b6fSJohn Baldwin find_space != VMFS_ANY_SPACE); 1494df8bae1dSRodney W. Grimes vm_map_unlock(map); 1495df8bae1dSRodney W. Grimes return (result); 1496df8bae1dSRodney W. Grimes } 1497df8bae1dSRodney W. Grimes 1498df8bae1dSRodney W. Grimes /* 1499b7b2aac2SJohn Dyson * vm_map_simplify_entry: 150067bf6868SJohn Dyson * 15014e71e795SMatthew Dillon * Simplify the given map entry by merging with either neighbor. This 15024e71e795SMatthew Dillon * routine also has the ability to merge with both neighbors. 15034e71e795SMatthew Dillon * 15044e71e795SMatthew Dillon * The map must be locked. 15054e71e795SMatthew Dillon * 15064e71e795SMatthew Dillon * This routine guarantees that the passed entry remains valid (though 15074e71e795SMatthew Dillon * possibly extended).
When merging, this routine may delete one or 15084e71e795SMatthew Dillon * both neighbors. 1509df8bae1dSRodney W. Grimes */ 1510*077ec27cSAlan Cox static void 15111b40f8c0SMatthew Dillon vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 1512df8bae1dSRodney W. Grimes { 1513308c24baSJohn Dyson vm_map_entry_t next, prev; 1514b7b2aac2SJohn Dyson vm_size_t prevsize, esize; 1515df8bae1dSRodney W. Grimes 1516eaaf9f7fSAlan Cox if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | 1517eaaf9f7fSAlan Cox MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0) 1518df8bae1dSRodney W. Grimes return; 1519308c24baSJohn Dyson 1520308c24baSJohn Dyson prev = entry->prev; 1521308c24baSJohn Dyson if (prev != &map->header) { 152267bf6868SJohn Dyson prevsize = prev->end - prev->start; 152367bf6868SJohn Dyson if ( (prev->end == entry->start) && 152467bf6868SJohn Dyson (prev->object.vm_object == entry->object.vm_object) && 152595e5e988SJohn Dyson (!prev->object.vm_object || 152667bf6868SJohn Dyson (prev->offset + prevsize == entry->offset)) && 1527afa07f7eSJohn Dyson (prev->eflags == entry->eflags) && 152867bf6868SJohn Dyson (prev->protection == entry->protection) && 152967bf6868SJohn Dyson (prev->max_protection == entry->max_protection) && 153067bf6868SJohn Dyson (prev->inheritance == entry->inheritance) && 15313364c323SKonstantin Belousov (prev->wired_count == entry->wired_count) && 1532ef694c1aSEdward Tomasz Napierala (prev->cred == entry->cred)) { 1533308c24baSJohn Dyson vm_map_entry_unlink(map, prev); 1534308c24baSJohn Dyson entry->start = prev->start; 1535308c24baSJohn Dyson entry->offset = prev->offset; 15360164e057SAlan Cox if (entry->prev != &map->header) 15370164e057SAlan Cox vm_map_entry_resize_free(map, entry->prev); 15387fd10fb3SKonstantin Belousov 15397fd10fb3SKonstantin Belousov /* 1540b0994946SKonstantin Belousov * If the backing object is a vnode object, 1541b0994946SKonstantin Belousov * vm_object_deallocate() calls vrele(). 1542b0994946SKonstantin Belousov * However, vrele() does not lock the vnode 1543b0994946SKonstantin Belousov * because the vnode has additional 1544b0994946SKonstantin Belousov * references. Thus, the map lock can be kept 1545b0994946SKonstantin Belousov * without causing a lock-order reversal with 1546b0994946SKonstantin Belousov * the vnode lock. 154784110e7eSKonstantin Belousov * 154884110e7eSKonstantin Belousov * Since we count the number of virtual page 154984110e7eSKonstantin Belousov * mappings in object->un_pager.vnp.writemappings, 155084110e7eSKonstantin Belousov * the writemappings value should not be adjusted 155184110e7eSKonstantin Belousov * when the entry is disposed of. 
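 * (The surviving entry already holds its own reference to the
 * shared object, so the vm_object_deallocate() below releases
 * only prev's now-redundant reference.)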
15527fd10fb3SKonstantin Belousov */ 1553b18bfc3dSJohn Dyson if (prev->object.vm_object) 1554308c24baSJohn Dyson vm_object_deallocate(prev->object.vm_object); 1555ef694c1aSEdward Tomasz Napierala if (prev->cred != NULL) 1556ef694c1aSEdward Tomasz Napierala crfree(prev->cred); 1557308c24baSJohn Dyson vm_map_entry_dispose(map, prev); 1558308c24baSJohn Dyson } 1559308c24baSJohn Dyson } 1560de5f6a77SJohn Dyson 1561de5f6a77SJohn Dyson next = entry->next; 1562308c24baSJohn Dyson if (next != &map->header) { 156367bf6868SJohn Dyson esize = entry->end - entry->start; 156467bf6868SJohn Dyson if ((entry->end == next->start) && 156567bf6868SJohn Dyson (next->object.vm_object == entry->object.vm_object) && 156667bf6868SJohn Dyson (!entry->object.vm_object || 156767bf6868SJohn Dyson (entry->offset + esize == next->offset)) && 1568afa07f7eSJohn Dyson (next->eflags == entry->eflags) && 156967bf6868SJohn Dyson (next->protection == entry->protection) && 157067bf6868SJohn Dyson (next->max_protection == entry->max_protection) && 157167bf6868SJohn Dyson (next->inheritance == entry->inheritance) && 15723364c323SKonstantin Belousov (next->wired_count == entry->wired_count) && 1573ef694c1aSEdward Tomasz Napierala (next->cred == entry->cred)) { 1574de5f6a77SJohn Dyson vm_map_entry_unlink(map, next); 1575de5f6a77SJohn Dyson entry->end = next->end; 15760164e057SAlan Cox vm_map_entry_resize_free(map, entry); 15777fd10fb3SKonstantin Belousov 15787fd10fb3SKonstantin Belousov /* 15797fd10fb3SKonstantin Belousov * See comment above. 15807fd10fb3SKonstantin Belousov */ 1581b18bfc3dSJohn Dyson if (next->object.vm_object) 1582de5f6a77SJohn Dyson vm_object_deallocate(next->object.vm_object); 1583ef694c1aSEdward Tomasz Napierala if (next->cred != NULL) 1584ef694c1aSEdward Tomasz Napierala crfree(next->cred); 1585de5f6a77SJohn Dyson vm_map_entry_dispose(map, next); 1586df8bae1dSRodney W. Grimes } 1587df8bae1dSRodney W. Grimes } 1588de5f6a77SJohn Dyson } 1589df8bae1dSRodney W. Grimes /* 1590df8bae1dSRodney W. Grimes * vm_map_clip_start: [ internal use only ] 1591df8bae1dSRodney W. Grimes * 1592df8bae1dSRodney W. Grimes * Asserts that the given entry begins at or after 1593df8bae1dSRodney W. Grimes * the specified address; if necessary, 1594df8bae1dSRodney W. Grimes * it splits the entry into two. 1595df8bae1dSRodney W. Grimes */ 1596df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \ 1597df8bae1dSRodney W. Grimes { \ 1598df8bae1dSRodney W. Grimes if (startaddr > entry->start) \ 1599df8bae1dSRodney W. Grimes _vm_map_clip_start(map, entry, startaddr); \ 1600df8bae1dSRodney W. Grimes } 1601df8bae1dSRodney W. Grimes 1602df8bae1dSRodney W. Grimes /* 1603df8bae1dSRodney W. Grimes * This routine is called only when it is known that 1604df8bae1dSRodney W. Grimes * the entry must be split. 1605df8bae1dSRodney W. Grimes */ 16060d94caffSDavid Greenman static void 16071b40f8c0SMatthew Dillon _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1608df8bae1dSRodney W. Grimes { 1609c0877f10SJohn Dyson vm_map_entry_t new_entry; 1610df8bae1dSRodney W. Grimes 16113a0916b8SKonstantin Belousov VM_MAP_ASSERT_LOCKED(map); 16123a0916b8SKonstantin Belousov 1613df8bae1dSRodney W. Grimes /* 16140d94caffSDavid Greenman * Split off the front portion -- note that we must insert the new 16150d94caffSDavid Greenman * entry BEFORE this one, so that this entry has the specified 16160d94caffSDavid Greenman * starting address. 1617df8bae1dSRodney W. 
Grimes */ 1618f32dbbeeSJohn Dyson vm_map_simplify_entry(map, entry); 1619f32dbbeeSJohn Dyson 162011cccda1SJohn Dyson /* 162111cccda1SJohn Dyson * If there is no object backing this entry, we might as well create 162211cccda1SJohn Dyson * one now. If we defer it, an object can get created after the map 162311cccda1SJohn Dyson * is clipped, and individual objects will be created for the split-up 162411cccda1SJohn Dyson * map. This is a bit of a hack, but is also about the best place to 162511cccda1SJohn Dyson * put this improvement. 162611cccda1SJohn Dyson */ 16274e71e795SMatthew Dillon if (entry->object.vm_object == NULL && !map->system_map) { 162811cccda1SJohn Dyson vm_object_t object; 162911cccda1SJohn Dyson object = vm_object_allocate(OBJT_DEFAULT, 1630c2e11a03SJohn Dyson atop(entry->end - entry->start)); 163111cccda1SJohn Dyson entry->object.vm_object = object; 163211cccda1SJohn Dyson entry->offset = 0; 1633ef694c1aSEdward Tomasz Napierala if (entry->cred != NULL) { 1634ef694c1aSEdward Tomasz Napierala object->cred = entry->cred; 16353364c323SKonstantin Belousov object->charge = entry->end - entry->start; 1636ef694c1aSEdward Tomasz Napierala entry->cred = NULL; 16373364c323SKonstantin Belousov } 16383364c323SKonstantin Belousov } else if (entry->object.vm_object != NULL && 16393364c323SKonstantin Belousov ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1640ef694c1aSEdward Tomasz Napierala entry->cred != NULL) { 164189f6b863SAttilio Rao VM_OBJECT_WLOCK(entry->object.vm_object); 1642ef694c1aSEdward Tomasz Napierala KASSERT(entry->object.vm_object->cred == NULL, 1643ef694c1aSEdward Tomasz Napierala ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry)); 1644ef694c1aSEdward Tomasz Napierala entry->object.vm_object->cred = entry->cred; 16453364c323SKonstantin Belousov entry->object.vm_object->charge = entry->end - entry->start; 164689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(entry->object.vm_object); 1647ef694c1aSEdward Tomasz Napierala entry->cred = NULL; 164811cccda1SJohn Dyson } 164911cccda1SJohn Dyson 1650df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 1651df8bae1dSRodney W. Grimes *new_entry = *entry; 1652df8bae1dSRodney W. Grimes 1653df8bae1dSRodney W. Grimes new_entry->end = start; 1654df8bae1dSRodney W. Grimes entry->offset += (start - entry->start); 1655df8bae1dSRodney W. Grimes entry->start = start; 1656ef694c1aSEdward Tomasz Napierala if (new_entry->cred != NULL) 1657ef694c1aSEdward Tomasz Napierala crhold(entry->cred); 1658df8bae1dSRodney W. Grimes 1659df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry->prev, new_entry); 1660df8bae1dSRodney W. Grimes 16619fdfe602SMatthew Dillon if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1662df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 166384110e7eSKonstantin Belousov /* 166484110e7eSKonstantin Belousov * The object->un_pager.vnp.writemappings for the 166584110e7eSKonstantin Belousov * object of MAP_ENTRY_VN_WRITECNT type entry shall be 166684110e7eSKonstantin Belousov * kept as is here. The virtual pages are 166784110e7eSKonstantin Belousov * re-distributed among the clipped entries, so the sum is 166884110e7eSKonstantin Belousov * left the same. 166984110e7eSKonstantin Belousov */ 1670df8bae1dSRodney W. Grimes } 1671c0877f10SJohn Dyson } 1672df8bae1dSRodney W. Grimes 1673df8bae1dSRodney W. Grimes /* 1674df8bae1dSRodney W. Grimes * vm_map_clip_end: [ internal use only ] 1675df8bae1dSRodney W. Grimes * 1676df8bae1dSRodney W. 
Grimes * Asserts that the given entry ends at or before 1677df8bae1dSRodney W. Grimes * the specified address; if necessary, 1678df8bae1dSRodney W. Grimes * it splits the entry into two. 1679df8bae1dSRodney W. Grimes */ 1680df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \ 1681df8bae1dSRodney W. Grimes { \ 1682af045176SPoul-Henning Kamp if ((endaddr) < (entry->end)) \ 1683af045176SPoul-Henning Kamp _vm_map_clip_end((map), (entry), (endaddr)); \ 1684df8bae1dSRodney W. Grimes } 1685df8bae1dSRodney W. Grimes 1686df8bae1dSRodney W. Grimes /* 1687df8bae1dSRodney W. Grimes * This routine is called only when it is known that 1688df8bae1dSRodney W. Grimes * the entry must be split. 1689df8bae1dSRodney W. Grimes */ 16900d94caffSDavid Greenman static void 16911b40f8c0SMatthew Dillon _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1692df8bae1dSRodney W. Grimes { 1693c0877f10SJohn Dyson vm_map_entry_t new_entry; 1694df8bae1dSRodney W. Grimes 16953a0916b8SKonstantin Belousov VM_MAP_ASSERT_LOCKED(map); 16963a0916b8SKonstantin Belousov 1697df8bae1dSRodney W. Grimes /* 169811cccda1SJohn Dyson * If there is no object backing this entry, we might as well create 169911cccda1SJohn Dyson * one now. If we defer it, an object can get created after the map 170011cccda1SJohn Dyson * is clipped, and individual objects will be created for the split-up 170111cccda1SJohn Dyson * map. This is a bit of a hack, but is also about the best place to 170211cccda1SJohn Dyson * put this improvement. 170311cccda1SJohn Dyson */ 17044e71e795SMatthew Dillon if (entry->object.vm_object == NULL && !map->system_map) { 170511cccda1SJohn Dyson vm_object_t object; 170611cccda1SJohn Dyson object = vm_object_allocate(OBJT_DEFAULT, 1707c2e11a03SJohn Dyson atop(entry->end - entry->start)); 170811cccda1SJohn Dyson entry->object.vm_object = object; 170911cccda1SJohn Dyson entry->offset = 0; 1710ef694c1aSEdward Tomasz Napierala if (entry->cred != NULL) { 1711ef694c1aSEdward Tomasz Napierala object->cred = entry->cred; 17123364c323SKonstantin Belousov object->charge = entry->end - entry->start; 1713ef694c1aSEdward Tomasz Napierala entry->cred = NULL; 17143364c323SKonstantin Belousov } 17153364c323SKonstantin Belousov } else if (entry->object.vm_object != NULL && 17163364c323SKonstantin Belousov ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1717ef694c1aSEdward Tomasz Napierala entry->cred != NULL) { 171889f6b863SAttilio Rao VM_OBJECT_WLOCK(entry->object.vm_object); 1719ef694c1aSEdward Tomasz Napierala KASSERT(entry->object.vm_object->cred == NULL, 1720ef694c1aSEdward Tomasz Napierala ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry)); 1721ef694c1aSEdward Tomasz Napierala entry->object.vm_object->cred = entry->cred; 17223364c323SKonstantin Belousov entry->object.vm_object->charge = entry->end - entry->start; 172389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(entry->object.vm_object); 1724ef694c1aSEdward Tomasz Napierala entry->cred = NULL; 172511cccda1SJohn Dyson } 172611cccda1SJohn Dyson 172711cccda1SJohn Dyson /* 17280d94caffSDavid Greenman * Create a new entry and insert it AFTER the specified entry 1729df8bae1dSRodney W. Grimes */ 1730df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 1731df8bae1dSRodney W. Grimes *new_entry = *entry; 1732df8bae1dSRodney W. Grimes 1733df8bae1dSRodney W. Grimes new_entry->start = entry->end = end; 1734df8bae1dSRodney W. 
Grimes new_entry->offset += (end - entry->start); 1735ef694c1aSEdward Tomasz Napierala if (new_entry->cred != NULL) 1736ef694c1aSEdward Tomasz Napierala crhold(entry->cred); 1737df8bae1dSRodney W. Grimes 1738df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry, new_entry); 1739df8bae1dSRodney W. Grimes 17409fdfe602SMatthew Dillon if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1741df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 1742df8bae1dSRodney W. Grimes } 1743c0877f10SJohn Dyson } 1744df8bae1dSRodney W. Grimes 1745df8bae1dSRodney W. Grimes /* 1746df8bae1dSRodney W. Grimes * vm_map_submap: [ kernel use only ] 1747df8bae1dSRodney W. Grimes * 1748df8bae1dSRodney W. Grimes * Mark the given range as handled by a subordinate map. 1749df8bae1dSRodney W. Grimes * 1750df8bae1dSRodney W. Grimes * This range must have been created with vm_map_find, 1751df8bae1dSRodney W. Grimes * and no other operations may have been performed on this 1752df8bae1dSRodney W. Grimes * range prior to calling vm_map_submap. 1753df8bae1dSRodney W. Grimes * 1754df8bae1dSRodney W. Grimes * Only a limited number of operations can be performed 1755df8bae1dSRodney W. Grimes * within this range after calling vm_map_submap: 1756df8bae1dSRodney W. Grimes * vm_fault 1757df8bae1dSRodney W. Grimes * [Don't try vm_map_copy!] 1758df8bae1dSRodney W. Grimes * 1759df8bae1dSRodney W. Grimes * To remove a submapping, one must first remove the 1760df8bae1dSRodney W. Grimes * range from the superior map, and then destroy the 1761df8bae1dSRodney W. Grimes * submap (if desired). [Better yet, don't try it.] 1762df8bae1dSRodney W. Grimes */ 1763df8bae1dSRodney W. Grimes int 17641b40f8c0SMatthew Dillon vm_map_submap( 17651b40f8c0SMatthew Dillon vm_map_t map, 17661b40f8c0SMatthew Dillon vm_offset_t start, 17671b40f8c0SMatthew Dillon vm_offset_t end, 17681b40f8c0SMatthew Dillon vm_map_t submap) 1769df8bae1dSRodney W. Grimes { 1770df8bae1dSRodney W. Grimes vm_map_entry_t entry; 1771c0877f10SJohn Dyson int result = KERN_INVALID_ARGUMENT; 1772df8bae1dSRodney W. Grimes 1773df8bae1dSRodney W. Grimes vm_map_lock(map); 1774df8bae1dSRodney W. Grimes 1775df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1776df8bae1dSRodney W. Grimes 1777df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &entry)) { 1778df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 17790d94caffSDavid Greenman } else 1780df8bae1dSRodney W. Grimes entry = entry->next; 1781df8bae1dSRodney W. Grimes 1782df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1783df8bae1dSRodney W. Grimes 1784df8bae1dSRodney W. Grimes if ((entry->start == start) && (entry->end == end) && 17859fdfe602SMatthew Dillon ((entry->eflags & MAP_ENTRY_COW) == 0) && 1786afa07f7eSJohn Dyson (entry->object.vm_object == NULL)) { 17872d8acc0fSJohn Dyson entry->object.sub_map = submap; 1788afa07f7eSJohn Dyson entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1789df8bae1dSRodney W. Grimes result = KERN_SUCCESS; 1790df8bae1dSRodney W. Grimes } 1791df8bae1dSRodney W. Grimes vm_map_unlock(map); 1792df8bae1dSRodney W. Grimes 1793df8bae1dSRodney W. Grimes return (result); 1794df8bae1dSRodney W. Grimes } 1795df8bae1dSRodney W. Grimes 1796df8bae1dSRodney W.
Grimes /* 1797dd05fa19SAlan Cox * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 17981f78f902SAlan Cox */ 17991f78f902SAlan Cox #define MAX_INIT_PT 96 18001f78f902SAlan Cox 18011f78f902SAlan Cox /* 18020551c08dSAlan Cox * vm_map_pmap_enter: 18030551c08dSAlan Cox * 1804dd05fa19SAlan Cox * Preload the specified map's pmap with mappings to the specified 1805dd05fa19SAlan Cox * object's memory-resident pages. No further physical pages are 1806dd05fa19SAlan Cox * allocated, and no further virtual pages are retrieved from secondary 1807dd05fa19SAlan Cox * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 1808dd05fa19SAlan Cox * limited number of page mappings are created at the low-end of the 1809dd05fa19SAlan Cox * specified address range. (For this purpose, a superpage mapping 1810dd05fa19SAlan Cox * counts as one page mapping.) Otherwise, all resident pages within 1811dd05fa19SAlan Cox * the specified address range are mapped. Because these mappings are 1812dd05fa19SAlan Cox * being created speculatively, cached pages are not reactivated and 1813a922d312SAlan Cox * mapped. 18140551c08dSAlan Cox */ 1815*077ec27cSAlan Cox static void 18164da4d293SAlan Cox vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 18170551c08dSAlan Cox vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 18180551c08dSAlan Cox { 18198fece8c3SAlan Cox vm_offset_t start; 1820ce142d9eSAlan Cox vm_page_t p, p_start; 1821dd05fa19SAlan Cox vm_pindex_t mask, psize, threshold, tmpidx; 18220551c08dSAlan Cox 1823ba8bca61SAlan Cox if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 18241f78f902SAlan Cox return; 18259af6d512SAttilio Rao VM_OBJECT_RLOCK(object); 18269af6d512SAttilio Rao if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 18279af6d512SAttilio Rao VM_OBJECT_RUNLOCK(object); 182889f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 182901381811SJohn Baldwin if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 18309af6d512SAttilio Rao pmap_object_init_pt(map->pmap, addr, object, pindex, 18319af6d512SAttilio Rao size); 18329af6d512SAttilio Rao VM_OBJECT_WUNLOCK(object); 18339af6d512SAttilio Rao return; 18349af6d512SAttilio Rao } 18359af6d512SAttilio Rao VM_OBJECT_LOCK_DOWNGRADE(object); 18361f78f902SAlan Cox } 18371f78f902SAlan Cox 18381f78f902SAlan Cox psize = atop(size); 18391f78f902SAlan Cox if (psize + pindex > object->size) { 18409af6d512SAttilio Rao if (object->size < pindex) { 18419af6d512SAttilio Rao VM_OBJECT_RUNLOCK(object); 18429af6d512SAttilio Rao return; 18439af6d512SAttilio Rao } 18441f78f902SAlan Cox psize = object->size - pindex; 18451f78f902SAlan Cox } 18461f78f902SAlan Cox 1847ce142d9eSAlan Cox start = 0; 1848ce142d9eSAlan Cox p_start = NULL; 1849dd05fa19SAlan Cox threshold = MAX_INIT_PT; 18501f78f902SAlan Cox 1851b382c10aSKonstantin Belousov p = vm_page_find_least(object, pindex); 18521f78f902SAlan Cox /* 18531f78f902SAlan Cox * Assert: the variable p is either (1) the page with the 18541f78f902SAlan Cox * least pindex greater than or equal to the parameter pindex 18551f78f902SAlan Cox * or (2) NULL. 18561f78f902SAlan Cox */ 18571f78f902SAlan Cox for (; 18581f78f902SAlan Cox p != NULL && (tmpidx = p->pindex - pindex) < psize; 18591f78f902SAlan Cox p = TAILQ_NEXT(p, listq)) { 18601f78f902SAlan Cox /* 18611f78f902SAlan Cox * don't allow a madvise to blow away our really 18621f78f902SAlan Cox * free pages allocating pv entries.
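 * (Concretely: when MAP_PREFAULT_MADVISE is set and
 * v_free_count has dropped below v_free_reserved, the loop
 * below stops creating speculative mappings rather than spend
 * pv entries the system cannot spare.)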
18631f78f902SAlan Cox */ 1864dd05fa19SAlan Cox if (((flags & MAP_PREFAULT_MADVISE) != 0 && 1865dd05fa19SAlan Cox vm_cnt.v_free_count < vm_cnt.v_free_reserved) || 1866dd05fa19SAlan Cox ((flags & MAP_PREFAULT_PARTIAL) != 0 && 1867dd05fa19SAlan Cox tmpidx >= threshold)) { 1868379fb642SAlan Cox psize = tmpidx; 18691f78f902SAlan Cox break; 18701f78f902SAlan Cox } 18710a2e596aSAlan Cox if (p->valid == VM_PAGE_BITS_ALL) { 1872ce142d9eSAlan Cox if (p_start == NULL) { 1873ce142d9eSAlan Cox start = addr + ptoa(tmpidx); 1874ce142d9eSAlan Cox p_start = p; 1875ce142d9eSAlan Cox } 1876dd05fa19SAlan Cox /* Jump ahead if a superpage mapping is possible. */ 1877dd05fa19SAlan Cox if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 1878dd05fa19SAlan Cox (pagesizes[p->psind] - 1)) == 0) { 1879dd05fa19SAlan Cox mask = atop(pagesizes[p->psind]) - 1; 1880dd05fa19SAlan Cox if (tmpidx + mask < psize && 1881dd05fa19SAlan Cox vm_page_ps_is_valid(p)) { 1882dd05fa19SAlan Cox p += mask; 1883dd05fa19SAlan Cox threshold += mask; 1884dd05fa19SAlan Cox } 1885dd05fa19SAlan Cox } 18867bfda801SAlan Cox } else if (p_start != NULL) { 1887cf4682aeSAlan Cox pmap_enter_object(map->pmap, start, addr + 1888cf4682aeSAlan Cox ptoa(tmpidx), p_start, prot); 1889cf4682aeSAlan Cox p_start = NULL; 1890cf4682aeSAlan Cox } 1891cf4682aeSAlan Cox } 1892c46b90e9SAlan Cox if (p_start != NULL) 1893379fb642SAlan Cox pmap_enter_object(map->pmap, start, addr + ptoa(psize), 1894379fb642SAlan Cox p_start, prot); 18959af6d512SAttilio Rao VM_OBJECT_RUNLOCK(object); 18960551c08dSAlan Cox } 18970551c08dSAlan Cox 18980551c08dSAlan Cox /* 1899df8bae1dSRodney W. Grimes * vm_map_protect: 1900df8bae1dSRodney W. Grimes * 1901df8bae1dSRodney W. Grimes * Sets the protection of the specified address 1902df8bae1dSRodney W. Grimes * region in the target map. If "set_max" is 1903df8bae1dSRodney W. Grimes * specified, the maximum protection is to be set; 1904df8bae1dSRodney W. Grimes * otherwise, only the current protection is affected. 1905df8bae1dSRodney W. Grimes */ 1906df8bae1dSRodney W. Grimes int 1907b9dcd593SBruce Evans vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1908b9dcd593SBruce Evans vm_prot_t new_prot, boolean_t set_max) 1909df8bae1dSRodney W. Grimes { 1910210a6886SKonstantin Belousov vm_map_entry_t current, entry; 19113364c323SKonstantin Belousov vm_object_t obj; 1912ef694c1aSEdward Tomasz Napierala struct ucred *cred; 1913210a6886SKonstantin Belousov vm_prot_t old_prot; 1914df8bae1dSRodney W. Grimes 191579e9451fSKonstantin Belousov if (start == end) 191679e9451fSKonstantin Belousov return (KERN_SUCCESS); 191779e9451fSKonstantin Belousov 1918df8bae1dSRodney W. Grimes vm_map_lock(map); 1919df8bae1dSRodney W. Grimes 1920df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1921df8bae1dSRodney W. Grimes 1922df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &entry)) { 1923df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 1924b7b2aac2SJohn Dyson } else { 1925df8bae1dSRodney W. Grimes entry = entry->next; 1926b7b2aac2SJohn Dyson } 1927df8bae1dSRodney W. Grimes 1928df8bae1dSRodney W. Grimes /* 19290d94caffSDavid Greenman * Make a first pass to check for protection violations. 1930df8bae1dSRodney W. Grimes */ 1931df8bae1dSRodney W. Grimes current = entry; 1932df8bae1dSRodney W. Grimes while ((current != &map->header) && (current->start < end)) { 1933afa07f7eSJohn Dyson if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1934a1f6d91cSDavid Greenman vm_map_unlock(map); 1935df8bae1dSRodney W. 
Grimes return (KERN_INVALID_ARGUMENT); 1936a1f6d91cSDavid Greenman } 1937df8bae1dSRodney W. Grimes if ((new_prot & current->max_protection) != new_prot) { 1938df8bae1dSRodney W. Grimes vm_map_unlock(map); 1939df8bae1dSRodney W. Grimes return (KERN_PROTECTION_FAILURE); 1940df8bae1dSRodney W. Grimes } 1941df8bae1dSRodney W. Grimes current = current->next; 1942df8bae1dSRodney W. Grimes } 1943df8bae1dSRodney W. Grimes 19443364c323SKonstantin Belousov 19453364c323SKonstantin Belousov /* 19463364c323SKonstantin Belousov * Do an accounting pass for private read-only mappings that 19473364c323SKonstantin Belousov * now will do cow due to allowed write (e.g. debugger sets 19483364c323SKonstantin Belousov * breakpoint on text segment) 19493364c323SKonstantin Belousov */ 19503364c323SKonstantin Belousov for (current = entry; (current != &map->header) && 19513364c323SKonstantin Belousov (current->start < end); current = current->next) { 19523364c323SKonstantin Belousov 19533364c323SKonstantin Belousov vm_map_clip_end(map, current, end); 19543364c323SKonstantin Belousov 19553364c323SKonstantin Belousov if (set_max || 19563364c323SKonstantin Belousov ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 19573364c323SKonstantin Belousov ENTRY_CHARGED(current)) { 19583364c323SKonstantin Belousov continue; 19593364c323SKonstantin Belousov } 19603364c323SKonstantin Belousov 1961ef694c1aSEdward Tomasz Napierala cred = curthread->td_ucred; 19623364c323SKonstantin Belousov obj = current->object.vm_object; 19633364c323SKonstantin Belousov 19643364c323SKonstantin Belousov if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 19653364c323SKonstantin Belousov if (!swap_reserve(current->end - current->start)) { 19663364c323SKonstantin Belousov vm_map_unlock(map); 19673364c323SKonstantin Belousov return (KERN_RESOURCE_SHORTAGE); 19683364c323SKonstantin Belousov } 1969ef694c1aSEdward Tomasz Napierala crhold(cred); 1970ef694c1aSEdward Tomasz Napierala current->cred = cred; 19713364c323SKonstantin Belousov continue; 19723364c323SKonstantin Belousov } 19733364c323SKonstantin Belousov 197489f6b863SAttilio Rao VM_OBJECT_WLOCK(obj); 19753364c323SKonstantin Belousov if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 197689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(obj); 19773364c323SKonstantin Belousov continue; 19783364c323SKonstantin Belousov } 19793364c323SKonstantin Belousov 19803364c323SKonstantin Belousov /* 19813364c323SKonstantin Belousov * Charge for the whole object allocation now, since 19823364c323SKonstantin Belousov * we cannot distinguish between non-charged and 19833364c323SKonstantin Belousov * charged clipped mapping of the same object later. 19843364c323SKonstantin Belousov */ 19853364c323SKonstantin Belousov KASSERT(obj->charge == 0, 19863d95614fSKonstantin Belousov ("vm_map_protect: object %p overcharged (entry %p)", 19873d95614fSKonstantin Belousov obj, current)); 19883364c323SKonstantin Belousov if (!swap_reserve(ptoa(obj->size))) { 198989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(obj); 19903364c323SKonstantin Belousov vm_map_unlock(map); 19913364c323SKonstantin Belousov return (KERN_RESOURCE_SHORTAGE); 19923364c323SKonstantin Belousov } 19933364c323SKonstantin Belousov 1994ef694c1aSEdward Tomasz Napierala crhold(cred); 1995ef694c1aSEdward Tomasz Napierala obj->cred = cred; 19963364c323SKonstantin Belousov obj->charge = ptoa(obj->size); 199789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(obj); 19983364c323SKonstantin Belousov } 19993364c323SKonstantin Belousov 2000df8bae1dSRodney W. 
Grimes /* 20010d94caffSDavid Greenman * Go back and fix up protections. [Note that clipping is not 20020d94caffSDavid Greenman * necessary the second time.] 2003df8bae1dSRodney W. Grimes */ 2004df8bae1dSRodney W. Grimes current = entry; 2005df8bae1dSRodney W. Grimes while ((current != &map->header) && (current->start < end)) { 2006df8bae1dSRodney W. Grimes old_prot = current->protection; 2007210a6886SKonstantin Belousov 2008df8bae1dSRodney W. Grimes if (set_max) 2009df8bae1dSRodney W. Grimes current->protection = 2010df8bae1dSRodney W. Grimes (current->max_protection = new_prot) & 2011df8bae1dSRodney W. Grimes old_prot; 2012df8bae1dSRodney W. Grimes else 2013df8bae1dSRodney W. Grimes current->protection = new_prot; 2014df8bae1dSRodney W. Grimes 2015dd006a1bSAlan Cox /* 2016dd006a1bSAlan Cox * For user wired map entries, the normal lazy evaluation of 2017dd006a1bSAlan Cox * write access upgrades through soft page faults is 2018dd006a1bSAlan Cox * undesirable. Instead, immediately copy any pages that are 2019dd006a1bSAlan Cox * copy-on-write and enable write access in the physical map. 2020dd006a1bSAlan Cox */ 2021dd006a1bSAlan Cox if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2022210a6886SKonstantin Belousov (current->protection & VM_PROT_WRITE) != 0 && 20235930251aSKonstantin Belousov (old_prot & VM_PROT_WRITE) == 0) 2024210a6886SKonstantin Belousov vm_fault_copy_entry(map, map, current, current, NULL); 2025210a6886SKonstantin Belousov 2026df8bae1dSRodney W. Grimes /* 20272fafce9eSAlan Cox * When restricting access, update the physical map. Worry 20282fafce9eSAlan Cox * about copy-on-write here. 2029df8bae1dSRodney W. Grimes */ 20302fafce9eSAlan Cox if ((old_prot & ~current->protection) != 0) { 2031afa07f7eSJohn Dyson #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2032df8bae1dSRodney W. Grimes VM_PROT_ALL) 2033df8bae1dSRodney W. Grimes pmap_protect(map->pmap, current->start, 2034df8bae1dSRodney W. Grimes current->end, 20351c85e3dfSAlan Cox current->protection & MASK(current)); 2036df8bae1dSRodney W. Grimes #undef MASK 2037df8bae1dSRodney W. Grimes } 20387d78abc9SJohn Dyson vm_map_simplify_entry(map, current); 2039df8bae1dSRodney W. Grimes current = current->next; 2040df8bae1dSRodney W. Grimes } 2041df8bae1dSRodney W. Grimes vm_map_unlock(map); 2042df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 2043df8bae1dSRodney W. Grimes } 2044df8bae1dSRodney W. Grimes 2045df8bae1dSRodney W. Grimes /* 2046867a482dSJohn Dyson * vm_map_madvise: 2047867a482dSJohn Dyson * 2048867a482dSJohn Dyson * This routine traverses a process's map handling the madvise 2049f7fc307aSAlan Cox * system call. Advisories are classified as either those affecting 2050f7fc307aSAlan Cox * the vm_map_entry structure, or those affecting the underlying 2051f7fc307aSAlan Cox * objects. 2052867a482dSJohn Dyson */ 2053b4309055SMatthew Dillon int 20541b40f8c0SMatthew Dillon vm_map_madvise( 20551b40f8c0SMatthew Dillon vm_map_t map, 20561b40f8c0SMatthew Dillon vm_offset_t start, 20571b40f8c0SMatthew Dillon vm_offset_t end, 20581b40f8c0SMatthew Dillon int behav) 2059867a482dSJohn Dyson { 2060f7fc307aSAlan Cox vm_map_entry_t current, entry; 2061b4309055SMatthew Dillon int modify_map = 0; 2062867a482dSJohn Dyson 2063b4309055SMatthew Dillon /* 2064b4309055SMatthew Dillon * Some madvise calls directly modify the vm_map_entry, in which case 2065b4309055SMatthew Dillon * we need to use an exclusive lock on the map and we need to perform 2066b4309055SMatthew Dillon * various clipping operations.
Otherwise we only need a read-lock 2067b4309055SMatthew Dillon * on the map. 2068b4309055SMatthew Dillon */ 2069b4309055SMatthew Dillon switch(behav) { 2070b4309055SMatthew Dillon case MADV_NORMAL: 2071b4309055SMatthew Dillon case MADV_SEQUENTIAL: 2072b4309055SMatthew Dillon case MADV_RANDOM: 20734f79d873SMatthew Dillon case MADV_NOSYNC: 20744f79d873SMatthew Dillon case MADV_AUTOSYNC: 20759730a5daSPaul Saab case MADV_NOCORE: 20769730a5daSPaul Saab case MADV_CORE: 207779e9451fSKonstantin Belousov if (start == end) 207879e9451fSKonstantin Belousov return (KERN_SUCCESS); 2079b4309055SMatthew Dillon modify_map = 1; 2080867a482dSJohn Dyson vm_map_lock(map); 2081b4309055SMatthew Dillon break; 2082b4309055SMatthew Dillon case MADV_WILLNEED: 2083b4309055SMatthew Dillon case MADV_DONTNEED: 2084b4309055SMatthew Dillon case MADV_FREE: 208579e9451fSKonstantin Belousov if (start == end) 208679e9451fSKonstantin Belousov return (KERN_SUCCESS); 2087f7fc307aSAlan Cox vm_map_lock_read(map); 2088b4309055SMatthew Dillon break; 2089b4309055SMatthew Dillon default: 2090b4309055SMatthew Dillon return (KERN_INVALID_ARGUMENT); 2091b4309055SMatthew Dillon } 2092b4309055SMatthew Dillon 2093b4309055SMatthew Dillon /* 2094b4309055SMatthew Dillon * Locate starting entry and clip if necessary. 2095b4309055SMatthew Dillon */ 2096867a482dSJohn Dyson VM_MAP_RANGE_CHECK(map, start, end); 2097867a482dSJohn Dyson 2098867a482dSJohn Dyson if (vm_map_lookup_entry(map, start, &entry)) { 2099f7fc307aSAlan Cox if (modify_map) 2100867a482dSJohn Dyson vm_map_clip_start(map, entry, start); 2101b4309055SMatthew Dillon } else { 2102867a482dSJohn Dyson entry = entry->next; 2103b4309055SMatthew Dillon } 2104867a482dSJohn Dyson 2105f7fc307aSAlan Cox if (modify_map) { 2106f7fc307aSAlan Cox /* 2107f7fc307aSAlan Cox * madvise behaviors that are implemented in the vm_map_entry. 2108f7fc307aSAlan Cox * 2109f7fc307aSAlan Cox * We clip the vm_map_entry so that behavioral changes are 2110f7fc307aSAlan Cox * limited to the specified address range. 
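 *
 * Illustrative call (hypothetical range): after
 *
 *	(void) vm_map_madvise(map, start, end, MADV_NOSYNC);
 *
 * only the clipped entries inside [start, end) carry
 * MAP_ENTRY_NOSYNC; neighboring entries outside the range are
 * untouched.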
2111f7fc307aSAlan Cox */ 2112867a482dSJohn Dyson for (current = entry; 2113867a482dSJohn Dyson (current != &map->header) && (current->start < end); 2114b4309055SMatthew Dillon current = current->next 2115b4309055SMatthew Dillon ) { 2116f7fc307aSAlan Cox if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2117867a482dSJohn Dyson continue; 2118fed9a903SJohn Dyson 211947221757SJohn Dyson vm_map_clip_end(map, current, end); 2120fed9a903SJohn Dyson 2121f7fc307aSAlan Cox switch (behav) { 2122867a482dSJohn Dyson case MADV_NORMAL: 21237f866e4bSAlan Cox vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2124867a482dSJohn Dyson break; 2125867a482dSJohn Dyson case MADV_SEQUENTIAL: 21267f866e4bSAlan Cox vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2127867a482dSJohn Dyson break; 2128867a482dSJohn Dyson case MADV_RANDOM: 21297f866e4bSAlan Cox vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2130867a482dSJohn Dyson break; 21314f79d873SMatthew Dillon case MADV_NOSYNC: 21324f79d873SMatthew Dillon current->eflags |= MAP_ENTRY_NOSYNC; 21334f79d873SMatthew Dillon break; 21344f79d873SMatthew Dillon case MADV_AUTOSYNC: 21354f79d873SMatthew Dillon current->eflags &= ~MAP_ENTRY_NOSYNC; 21364f79d873SMatthew Dillon break; 21379730a5daSPaul Saab case MADV_NOCORE: 21389730a5daSPaul Saab current->eflags |= MAP_ENTRY_NOCOREDUMP; 21399730a5daSPaul Saab break; 21409730a5daSPaul Saab case MADV_CORE: 21419730a5daSPaul Saab current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 21429730a5daSPaul Saab break; 2143867a482dSJohn Dyson default: 2144867a482dSJohn Dyson break; 2145867a482dSJohn Dyson } 2146f7fc307aSAlan Cox vm_map_simplify_entry(map, current); 2147867a482dSJohn Dyson } 2148867a482dSJohn Dyson vm_map_unlock(map); 2149b4309055SMatthew Dillon } else { 215092a59946SJohn Baldwin vm_pindex_t pstart, pend; 2151f7fc307aSAlan Cox 2152f7fc307aSAlan Cox /* 2153f7fc307aSAlan Cox * madvise behaviors that are implemented in the underlying 2154f7fc307aSAlan Cox * vm_object. 2155f7fc307aSAlan Cox * 2156f7fc307aSAlan Cox * Since we don't clip the vm_map_entry, we have to clip 2157f7fc307aSAlan Cox * the vm_object pindex and count. 2158f7fc307aSAlan Cox */ 2159f7fc307aSAlan Cox for (current = entry; 2160f7fc307aSAlan Cox (current != &map->header) && (current->start < end); 2161b4309055SMatthew Dillon current = current->next 2162b4309055SMatthew Dillon ) { 216351321f7cSAlan Cox vm_offset_t useEnd, useStart; 21645f99b57cSMatthew Dillon 2165f7fc307aSAlan Cox if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2166f7fc307aSAlan Cox continue; 2167f7fc307aSAlan Cox 216892a59946SJohn Baldwin pstart = OFF_TO_IDX(current->offset); 216992a59946SJohn Baldwin pend = pstart + atop(current->end - current->start); 21705f99b57cSMatthew Dillon useStart = current->start; 217151321f7cSAlan Cox useEnd = current->end; 2172f7fc307aSAlan Cox 2173f7fc307aSAlan Cox if (current->start < start) { 217492a59946SJohn Baldwin pstart += atop(start - current->start); 21755f99b57cSMatthew Dillon useStart = start; 2176f7fc307aSAlan Cox } 217751321f7cSAlan Cox if (current->end > end) { 217892a59946SJohn Baldwin pend -= atop(current->end - end); 217951321f7cSAlan Cox useEnd = end; 218051321f7cSAlan Cox } 2181f7fc307aSAlan Cox 218292a59946SJohn Baldwin if (pstart >= pend) 2183f7fc307aSAlan Cox continue; 2184f7fc307aSAlan Cox 218551321f7cSAlan Cox /* 218651321f7cSAlan Cox * Perform the pmap_advise() before clearing 218751321f7cSAlan Cox * PGA_REFERENCED in vm_page_advise(). 
Otherwise, a 218851321f7cSAlan Cox * concurrent pmap operation, such as pmap_remove(), 218951321f7cSAlan Cox * could clear a reference in the pmap and set 219051321f7cSAlan Cox * PGA_REFERENCED on the page before the pmap_advise() 219151321f7cSAlan Cox * had completed. Consequently, the page would appear 219251321f7cSAlan Cox * referenced based upon an old reference that 219351321f7cSAlan Cox * occurred before this pmap_advise() ran. 219451321f7cSAlan Cox */ 219551321f7cSAlan Cox if (behav == MADV_DONTNEED || behav == MADV_FREE) 219651321f7cSAlan Cox pmap_advise(map->pmap, useStart, useEnd, 219751321f7cSAlan Cox behav); 219851321f7cSAlan Cox 219992a59946SJohn Baldwin vm_object_madvise(current->object.vm_object, pstart, 220092a59946SJohn Baldwin pend, behav); 2201b4309055SMatthew Dillon if (behav == MADV_WILLNEED) { 22020551c08dSAlan Cox vm_map_pmap_enter(map, 22035f99b57cSMatthew Dillon useStart, 22044da4d293SAlan Cox current->protection, 2205f7fc307aSAlan Cox current->object.vm_object, 220692a59946SJohn Baldwin pstart, 220792a59946SJohn Baldwin ptoa(pend - pstart), 2208e3026983SMatthew Dillon MAP_PREFAULT_MADVISE 2209b4309055SMatthew Dillon ); 2210f7fc307aSAlan Cox } 2211f7fc307aSAlan Cox } 2212f7fc307aSAlan Cox vm_map_unlock_read(map); 2213f7fc307aSAlan Cox } 2214b4309055SMatthew Dillon return (0); 2215867a482dSJohn Dyson } 2216867a482dSJohn Dyson 2217867a482dSJohn Dyson 2218867a482dSJohn Dyson /* 2219df8bae1dSRodney W. Grimes * vm_map_inherit: 2220df8bae1dSRodney W. Grimes * 2221df8bae1dSRodney W. Grimes * Sets the inheritance of the specified address 2222df8bae1dSRodney W. Grimes * range in the target map. Inheritance 2223df8bae1dSRodney W. Grimes * affects how the map will be shared with 2224e2abaaaaSAlan Cox * child maps at the time of vmspace_fork. 2225df8bae1dSRodney W. Grimes */ 2226df8bae1dSRodney W. Grimes int 2227b9dcd593SBruce Evans vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2228b9dcd593SBruce Evans vm_inherit_t new_inheritance) 2229df8bae1dSRodney W. Grimes { 2230c0877f10SJohn Dyson vm_map_entry_t entry; 2231df8bae1dSRodney W. Grimes vm_map_entry_t temp_entry; 2232df8bae1dSRodney W. Grimes 2233df8bae1dSRodney W. Grimes switch (new_inheritance) { 2234df8bae1dSRodney W. Grimes case VM_INHERIT_NONE: 2235df8bae1dSRodney W. Grimes case VM_INHERIT_COPY: 2236df8bae1dSRodney W. Grimes case VM_INHERIT_SHARE: 2237df8bae1dSRodney W. Grimes break; 2238df8bae1dSRodney W. Grimes default: 2239df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 2240df8bae1dSRodney W. Grimes } 224179e9451fSKonstantin Belousov if (start == end) 224279e9451fSKonstantin Belousov return (KERN_SUCCESS); 2243df8bae1dSRodney W. Grimes vm_map_lock(map); 2244df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 2245df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &temp_entry)) { 2246df8bae1dSRodney W. Grimes entry = temp_entry; 2247df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 22480d94caffSDavid Greenman } else 2249df8bae1dSRodney W. Grimes entry = temp_entry->next; 2250df8bae1dSRodney W. Grimes while ((entry != &map->header) && (entry->start < end)) { 2251df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 2252df8bae1dSRodney W. Grimes entry->inheritance = new_inheritance; 225344428f62SAlan Cox vm_map_simplify_entry(map, entry); 2254df8bae1dSRodney W. Grimes entry = entry->next; 2255df8bae1dSRodney W. Grimes } 2256df8bae1dSRodney W. Grimes vm_map_unlock(map); 2257df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 2258df8bae1dSRodney W. 
/*
 * vm_map_unwire:
 *
 *	Implements both kernel and user unwiring.
 */
int
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_unwire;

	if (start == end)
		return (KERN_SUCCESS);
	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, 0)) {
				/*
				 * Allow interruption of user unwiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * First_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
		    entry->wiring_thread == NULL,
		    ("owned map entry %p", entry));
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry->wiring_thread = curthread;
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		/*
		 * If system unwiring, require that the entry is system wired.
		 */
		if (!user_unwire &&
		    vm_map_entry_system_wired_count(entry) == 0) {
			end = entry->end;
			rv = KERN_INVALID_ARGUMENT;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_unwire: lookup failed"));
	}
	for (entry = first_entry; entry != &map->header && entry->start < end;
	    entry = entry->next) {
		/*
		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
		 * space in the unwired region could have been mapped
		 * while the map lock was dropped for draining
		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
		 * could be simultaneously wiring this new mapping entry.
		 * Detect these cases and skip any entries marked as in
		 * transition by us.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
		    entry->wiring_thread != curthread) {
			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
			continue;
		}

		if (rv == KERN_SUCCESS && (!user_unwire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
			if (user_unwire)
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			if (entry->wired_count == 1)
				vm_map_entry_unwire(map, entry);
			else
				entry->wired_count--;
		}
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
		    ("vm_map_unwire: in-transition flag missing %p", entry));
		KASSERT(entry->wiring_thread == curthread,
		    ("vm_map_unwire: alien wire %p", entry));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		entry->wiring_thread = NULL;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}
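/*
 * Usage sketch (assumed, modeled on the munlock(2) path in vm_mmap.c):
 * user unwiring of a page-rounded range looks like
 *
 *	rv = vm_map_unwire(&td->td_proc->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * Kernel callers pass VM_MAP_WIRE_SYSTEM instead, and
 * VM_MAP_WIRE_HOLESOK when gaps in the range are acceptable.
 */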
/*
 * vm_map_wire_entry_failure:
 *
 *	Handle a wiring failure on the given entry.
 *
 *	The map should be locked.
 */
static void
vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
	    entry->wired_count == 1,
	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
	KASSERT(failed_addr < entry->end,
	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));

	/*
	 * If any pages at the start of this entry were successfully wired,
	 * then unwire them.
	 */
	if (failed_addr > entry->start) {
		pmap_unwire(map->pmap, entry->start, failed_addr);
		vm_object_unwire(entry->object.vm_object, entry->offset,
		    failed_addr - entry->start, PQ_ACTIVE);
	}

	/*
	 * Assign an out-of-range value to represent the failure to wire this
	 * entry.
	 */
	entry->wired_count = -1;
}

/*
 * vm_map_wire:
 *
 *	Implements both kernel and user wiring.
 */
int
vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t faddr, saved_end, saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_wire;
	vm_prot_t prot;

	if (start == end)
		return (KERN_SUCCESS);
	prot = 0;
	if (flags & VM_MAP_WIRE_WRITE)
		prot |= VM_PROT_WRITE;
	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, 0)) {
				/*
				 * Allow interruption of user wiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * first_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
		    entry->wiring_thread == NULL,
		    ("owned map entry %p", entry));
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry->wiring_thread = curthread;
		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
		    || (entry->protection & prot) != prot) {
			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
				end = entry->end;
				rv = KERN_INVALID_ADDRESS;
				goto done;
			}
			goto next_entry;
		}
		if (entry->wired_count == 0) {
			entry->wired_count++;
			saved_start = entry->start;
			saved_end = entry->end;

			/*
			 * Release the map lock, relying on the in-transition
			 * mark.  Mark the map busy for fork.
			 */
			vm_map_busy(map);
			vm_map_unlock(map);

			faddr = saved_start;
			do {
				/*
				 * Simulate a fault to get the page and enter
				 * it into the physical map.
				 */
				if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
				    VM_FAULT_CHANGE_WIRING)) != KERN_SUCCESS)
					break;
			} while ((faddr += PAGE_SIZE) < saved_end);
			vm_map_lock(map);
			vm_map_unbusy(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.  The entry
				 * may have been clipped, but NOT merged or
				 * deleted.
				 */
				result = vm_map_lookup_entry(map, saved_start,
				    &tmp_entry);
				KASSERT(result, ("vm_map_wire: lookup failed"));
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
				while (entry->end < saved_end) {
					/*
					 * In case of failure, handle entries
					 * that were not fully wired here;
					 * fully wired entries are handled
					 * later.
					 */
					if (rv != KERN_SUCCESS &&
					    faddr < entry->end)
						vm_map_wire_entry_failure(map,
						    entry, faddr);
					entry = entry->next;
				}
			}
			last_timestamp = map->timestamp;
			if (rv != KERN_SUCCESS) {
				vm_map_wire_entry_failure(map, entry, faddr);
				end = entry->end;
				goto done;
			}
		} else if (!user_wire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
			entry->wired_count++;
		}
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
	next_entry:
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_wire: lookup failed"));
	}
	for (entry = first_entry; entry != &map->header && entry->start < end;
	    entry = entry->next) {
		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
			goto next_entry_done;

		/*
		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
		 * space in the unwired region could have been mapped
		 * while the map lock was dropped for faulting in the
		 * pages or draining MAP_ENTRY_IN_TRANSITION.
		 * Moreover, another thread could be simultaneously
		 * wiring this new mapping entry.
		 * Detect these cases and skip any entries marked as in
		 * transition by us.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
		    entry->wiring_thread != curthread) {
			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
			    ("vm_map_wire: !HOLESOK and new/changed entry"));
			continue;
		}

		if (rv == KERN_SUCCESS) {
			if (user_wire)
				entry->eflags |= MAP_ENTRY_USER_WIRED;
		} else if (entry->wired_count == -1) {
			/*
			 * Wiring failed on this entry.  Thus, unwiring is
			 * unnecessary.
			 */
			entry->wired_count = 0;
		} else if (!user_wire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
			/*
			 * Undo the wiring.  Wiring succeeded on this entry
			 * but failed on a later entry.
			 */
			if (entry->wired_count == 1)
				vm_map_entry_unwire(map, entry);
			else
				entry->wired_count--;
		}
	next_entry_done:
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
		    ("vm_map_wire: in-transition flag missing %p", entry));
		KASSERT(entry->wiring_thread == curthread,
		    ("vm_map_wire: alien wire %p", entry));
		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
		    MAP_ENTRY_WIRE_SKIPPED);
		entry->wiring_thread = NULL;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}
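/*
 * Usage sketch (assumed, modeled on the mlock(2) path): wiring is the
 * mirror image of the unwiring call shown earlier,
 *
 *	rv = vm_map_wire(&td->td_proc->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * with VM_MAP_WIRE_WRITE added when the caller needs the pages wired
 * for writing as well as reading.
 */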
/*
 * vm_map_sync
 *
 *	Push any dirty cached pages in the address range to their pager.
 *	If syncio is TRUE, dirty pages are written synchronously.
 *	If invalidate is TRUE, any cached pages are freed as well.
 *
 *	If the size of the region from start to end is zero, we are
 *	supposed to flush all modified pages within the region containing
 *	start.  Unfortunately, a region can be split or coalesced with
 *	neighboring regions, making it difficult to determine what the
 *	original region was.  Therefore, we approximate this requirement by
 *	flushing the current region containing start.
 *
 *	Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_sync(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t syncio,
	boolean_t invalidate)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;
	unsigned int last_timestamp;
	boolean_t failed;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	} else if (start == end) {
		start = entry->start;
		end = entry->end;
	}
	/*
	 * Make a first pass to check for user-wired memory and holes.
	 */
	for (current = entry; current != &map->header && current->start < end;
	    current = current->next) {
		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		    current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	if (invalidate)
		pmap_remove(map->pmap, start, end);
	failed = FALSE;

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current != &map->header && current->start < end;) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.sub_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		vm_object_reference(object);
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);
		if (!vm_object_sync(object, offset, size, syncio, invalidate))
			failed = TRUE;
		start += size;
		vm_object_deallocate(object);
		vm_map_lock_read(map);
		if (last_timestamp == map->timestamp ||
		    !vm_map_lookup_entry(map, start, &current))
			current = current->next;
	}

	vm_map_unlock_read(map);
	return (failed ? KERN_FAILURE : KERN_SUCCESS);
}
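/*
 * Usage sketch (assumed, modeled on the msync(2) path): MS_ASYNC and
 * MS_INVALIDATE translate directly onto the last two arguments,
 * syncio and invalidate respectively:
 *
 *	rv = vm_map_sync(map, addr, addr + size,
 *	    (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0);
 */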
/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(entry->wired_count > 0,
	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
	pmap_unwire(map->pmap, entry->start, entry->end);
	vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
	    entry->start, PQ_ACTIVE);
	entry->wired_count = 0;
}

static void
vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
{

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
		vm_object_deallocate(entry->object.vm_object);
	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
	vm_object_t object;
	vm_pindex_t offidxstart, offidxend, count, size1;
	vm_ooffset_t size;

	vm_map_entry_unlink(map, entry);
	object = entry->object.vm_object;
	size = entry->end - entry->start;
	map->size -= size;

	if (entry->cred != NULL) {
		swap_release_by_cred(size, entry->cred);
		crfree(entry->cred);
	}

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
	    (object != NULL)) {
		KASSERT(entry->cred == NULL || object->cred == NULL ||
		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
		count = OFF_TO_IDX(size);
		offidxstart = OFF_TO_IDX(entry->offset);
		offidxend = offidxstart + count;
		VM_OBJECT_WLOCK(object);
		if (object->ref_count != 1 &&
		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
		    object == kernel_object || object == kmem_object)) {
			vm_object_collapse(object);

			/*
			 * The option OBJPR_NOTMAPPED can be passed here
			 * because vm_map_delete() already performed
			 * pmap_remove() on the only mapping to this range
			 * of pages.
			 */
			vm_object_page_remove(object, offidxstart, offidxend,
			    OBJPR_NOTMAPPED);
			if (object->type == OBJT_SWAP)
				swap_pager_freespace(object, offidxstart, count);
			if (offidxend >= object->size &&
			    offidxstart < object->size) {
				size1 = object->size;
				object->size = offidxstart;
				if (object->cred != NULL) {
					size1 -= object->size;
					KASSERT(object->charge >= ptoa(size1),
					    ("vm_map_entry_delete: object->charge < 0"));
					swap_release_by_cred(ptoa(size1), object->cred);
					object->charge -= ptoa(size1);
				}
			}
		}
		VM_OBJECT_WUNLOCK(object);
	} else
		entry->object.vm_object = NULL;
	if (map->system_map)
		vm_map_entry_deallocate(entry, TRUE);
	else {
		entry->next = curthread->td_map_def_user;
		curthread->td_map_def_user = entry;
	}
}
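/*
 * Note on the deferred list above (a sketch of the expected call
 * pattern): entries queued on td_map_def_user are freed by
 * vm_map_process_deferred() only after the map lock is dropped,
 * because vm_object_deallocate() may need to acquire a vnode lock.
 * A typical caller therefore looks like
 *
 *	vm_map_lock(map);
 *	(void) vm_map_delete(map, start, end);
 *	vm_map_unlock(map);	(runs vm_map_process_deferred())
 */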
/*
 * vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 */
int
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	VM_MAP_ASSERT_LOCKED(map);
	if (start == end)
		return (KERN_SUCCESS);

	/*
	 * Find the start of the region, and clip it
	 */
	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);
	}

	/*
	 * Step through all entries in this region
	 */
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;

		/*
		 * Wait for wiring or unwiring of an entry to complete.
		 * Also wait for any system wirings to disappear on
		 * user maps.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
		    (vm_map_pmap(map) != kernel_pmap &&
		    vm_map_entry_system_wired_count(entry) != 0)) {
			unsigned int last_timestamp;
			vm_offset_t saved_start;
			vm_map_entry_t tmp_entry;

			saved_start = entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			last_timestamp = map->timestamp;
			(void) vm_map_unlock_and_wait(map, 0);
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry))
					entry = tmp_entry->next;
				else {
					entry = tmp_entry;
					vm_map_clip_start(map, entry,
					    saved_start);
				}
			}
			continue;
		}
		vm_map_clip_end(map, entry, end);

		next = entry->next;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}
		pmap_remove(map->pmap, entry->start, entry->end);

		/*
		 * Delete the entry only after removing all pmap
		 * entries pointing to its pages.  (Otherwise, its
		 * page frames may be reallocated, and any modify bits
		 * will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int result;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);
	return (result);
}
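/*
 * Usage sketch (illustrative only; the real callers live in vm_mmap.c
 * and friends): munmap(2)-style teardown calls the exported wrapper
 * with a page-rounded range,
 *
 *	(void) vm_map_remove(map, trunc_page(addr),
 *	    round_page(addr + len));
 */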
/*
 * vm_map_check_protection:
 *
 *	Assert that the target map allows the specified privilege on the
 *	entire address region given.  The entire region must be allocated.
 *
 *	WARNING!  This code does not and should not check whether the
 *	contents of the region is accessible.  For example a smaller file
 *	might be mapped into a larger address space.
 *
 *	NOTE!  This code is also called by munmap().
 *
 *	The map must be locked.  A read lock is sufficient.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_prot_t protection)
{
	vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry))
		return (FALSE);
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header)
			return (FALSE);
		/*
		 * No holes allowed!
		 */
		if (start < entry->start)
			return (FALSE);
		/*
		 * Check protection associated with entry.
		 */
		if ((entry->protection & protection) != protection)
			return (FALSE);
		/* go to next entry */
		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}
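/*
 * Usage sketch (hypothetical caller): with the map read-locked,
 * verify that a whole range is at least readable before touching it:
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ))
 *		return (KERN_PROTECTION_FAILURE);
 */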
/*
 * vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(
	vm_map_t src_map,
	vm_map_t dst_map,
	vm_map_entry_t src_entry,
	vm_map_entry_t dst_entry,
	vm_ooffset_t *fork_charge)
{
	vm_object_t src_object;
	vm_map_entry_t fake_entry;
	vm_offset_t size;
	struct ucred *cred;
	int charged;

	VM_MAP_ASSERT_LOCKED(dst_map);

	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
		return;

	if (src_entry->wired_count == 0 ||
	    (src_entry->protection & VM_PROT_WRITE) == 0) {
		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
		    (src_entry->protection & VM_PROT_WRITE) != 0) {
			pmap_protect(src_map->pmap,
			    src_entry->start,
			    src_entry->end,
			    src_entry->protection & ~VM_PROT_WRITE);
		}

		/*
		 * Make a copy of the object.
		 */
		size = src_entry->end - src_entry->start;
		if ((src_object = src_entry->object.vm_object) != NULL) {
			VM_OBJECT_WLOCK(src_object);
			charged = ENTRY_CHARGED(src_entry);
			if ((src_object->handle == NULL) &&
			    (src_object->type == OBJT_DEFAULT ||
			    src_object->type == OBJT_SWAP)) {
				vm_object_collapse(src_object);
				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
					vm_object_split(src_entry);
					src_object = src_entry->object.vm_object;
				}
			}
			vm_object_reference_locked(src_object);
			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
			if (src_entry->cred != NULL &&
			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
				KASSERT(src_object->cred == NULL,
				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
				    src_object));
				src_object->cred = src_entry->cred;
				src_object->charge = size;
			}
			VM_OBJECT_WUNLOCK(src_object);
			dst_entry->object.vm_object = src_object;
			if (charged) {
				cred = curthread->td_ucred;
				crhold(cred);
				dst_entry->cred = cred;
				*fork_charge += size;
				if (!(src_entry->eflags &
				    MAP_ENTRY_NEEDS_COPY)) {
					crhold(cred);
					src_entry->cred = cred;
					*fork_charge += size;
				}
			}
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->offset = src_entry->offset;
			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
				/*
				 * MAP_ENTRY_VN_WRITECNT cannot
				 * indicate write reference from
				 * src_entry, since the entry is
				 * marked as needs copy.  Allocate a
				 * fake entry that is used to
				 * decrement object->un_pager.vnp.writecount
				 * at the appropriate time.  Attach
				 * fake_entry to the deferred list.
				 */
				fake_entry = vm_map_entry_create(dst_map);
				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
				vm_object_reference(src_object);
				fake_entry->object.vm_object = src_object;
				fake_entry->start = src_entry->start;
				fake_entry->end = src_entry->end;
				fake_entry->next = curthread->td_map_def_user;
				curthread->td_map_def_user = fake_entry;
			}
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
			if (src_entry->cred != NULL) {
				dst_entry->cred = curthread->td_ucred;
				crhold(dst_entry->cred);
				*fork_charge += size;
			}
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * We don't want to make writeable wired pages copy-on-write.
		 * Immediately copy these pages into the new map by simulating
		 * page faults.  The new pages are pageable.
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
		    fork_charge);
	}
}
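/*
 * Resulting state for the common unwired case above (a sketch): both
 * entries end up referencing src_object with
 *
 *	src_entry->eflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
 *	dst_entry->eflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
 *
 * and pmap_copy() installs only read-only mappings, so the first
 * write fault through either map shadows the object and copies the
 * faulting page.
 */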
/*
 * vmspace_map_entry_forked:
 * Update the newly-forked vmspace each time a map entry is inherited
 * or copied.  The values for vm_dsize and vm_tsize are approximate
 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
 */
static void
vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
    vm_map_entry_t entry)
{
	vm_size_t entrysize;
	vm_offset_t newend;

	entrysize = entry->end - entry->start;
	vm2->vm_map.size += entrysize;
	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
		vm2->vm_ssize += btoc(entrysize);
	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
		newend = MIN(entry->end,
		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
		vm2->vm_dsize += btoc(newend - entry->start);
	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
		newend = MIN(entry->end,
		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
		vm2->vm_tsize += btoc(newend - entry->start);
	}
}
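/*
 * Worked example with hypothetical numbers: if vm1's data segment is
 * [vm_daddr, vm_daddr + ctob(vm_dsize)) = [0x800000, 0xa00000) and a
 * forked entry spans [0x9f0000, 0xb00000), the entry starts inside
 * the data segment, so only the overlap is counted:
 *
 *	newend = MIN(0xb00000, 0xa00000);
 *	vm2->vm_dsize += btoc(0xa00000 - 0x9f0000);
 */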
/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * XXX It might be worth coalescing the entries added to the new vmspace.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
{
	struct vmspace *vm2;
	vm_map_t new_map, old_map;
	vm_map_entry_t new_entry, old_entry;
	vm_object_t object;
	int locked;

	old_map = &vm1->vm_map;
	/* Copy immutable fields of vm1 to vm2. */
	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
	if (vm2 == NULL)
		return (NULL);
	vm2->vm_taddr = vm1->vm_taddr;
	vm2->vm_daddr = vm1->vm_daddr;
	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
	vm_map_lock(old_map);
	if (old_map->busy)
		vm_map_wait_busy(old_map);
	new_map = &vm2->vm_map;
	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
	KASSERT(locked, ("vmspace_fork: lock failed"));

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if
			 * necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
				    atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = 0;
				if (old_entry->cred != NULL) {
					object->cred = old_entry->cred;
					object->charge = old_entry->end -
					    old_entry->start;
					old_entry->cred = NULL;
				}
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
				    &old_entry->offset,
				    old_entry->end - old_entry->start);
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);

				/*
				 * As in vm_map_simplify_entry(), the
				 * vnode lock will not be acquired in
				 * this call to vm_object_deallocate().
				 */
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			VM_OBJECT_WLOCK(object);
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
			if (old_entry->cred != NULL) {
				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
				object->cred = old_entry->cred;
				object->charge = old_entry->end - old_entry->start;
				old_entry->cred = NULL;
			}

			/*
			 * Assert the correct state of the vnode
			 * v_writecount while the object is locked, to
			 * not relock it later for the assertion
			 * correctness.
			 */
			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
			    object->type == OBJT_VNODE) {
				KASSERT(((struct vnode *)object->handle)->
				    v_writecount > 0,
				    ("vmspace_fork: v_writecount %p", object));
				KASSERT(object->un_pager.vnp.writemappings > 0,
				    ("vmspace_fork: vnp.writecount %p",
				    object));
			}
			VM_OBJECT_WUNLOCK(object);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
			    MAP_ENTRY_IN_TRANSITION);
			new_entry->wiring_thread = NULL;
			new_entry->wired_count = 0;
			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
				vnode_pager_update_writecount(object,
				    new_entry->start, new_entry->end);
			}

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vmspace_map_entry_forked(vm1, vm2, new_entry);

			/*
			 * Update the physical map
			 */
			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			/*
			 * Copied entry is COW over the old object.
			 */
			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
			new_entry->wiring_thread = NULL;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			new_entry->cred = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
340494f7e29aSAlan Cox int
340594f7e29aSAlan Cox vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
340694f7e29aSAlan Cox     vm_prot_t prot, vm_prot_t max, int cow)
340794f7e29aSAlan Cox {
34084648ba0aSKonstantin Belousov 	vm_size_t growsize, init_ssize;
34094648ba0aSKonstantin Belousov 	rlim_t lmemlim, vmemlim;
34104648ba0aSKonstantin Belousov 	int rv;
34114648ba0aSKonstantin Belousov 
34124648ba0aSKonstantin Belousov 	growsize = sgrowsiz;
34134648ba0aSKonstantin Belousov 	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
34144648ba0aSKonstantin Belousov 	vm_map_lock(map);
34154648ba0aSKonstantin Belousov 	PROC_LOCK(curproc);
34164648ba0aSKonstantin Belousov 	lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
34174648ba0aSKonstantin Belousov 	vmemlim = lim_cur(curproc, RLIMIT_VMEM);
34184648ba0aSKonstantin Belousov 	PROC_UNLOCK(curproc);
34194648ba0aSKonstantin Belousov 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
34204648ba0aSKonstantin Belousov 		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
34214648ba0aSKonstantin Belousov 			rv = KERN_NO_SPACE;
34224648ba0aSKonstantin Belousov 			goto out;
34234648ba0aSKonstantin Belousov 		}
34244648ba0aSKonstantin Belousov 	}
34254648ba0aSKonstantin Belousov 	/* If we would blow our VMEM resource limit, no go */
34264648ba0aSKonstantin Belousov 	if (map->size + init_ssize > vmemlim) {
34274648ba0aSKonstantin Belousov 		rv = KERN_NO_SPACE;
34284648ba0aSKonstantin Belousov 		goto out;
34294648ba0aSKonstantin Belousov 	}
3430e1f92cccSAlan Cox 	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
34314648ba0aSKonstantin Belousov 	    max, cow);
34324648ba0aSKonstantin Belousov out:
34334648ba0aSKonstantin Belousov 	vm_map_unlock(map);
34344648ba0aSKonstantin Belousov 	return (rv);
34354648ba0aSKonstantin Belousov }
34364648ba0aSKonstantin Belousov 
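/*
 * Illustrative sketch (example only): userland typically reaches
 * vm_map_stack() through mmap(2) with MAP_STACK:
 *
 *	void *stk = mmap(NULL, maxsize, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE | MAP_STACK, -1, 0);
 *
 * Only init_ssize bytes are mapped up front; the remainder of the
 * reservation is committed on demand by vm_map_growstack() below.
 */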
34374648ba0aSKonstantin Belousov static int
34384648ba0aSKonstantin Belousov vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
34394648ba0aSKonstantin Belousov     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
34404648ba0aSKonstantin Belousov {
3441fd75d710SMarcel Moolenaar 	vm_map_entry_t new_entry, prev_entry;
3442fd75d710SMarcel Moolenaar 	vm_offset_t bot, top;
34434648ba0aSKonstantin Belousov 	vm_size_t init_ssize;
3444fd75d710SMarcel Moolenaar 	int orient, rv;
344594f7e29aSAlan Cox 
3446fd75d710SMarcel Moolenaar 	/*
3447fd75d710SMarcel Moolenaar 	 * The stack orientation is piggybacked with the cow argument.
3448fd75d710SMarcel Moolenaar 	 * Extract it into orient and mask the cow argument so that we
3449fd75d710SMarcel Moolenaar 	 * don't pass it around further.
3450fd75d710SMarcel Moolenaar 	 * NOTE: We explicitly allow bi-directional stacks.
3451fd75d710SMarcel Moolenaar 	 */
3452fd75d710SMarcel Moolenaar 	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3453fd75d710SMarcel Moolenaar 	KASSERT(orient != 0, ("No stack grow direction"));
3454fd75d710SMarcel Moolenaar 
345577bc7900SKonstantin Belousov 	if (addrbos < vm_map_min(map) ||
345677bc7900SKonstantin Belousov 	    addrbos > vm_map_max(map) ||
345777bc7900SKonstantin Belousov 	    addrbos + max_ssize < addrbos)
345894f7e29aSAlan Cox 		return (KERN_NO_SPACE);
3459fd75d710SMarcel Moolenaar 
3460cfe52ecfSAndrey Zonov 	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
346194f7e29aSAlan Cox 
346294f7e29aSAlan Cox 	/* If addr is already mapped, no go */
34634648ba0aSKonstantin Belousov 	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
346494f7e29aSAlan Cox 		return (KERN_NO_SPACE);
3465a69ac174SMatthew Dillon 
3466fd75d710SMarcel Moolenaar 	/*
3467fd75d710SMarcel Moolenaar 	 * If we can't accommodate max_ssize in the current mapping, no go.
3468fd75d710SMarcel Moolenaar 	 * However, we need to be aware that subsequent user mappings might
3469fd75d710SMarcel Moolenaar 	 * map into the space we have reserved for stack, and currently this
3470fd75d710SMarcel Moolenaar 	 * space is not protected.
347194f7e29aSAlan Cox 	 *
3472fd75d710SMarcel Moolenaar 	 * Hopefully we will at least detect this condition when we try to
3473fd75d710SMarcel Moolenaar 	 * grow the stack.
347494f7e29aSAlan Cox 	 */
347594f7e29aSAlan Cox 	if ((prev_entry->next != &map->header) &&
34764648ba0aSKonstantin Belousov 	    (prev_entry->next->start < addrbos + max_ssize))
347794f7e29aSAlan Cox 		return (KERN_NO_SPACE);
347894f7e29aSAlan Cox 
3479fd75d710SMarcel Moolenaar 	/*
3480fd75d710SMarcel Moolenaar 	 * We initially map a stack of only init_ssize.  We will grow as
3481fd75d710SMarcel Moolenaar 	 * needed later.  Depending on the orientation of the stack (i.e.
3482fd75d710SMarcel Moolenaar 	 * the grow direction) we either map at the top of the range, the
3483fd75d710SMarcel Moolenaar 	 * bottom of the range or in the middle.
348494f7e29aSAlan Cox 	 *
3485fd75d710SMarcel Moolenaar 	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3486fd75d710SMarcel Moolenaar 	 * and cow to be 0.  Possibly we should eliminate these as input
3487fd75d710SMarcel Moolenaar 	 * parameters, and just pass these values here in the insert call.
348894f7e29aSAlan Cox 	 */
3489fd75d710SMarcel Moolenaar 	if (orient == MAP_STACK_GROWS_DOWN)
3490fd75d710SMarcel Moolenaar 		bot = addrbos + max_ssize - init_ssize;
3491fd75d710SMarcel Moolenaar 	else if (orient == MAP_STACK_GROWS_UP)
3492fd75d710SMarcel Moolenaar 		bot = addrbos;
3493fd75d710SMarcel Moolenaar 	else
3494fd75d710SMarcel Moolenaar 		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3495fd75d710SMarcel Moolenaar 	top = bot + init_ssize;
3496fd75d710SMarcel Moolenaar 	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
349794f7e29aSAlan Cox 
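	/*
	 * Worked example with hypothetical numbers (not from the original
	 * source): for MAP_STACK_GROWS_DOWN with addrbos = 0x10000000,
	 * max_ssize = 8MB (0x800000) and init_ssize = 128KB (0x20000),
	 *
	 *	bot = 0x10000000 + 0x800000 - 0x20000 = 0x107e0000
	 *	top = bot + 0x20000                   = 0x10800000
	 *
	 * so only the top 128KB is mapped, and [addrbos, bot) remains
	 * available as avail_ssize for future growth.
	 */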
3498fd75d710SMarcel Moolenaar 	/* Now set the avail_ssize amount. */
349994f7e29aSAlan Cox 	if (rv == KERN_SUCCESS) {
3500fd75d710SMarcel Moolenaar 		new_entry = prev_entry->next;
3501fd75d710SMarcel Moolenaar 		if (new_entry->end != top || new_entry->start != bot)
350294f7e29aSAlan Cox 			panic("Bad entry start/end for new stack entry");
3503b21a0008SMarcel Moolenaar 
3504fd75d710SMarcel Moolenaar 		new_entry->avail_ssize = max_ssize - init_ssize;
3505712efe66SAlan Cox 		KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
3506712efe66SAlan Cox 		    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
3507712efe66SAlan Cox 		    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3508712efe66SAlan Cox 		KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
3509712efe66SAlan Cox 		    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
3510712efe66SAlan Cox 		    ("new entry lacks MAP_ENTRY_GROWS_UP"));
351194f7e29aSAlan Cox 	}
351294f7e29aSAlan Cox 
351394f7e29aSAlan Cox 	return (rv);
351494f7e29aSAlan Cox }
351594f7e29aSAlan Cox 
35169a6d144fSKonstantin Belousov static int stack_guard_page = 0;
3517af3b2549SHans Petter Selasky SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
35189a6d144fSKonstantin Belousov     &stack_guard_page, 0,
35199a6d144fSKonstantin Belousov     "Insert stack guard page ahead of the growable segments.");
35209a6d144fSKonstantin Belousov 
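/*
 * Usage note (illustrative): the guard page is off by default and can
 * be toggled at runtime, e.g.
 *
 *	sysctl security.bsd.stack_guard_page=1
 *
 * With it enabled, the grow code below refuses to consume the final
 * page before a neighboring entry, leaving it unmapped as a tripwire.
 */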
352194f7e29aSAlan Cox /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
352294f7e29aSAlan Cox  * desired address is already mapped, or if we successfully grow
352394f7e29aSAlan Cox  * the stack.  Also returns KERN_SUCCESS if addr is outside the
352494f7e29aSAlan Cox  * stack range (this is strange, but preserves compatibility with
352594f7e29aSAlan Cox  * the grow function in vm_machdep.c).
352694f7e29aSAlan Cox  */
352794f7e29aSAlan Cox int
352894f7e29aSAlan Cox vm_map_growstack(struct proc *p, vm_offset_t addr)
352994f7e29aSAlan Cox {
3530b21a0008SMarcel Moolenaar 	vm_map_entry_t next_entry, prev_entry;
3531b21a0008SMarcel Moolenaar 	vm_map_entry_t new_entry, stack_entry;
353294f7e29aSAlan Cox 	struct vmspace *vm = p->p_vmspace;
353394f7e29aSAlan Cox 	vm_map_t map = &vm->vm_map;
353494f7e29aSAlan Cox 	vm_offset_t end;
3535cfe52ecfSAndrey Zonov 	vm_size_t growsize;
3536b21a0008SMarcel Moolenaar 	size_t grow_amount, max_grow;
35377e19eda4SAndrey Zonov 	rlim_t lmemlim, stacklim, vmemlim;
3538b21a0008SMarcel Moolenaar 	int is_procstack, rv;
3539ef694c1aSEdward Tomasz Napierala 	struct ucred *cred;
35401ba5ad42SEdward Tomasz Napierala #ifdef notyet
35411ba5ad42SEdward Tomasz Napierala 	uint64_t limit;
35421ba5ad42SEdward Tomasz Napierala #endif
3543afcc55f3SEdward Tomasz Napierala #ifdef RACCT
35441ba5ad42SEdward Tomasz Napierala 	int error;
3545afcc55f3SEdward Tomasz Napierala #endif
354623955314SAlfred Perlstein 
354794f7e29aSAlan Cox Retry:
354891d5354aSJohn Baldwin 	PROC_LOCK(p);
35497e19eda4SAndrey Zonov 	lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
355091d5354aSJohn Baldwin 	stacklim = lim_cur(p, RLIMIT_STACK);
3551bfee999dSAlan Cox 	vmemlim = lim_cur(p, RLIMIT_VMEM);
355291d5354aSJohn Baldwin 	PROC_UNLOCK(p);
355391d5354aSJohn Baldwin 
355494f7e29aSAlan Cox 	vm_map_lock_read(map);
355594f7e29aSAlan Cox 
355694f7e29aSAlan Cox 	/* If addr is already in the entry range, no need to grow. */
355794f7e29aSAlan Cox 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
355894f7e29aSAlan Cox 		vm_map_unlock_read(map);
35590cddd8f0SMatthew Dillon 		return (KERN_SUCCESS);
356094f7e29aSAlan Cox 	}
356194f7e29aSAlan Cox 
3562b21a0008SMarcel Moolenaar 	next_entry = prev_entry->next;
3563b21a0008SMarcel Moolenaar 	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3564b21a0008SMarcel Moolenaar 		/*
3565b21a0008SMarcel Moolenaar 		 * This entry does not grow upwards.  Since the address lies
3566b21a0008SMarcel Moolenaar 		 * beyond this entry, the next entry (if one exists) has to
3567b21a0008SMarcel Moolenaar 		 * be a downward growable entry.  The entry list header is
3568b21a0008SMarcel Moolenaar 		 * never a growable entry, so it suffices to check the flags.
356994f7e29aSAlan Cox 		 */
3570b21a0008SMarcel Moolenaar 		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
357194f7e29aSAlan Cox 			vm_map_unlock_read(map);
35720cddd8f0SMatthew Dillon 			return (KERN_SUCCESS);
357394f7e29aSAlan Cox 		}
3574b21a0008SMarcel Moolenaar 		stack_entry = next_entry;
3575b21a0008SMarcel Moolenaar 	} else {
3576b21a0008SMarcel Moolenaar 		/*
3577b21a0008SMarcel Moolenaar 		 * This entry grows upward.  If the next entry does not at
3578b21a0008SMarcel Moolenaar 		 * least grow downwards, this is the entry we need to grow.
3579b21a0008SMarcel Moolenaar 		 * Otherwise we have two possible choices and we have to
3580b21a0008SMarcel Moolenaar 		 * select one.
3581b21a0008SMarcel Moolenaar 		 */
3582b21a0008SMarcel Moolenaar 		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3583b21a0008SMarcel Moolenaar 			/*
3584b21a0008SMarcel Moolenaar 			 * We have two choices; grow the entry closest to
3585b21a0008SMarcel Moolenaar 			 * the address to minimize the amount of growth.
3586b21a0008SMarcel Moolenaar 			 */
3587b21a0008SMarcel Moolenaar 			if (addr - prev_entry->end <= next_entry->start - addr)
3588b21a0008SMarcel Moolenaar 				stack_entry = prev_entry;
3589b21a0008SMarcel Moolenaar 			else
3590b21a0008SMarcel Moolenaar 				stack_entry = next_entry;
3591b21a0008SMarcel Moolenaar 		} else
3592b21a0008SMarcel Moolenaar 			stack_entry = prev_entry;
3593b21a0008SMarcel Moolenaar 	}
359494f7e29aSAlan Cox 
3595b21a0008SMarcel Moolenaar 	if (stack_entry == next_entry) {
3596b21a0008SMarcel Moolenaar 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3597b21a0008SMarcel Moolenaar 		KASSERT(addr < stack_entry->start, ("foo"));
3598b21a0008SMarcel Moolenaar 		end = (prev_entry != &map->header) ? prev_entry->end :
3599b21a0008SMarcel Moolenaar 		    stack_entry->start - stack_entry->avail_ssize;
360094f7e29aSAlan Cox 		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3601b21a0008SMarcel Moolenaar 		max_grow = stack_entry->start - end;
3602b21a0008SMarcel Moolenaar 	} else {
3603b21a0008SMarcel Moolenaar 		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
360408667f6dSMarcel Moolenaar 		KASSERT(addr >= stack_entry->end, ("foo"));
3605b21a0008SMarcel Moolenaar 		end = (next_entry != &map->header) ? next_entry->start :
3606b21a0008SMarcel Moolenaar 		    stack_entry->end + stack_entry->avail_ssize;
3607fd75d710SMarcel Moolenaar 		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3608b21a0008SMarcel Moolenaar 		max_grow = end - stack_entry->end;
3609b21a0008SMarcel Moolenaar 	}
3610b21a0008SMarcel Moolenaar 
361194f7e29aSAlan Cox 	if (grow_amount > stack_entry->avail_ssize) {
361294f7e29aSAlan Cox 		vm_map_unlock_read(map);
36130cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
361494f7e29aSAlan Cox 	}
361594f7e29aSAlan Cox 
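	/*
	 * Illustrative layout for the two-choice case handled above,
	 * with the faulting address between an upward and a downward
	 * grower:
	 *
	 *	[prev_entry GROWS_UP ->]  ...addr...  [<- next_entry GROWS_DOWN]
	 *
	 * The entry whose edge is closer to addr is grown, minimizing
	 * the amount of address space that has to be committed.
	 */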
3616b21a0008SMarcel Moolenaar 	/*
3617b21a0008SMarcel Moolenaar 	 * If there is no longer enough space between the entries, refuse
3618b21a0008SMarcel Moolenaar 	 * to grow and adjust the available space.  Note: this should only
3619b21a0008SMarcel Moolenaar 	 * happen if the user has mapped into the stack area after the
3620b21a0008SMarcel Moolenaar 	 * stack was created, and is probably an error.
362194f7e29aSAlan Cox 	 *
3622b21a0008SMarcel Moolenaar 	 * This also effectively destroys any guard page the user might have
3623b21a0008SMarcel Moolenaar 	 * intended by limiting the stack size.
362494f7e29aSAlan Cox 	 */
36259a6d144fSKonstantin Belousov 	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
362625adb370SBrian Feldman 		if (vm_map_lock_upgrade(map))
362794f7e29aSAlan Cox 			goto Retry;
362894f7e29aSAlan Cox 
3629b21a0008SMarcel Moolenaar 		stack_entry->avail_ssize = max_grow;
363094f7e29aSAlan Cox 
363194f7e29aSAlan Cox 		vm_map_unlock(map);
36320cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
363394f7e29aSAlan Cox 	}
363494f7e29aSAlan Cox 
3635b21a0008SMarcel Moolenaar 	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
363694f7e29aSAlan Cox 
3637b21a0008SMarcel Moolenaar 	/*
3638b21a0008SMarcel Moolenaar 	 * If this is the main process stack, see if we're over the stack
3639b21a0008SMarcel Moolenaar 	 * limit.
364094f7e29aSAlan Cox 	 */
364191d5354aSJohn Baldwin 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
364294f7e29aSAlan Cox 		vm_map_unlock_read(map);
36430cddd8f0SMatthew Dillon 		return (KERN_NO_SPACE);
364494f7e29aSAlan Cox 	}
3645afcc55f3SEdward Tomasz Napierala #ifdef RACCT
36461ba5ad42SEdward Tomasz Napierala 	PROC_LOCK(p);
36471ba5ad42SEdward Tomasz Napierala 	if (is_procstack &&
36481ba5ad42SEdward Tomasz Napierala 	    racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
36491ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(p);
36501ba5ad42SEdward Tomasz Napierala 		vm_map_unlock_read(map);
36511ba5ad42SEdward Tomasz Napierala 		return (KERN_NO_SPACE);
36521ba5ad42SEdward Tomasz Napierala 	}
36531ba5ad42SEdward Tomasz Napierala 	PROC_UNLOCK(p);
3654afcc55f3SEdward Tomasz Napierala #endif
365594f7e29aSAlan Cox 
3656cfe52ecfSAndrey Zonov 	/* Round up the grow amount modulo sgrowsiz */
3657cfe52ecfSAndrey Zonov 	growsize = sgrowsiz;
3658cfe52ecfSAndrey Zonov 	grow_amount = roundup(grow_amount, growsize);
3659b21a0008SMarcel Moolenaar 	if (grow_amount > stack_entry->avail_ssize)
366094f7e29aSAlan Cox 		grow_amount = stack_entry->avail_ssize;
366191d5354aSJohn Baldwin 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3662e4826248SAlan Cox 		grow_amount = trunc_page((vm_size_t)stacklim) -
3663e4826248SAlan Cox 		    ctob(vm->vm_ssize);
366494f7e29aSAlan Cox 	}
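	/*
	 * Worked example (typical values, for illustration): a fault one
	 * page below the stack yields grow_amount = PAGE_SIZE, which the
	 * roundup() against sgrowsiz (commonly 128KB) turns into a much
	 * larger step, amortizing map and pmap updates over many future
	 * faults; the clamps just above then bound the result by
	 * avail_ssize and the stack rlimit.
	 */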
36651ba5ad42SEdward Tomasz Napierala #ifdef notyet
36661ba5ad42SEdward Tomasz Napierala 	PROC_LOCK(p);
36671ba5ad42SEdward Tomasz Napierala 	limit = racct_get_available(p, RACCT_STACK);
36681ba5ad42SEdward Tomasz Napierala 	PROC_UNLOCK(p);
36691ba5ad42SEdward Tomasz Napierala 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
36701ba5ad42SEdward Tomasz Napierala 		grow_amount = limit - ctob(vm->vm_ssize);
36711ba5ad42SEdward Tomasz Napierala #endif
36727e19eda4SAndrey Zonov 	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
36733ac7d297SAndrey Zonov 		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
36747e19eda4SAndrey Zonov 			vm_map_unlock_read(map);
36757e19eda4SAndrey Zonov 			rv = KERN_NO_SPACE;
36767e19eda4SAndrey Zonov 			goto out;
36777e19eda4SAndrey Zonov 		}
36787e19eda4SAndrey Zonov #ifdef RACCT
36797e19eda4SAndrey Zonov 		PROC_LOCK(p);
36807e19eda4SAndrey Zonov 		if (racct_set(p, RACCT_MEMLOCK,
36813ac7d297SAndrey Zonov 		    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
36827e19eda4SAndrey Zonov 			PROC_UNLOCK(p);
36837e19eda4SAndrey Zonov 			vm_map_unlock_read(map);
36847e19eda4SAndrey Zonov 			rv = KERN_NO_SPACE;
36857e19eda4SAndrey Zonov 			goto out;
36867e19eda4SAndrey Zonov 		}
36877e19eda4SAndrey Zonov 		PROC_UNLOCK(p);
36887e19eda4SAndrey Zonov #endif
36897e19eda4SAndrey Zonov 	}
3690a69ac174SMatthew Dillon 	/* If we would blow our VMEM resource limit, no go */
369191d5354aSJohn Baldwin 	if (map->size + grow_amount > vmemlim) {
3692a69ac174SMatthew Dillon 		vm_map_unlock_read(map);
36931ba5ad42SEdward Tomasz Napierala 		rv = KERN_NO_SPACE;
36941ba5ad42SEdward Tomasz Napierala 		goto out;
3695a69ac174SMatthew Dillon 	}
3696afcc55f3SEdward Tomasz Napierala #ifdef RACCT
36971ba5ad42SEdward Tomasz Napierala 	PROC_LOCK(p);
36981ba5ad42SEdward Tomasz Napierala 	if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
36991ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(p);
37001ba5ad42SEdward Tomasz Napierala 		vm_map_unlock_read(map);
37011ba5ad42SEdward Tomasz Napierala 		rv = KERN_NO_SPACE;
37021ba5ad42SEdward Tomasz Napierala 		goto out;
37031ba5ad42SEdward Tomasz Napierala 	}
37041ba5ad42SEdward Tomasz Napierala 	PROC_UNLOCK(p);
3705afcc55f3SEdward Tomasz Napierala #endif
3706a69ac174SMatthew Dillon 
370725adb370SBrian Feldman 	if (vm_map_lock_upgrade(map))
370894f7e29aSAlan Cox 		goto Retry;
370994f7e29aSAlan Cox 
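	/*
	 * Descriptive note on the pattern above: vm_map_lock_upgrade()
	 * returns non-zero when the read lock could not be upgraded
	 * without being dropped, in which case the map may have changed
	 * underneath us, so control returns to Retry and the limits and
	 * entries are recomputed from scratch.
	 */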
3710b21a0008SMarcel Moolenaar 	if (stack_entry == next_entry) {
3711b21a0008SMarcel Moolenaar 		/*
3712b21a0008SMarcel Moolenaar 		 * Growing downward.
3713b21a0008SMarcel Moolenaar 		 */
371494f7e29aSAlan Cox 		/* Get the preliminary new entry start value */
371594f7e29aSAlan Cox 		addr = stack_entry->start - grow_amount;
371694f7e29aSAlan Cox 
3717b21a0008SMarcel Moolenaar 		/*
3718b21a0008SMarcel Moolenaar 		 * If this puts us into the previous entry, cut back our
3719b21a0008SMarcel Moolenaar 		 * growth to the available space.  Also, see the note above.
372094f7e29aSAlan Cox 		 */
372194f7e29aSAlan Cox 		if (addr < end) {
3722b21a0008SMarcel Moolenaar 			stack_entry->avail_ssize = max_grow;
372394f7e29aSAlan Cox 			addr = end;
37249a6d144fSKonstantin Belousov 			if (stack_guard_page)
37259a6d144fSKonstantin Belousov 				addr += PAGE_SIZE;
372694f7e29aSAlan Cox 		}
372794f7e29aSAlan Cox 
372894f7e29aSAlan Cox 		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3729712efe66SAlan Cox 		    next_entry->protection, next_entry->max_protection,
3730712efe66SAlan Cox 		    MAP_STACK_GROWS_DOWN);
373194f7e29aSAlan Cox 
373294f7e29aSAlan Cox 		/* Adjust the available stack space by the amount we grew. */
373394f7e29aSAlan Cox 		if (rv == KERN_SUCCESS) {
3734b21a0008SMarcel Moolenaar 			new_entry = prev_entry->next;
3735b21a0008SMarcel Moolenaar 			KASSERT(new_entry == stack_entry->prev, ("foo"));
3736b21a0008SMarcel Moolenaar 			KASSERT(new_entry->end == stack_entry->start, ("foo"));
3737b21a0008SMarcel Moolenaar 			KASSERT(new_entry->start == addr, ("foo"));
3738712efe66SAlan Cox 			KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) !=
3739712efe66SAlan Cox 			    0, ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3740b21a0008SMarcel Moolenaar 			grow_amount = new_entry->end - new_entry->start;
3741b21a0008SMarcel Moolenaar 			new_entry->avail_ssize = stack_entry->avail_ssize -
3742b21a0008SMarcel Moolenaar 			    grow_amount;
3743b21a0008SMarcel Moolenaar 			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
374494f7e29aSAlan Cox 		}
3745b21a0008SMarcel Moolenaar 	} else {
3746b21a0008SMarcel Moolenaar 		/*
3747b21a0008SMarcel Moolenaar 		 * Growing upward.
3748b21a0008SMarcel Moolenaar 		 */
3749b21a0008SMarcel Moolenaar 		addr = stack_entry->end + grow_amount;
3750b21a0008SMarcel Moolenaar 
3751b21a0008SMarcel Moolenaar 		/*
3752b21a0008SMarcel Moolenaar 		 * If this puts us into the next entry, cut back our growth
3753b21a0008SMarcel Moolenaar 		 * to the available space.  Also, see the note above.
3754b21a0008SMarcel Moolenaar 		 */
3755b21a0008SMarcel Moolenaar 		if (addr > end) {
3756b21a0008SMarcel Moolenaar 			stack_entry->avail_ssize = end - stack_entry->end;
3757b21a0008SMarcel Moolenaar 			addr = end;
37589a6d144fSKonstantin Belousov 			if (stack_guard_page)
37599a6d144fSKonstantin Belousov 				addr -= PAGE_SIZE;
376094f7e29aSAlan Cox 		}
376194f7e29aSAlan Cox 
3762b21a0008SMarcel Moolenaar 		grow_amount = addr - stack_entry->end;
3763ef694c1aSEdward Tomasz Napierala 		cred = stack_entry->cred;
3764ef694c1aSEdward Tomasz Napierala 		if (cred == NULL && stack_entry->object.vm_object != NULL)
3765ef694c1aSEdward Tomasz Napierala 			cred = stack_entry->object.vm_object->cred;
3766ef694c1aSEdward Tomasz Napierala 		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
37673364c323SKonstantin Belousov 			rv = KERN_NO_SPACE;
3768b21a0008SMarcel Moolenaar 		/* Grow the underlying object if applicable. */
37693364c323SKonstantin Belousov 		else if (stack_entry->object.vm_object == NULL ||
3770b21a0008SMarcel Moolenaar 		    vm_object_coalesce(stack_entry->object.vm_object,
377157a21abaSAlan Cox 		    stack_entry->offset,
3772b21a0008SMarcel Moolenaar 		    (vm_size_t)(stack_entry->end - stack_entry->start),
3773ef694c1aSEdward Tomasz Napierala 		    (vm_size_t)grow_amount, cred != NULL)) {
377408667f6dSMarcel Moolenaar 			map->size += (addr - stack_entry->end);
3775b21a0008SMarcel Moolenaar 			/* Update the current entry. */
3776b21a0008SMarcel Moolenaar 			stack_entry->end = addr;
3777199c91abSMarcel Moolenaar 			stack_entry->avail_ssize -= grow_amount;
37780164e057SAlan Cox 			vm_map_entry_resize_free(map, stack_entry);
3779b21a0008SMarcel Moolenaar 			rv = KERN_SUCCESS;
3780b21a0008SMarcel Moolenaar 		} else
3781b21a0008SMarcel Moolenaar 			rv = KERN_FAILURE;
3782b21a0008SMarcel Moolenaar 	}
3783b21a0008SMarcel Moolenaar 
3784b21a0008SMarcel Moolenaar 	if (rv == KERN_SUCCESS && is_procstack)
3785b21a0008SMarcel Moolenaar 		vm->vm_ssize += btoc(grow_amount);
3786b21a0008SMarcel Moolenaar 
378794f7e29aSAlan Cox 	vm_map_unlock(map);
3788b21a0008SMarcel Moolenaar 
3789abd498aaSBruce M Simpson 	/*
3790abd498aaSBruce M Simpson 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3791abd498aaSBruce M Simpson 	 */
3792b21a0008SMarcel Moolenaar 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3793b21a0008SMarcel Moolenaar 		vm_map_wire(map,
3794b21a0008SMarcel Moolenaar 		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3795b21a0008SMarcel Moolenaar 		    (stack_entry == next_entry) ? stack_entry->start : addr,
3796b21a0008SMarcel Moolenaar 		    (p->p_flag & P_SYSTEM)
3797b21a0008SMarcel Moolenaar 		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3798b21a0008SMarcel Moolenaar 		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3799b21a0008SMarcel Moolenaar 	}
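	/*
	 * Illustrative: MAP_WIREFUTURE is the map flag set by
	 * mlockall(MCL_FUTURE), so for a process that called
	 *
	 *	mlockall(MCL_CURRENT | MCL_FUTURE);
	 *
	 * every successful stack growth is immediately wired by the
	 * vm_map_wire() call above.
	 */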
3800abd498aaSBruce M Simpson 
38011ba5ad42SEdward Tomasz Napierala out:
3802afcc55f3SEdward Tomasz Napierala #ifdef RACCT
38031ba5ad42SEdward Tomasz Napierala 	if (rv != KERN_SUCCESS) {
38041ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(p);
38051ba5ad42SEdward Tomasz Napierala 		error = racct_set(p, RACCT_VMEM, map->size);
38061ba5ad42SEdward Tomasz Napierala 		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
38077e19eda4SAndrey Zonov 		if (!old_mlock) {
38087e19eda4SAndrey Zonov 			error = racct_set(p, RACCT_MEMLOCK,
38093ac7d297SAndrey Zonov 			    ptoa(pmap_wired_count(map->pmap)));
38107e19eda4SAndrey Zonov 			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
38117e19eda4SAndrey Zonov 		}
38121ba5ad42SEdward Tomasz Napierala 		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
38131ba5ad42SEdward Tomasz Napierala 		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
38141ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(p);
38151ba5ad42SEdward Tomasz Napierala 	}
3816afcc55f3SEdward Tomasz Napierala #endif
38171ba5ad42SEdward Tomasz Napierala 
38180cddd8f0SMatthew Dillon 	return (rv);
381994f7e29aSAlan Cox }
382094f7e29aSAlan Cox 
3821df8bae1dSRodney W. Grimes /*
38225856e12eSJohn Dyson  * Unshare the specified VM space for exec.  If other processes are
38235856e12eSJohn Dyson  * mapped to it, then create a new one.  The new vmspace is null.
38245856e12eSJohn Dyson  */
382589b57fcfSKonstantin Belousov int
38263ebc1248SPeter Wemm vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
38271b40f8c0SMatthew Dillon {
38285856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
38295856e12eSJohn Dyson 	struct vmspace *newvmspace;
38305856e12eSJohn Dyson 
38317032434eSKonstantin Belousov 	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
38327032434eSKonstantin Belousov 	    ("vmspace_exec recursed"));
383374d1d2b7SNeel Natu 	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
383489b57fcfSKonstantin Belousov 	if (newvmspace == NULL)
383589b57fcfSKonstantin Belousov 		return (ENOMEM);
383651ab6c28SAlan Cox 	newvmspace->vm_swrss = oldvmspace->vm_swrss;
38375856e12eSJohn Dyson 	/*
38385856e12eSJohn Dyson 	 * This code is written like this for prototype purposes.  The
38395856e12eSJohn Dyson 	 * goal is to avoid running down the vmspace here, but to let the
38405856e12eSJohn Dyson 	 * other processes that are still using the vmspace finally run
38415856e12eSJohn Dyson 	 * it down.  Even though there is little or no chance of blocking
38425856e12eSJohn Dyson 	 * here, it is a good idea to keep this form for future mods.
38435856e12eSJohn Dyson 	 */
384457051fdcSTor Egge 	PROC_VMSPACE_LOCK(p);
38455856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
384657051fdcSTor Egge 	PROC_VMSPACE_UNLOCK(p);
38476617724cSJeff Roberson 	if (p == curthread->td_proc)
3848b40ce416SJulian Elischer 		pmap_activate(curthread);
38497032434eSKonstantin Belousov 	curthread->td_pflags |= TDP_EXECVMSPC;
385089b57fcfSKonstantin Belousov 	return (0);
38515856e12eSJohn Dyson }
38525856e12eSJohn Dyson 
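/*
 * Descriptive note: the old vmspace is deliberately not freed here.
 * TDP_EXECVMSPC tells the exec path that p_vmspace was replaced, so
 * that the caller can perform the final vmspace_free() of the old
 * space once the exec can no longer fail.
 */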
38535856e12eSJohn Dyson /*
38545856e12eSJohn Dyson  * Unshare the specified VM space for forcing COW.  This
38555856e12eSJohn Dyson  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
38565856e12eSJohn Dyson  */
385789b57fcfSKonstantin Belousov int
38581b40f8c0SMatthew Dillon vmspace_unshare(struct proc *p)
38591b40f8c0SMatthew Dillon {
38605856e12eSJohn Dyson 	struct vmspace *oldvmspace = p->p_vmspace;
38615856e12eSJohn Dyson 	struct vmspace *newvmspace;
38623364c323SKonstantin Belousov 	vm_ooffset_t fork_charge;
38635856e12eSJohn Dyson 
38645856e12eSJohn Dyson 	if (oldvmspace->vm_refcnt == 1)
386589b57fcfSKonstantin Belousov 		return (0);
38663364c323SKonstantin Belousov 	fork_charge = 0;
38673364c323SKonstantin Belousov 	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
386889b57fcfSKonstantin Belousov 	if (newvmspace == NULL)
386989b57fcfSKonstantin Belousov 		return (ENOMEM);
3870ef694c1aSEdward Tomasz Napierala 	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
38713364c323SKonstantin Belousov 		vmspace_free(newvmspace);
38723364c323SKonstantin Belousov 		return (ENOMEM);
38733364c323SKonstantin Belousov 	}
387457051fdcSTor Egge 	PROC_VMSPACE_LOCK(p);
38755856e12eSJohn Dyson 	p->p_vmspace = newvmspace;
387657051fdcSTor Egge 	PROC_VMSPACE_UNLOCK(p);
38776617724cSJeff Roberson 	if (p == curthread->td_proc)
3878b40ce416SJulian Elischer 		pmap_activate(curthread);
3879b56ef1c1SJohn Baldwin 	vmspace_free(oldvmspace);
388089b57fcfSKonstantin Belousov 	return (0);
38815856e12eSJohn Dyson }
38825856e12eSJohn Dyson 
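/*
 * Illustrative: a call such as rfork(RFFDG), which requests neither
 * RFPROC nor RFMEM, reaches vmspace_unshare() above and forces a
 * copy-on-write duplicate of the address space if it is currently
 * shared with another process.
 */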
38835856e12eSJohn Dyson /*
3884df8bae1dSRodney W. Grimes  * vm_map_lookup:
3885df8bae1dSRodney W. Grimes  *
3886df8bae1dSRodney W. Grimes  *	Finds the VM object, offset, and
3887df8bae1dSRodney W. Grimes  *	protection for a given virtual address in the
3888df8bae1dSRodney W. Grimes  *	specified map, assuming a page fault of the
3889df8bae1dSRodney W. Grimes  *	type specified.
3890df8bae1dSRodney W. Grimes  *
3891df8bae1dSRodney W. Grimes  *	Leaves the map in question locked for read; return
3892df8bae1dSRodney W. Grimes  *	values are guaranteed until a vm_map_lookup_done
3893df8bae1dSRodney W. Grimes  *	call is performed.  Note that the map argument
3894df8bae1dSRodney W. Grimes  *	is in/out; the returned map must be used in
3895df8bae1dSRodney W. Grimes  *	the call to vm_map_lookup_done.
3896df8bae1dSRodney W. Grimes  *
3897df8bae1dSRodney W. Grimes  *	A handle (out_entry) is returned for use in
3898df8bae1dSRodney W. Grimes  *	vm_map_lookup_done, to make that fast.
3899df8bae1dSRodney W. Grimes  *
3900df8bae1dSRodney W. Grimes  *	If a lookup is requested with "write protection"
3901df8bae1dSRodney W. Grimes  *	specified, the map may be changed to perform virtual
3902df8bae1dSRodney W. Grimes  *	copying operations, although the data referenced will
3903df8bae1dSRodney W. Grimes  *	remain the same.
3904df8bae1dSRodney W. Grimes  */
3905df8bae1dSRodney W. Grimes int
3906b9dcd593SBruce Evans vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3907b9dcd593SBruce Evans 	      vm_offset_t vaddr,
390747221757SJohn Dyson 	      vm_prot_t fault_typea,
3909b9dcd593SBruce Evans 	      vm_map_entry_t *out_entry,	/* OUT */
3910b9dcd593SBruce Evans 	      vm_object_t *object,		/* OUT */
3911b9dcd593SBruce Evans 	      vm_pindex_t *pindex,		/* OUT */
3912b9dcd593SBruce Evans 	      vm_prot_t *out_prot,		/* OUT */
39132d8acc0fSJohn Dyson 	      boolean_t *wired)			/* OUT */
3914df8bae1dSRodney W. Grimes {
3915c0877f10SJohn Dyson 	vm_map_entry_t entry;
3916c0877f10SJohn Dyson 	vm_map_t map = *var_map;
3917c0877f10SJohn Dyson 	vm_prot_t prot;
391847221757SJohn Dyson 	vm_prot_t fault_type = fault_typea;
39193364c323SKonstantin Belousov 	vm_object_t eobject;
39200cc74f14SAlan Cox 	vm_size_t size;
3921ef694c1aSEdward Tomasz Napierala 	struct ucred *cred;
3922df8bae1dSRodney W. Grimes 
3923df8bae1dSRodney W. Grimes RetryLookup:;
3924df8bae1dSRodney W. Grimes 
3925df8bae1dSRodney W. Grimes 	vm_map_lock_read(map);
3926df8bae1dSRodney W. Grimes 
3927df8bae1dSRodney W. Grimes 	/*
39284c3ef59eSAlan Cox 	 * Lookup the faulting address.
3929df8bae1dSRodney W. Grimes 	 */
3930095104acSAlan Cox 	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3931095104acSAlan Cox 		vm_map_unlock_read(map);
3932095104acSAlan Cox 		return (KERN_INVALID_ADDRESS);
3933095104acSAlan Cox 	}
3934df8bae1dSRodney W. Grimes 
39354e94f402SAlan Cox 	entry = *out_entry;
3936b7b2aac2SJohn Dyson 
3937df8bae1dSRodney W. Grimes 	/*
3938df8bae1dSRodney W. Grimes 	 * Handle submaps.
3939df8bae1dSRodney W. Grimes 	 */
3940afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3941df8bae1dSRodney W. Grimes 		vm_map_t old_map = map;
3942df8bae1dSRodney W. Grimes 
3943df8bae1dSRodney W. Grimes 		*var_map = map = entry->object.sub_map;
3944df8bae1dSRodney W. Grimes 		vm_map_unlock_read(old_map);
3945df8bae1dSRodney W. Grimes 		goto RetryLookup;
3946df8bae1dSRodney W. Grimes 	}
3947a04c970aSJohn Dyson 
3948df8bae1dSRodney W. Grimes 	/*
39490d94caffSDavid Greenman 	 * Check whether this task is allowed to have this page.
3950df8bae1dSRodney W. Grimes 	 */
3951df8bae1dSRodney W. Grimes 	prot = entry->protection;
395247221757SJohn Dyson 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
39532db65ab4SAlan Cox 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3954095104acSAlan Cox 		vm_map_unlock_read(map);
3955095104acSAlan Cox 		return (KERN_PROTECTION_FAILURE);
395647221757SJohn Dyson 	}
39572ed14a92SAlan Cox 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
395847221757SJohn Dyson 	    (entry->eflags & MAP_ENTRY_COW) &&
3959a6d42a0dSAlan Cox 	    (fault_type & VM_PROT_WRITE)) {
3960095104acSAlan Cox 		vm_map_unlock_read(map);
3961095104acSAlan Cox 		return (KERN_PROTECTION_FAILURE);
3962a04c970aSJohn Dyson 	}
39635b3e0257SDag-Erling Smørgrav 	if ((fault_typea & VM_PROT_COPY) != 0 &&
39645b3e0257SDag-Erling Smørgrav 	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
39655b3e0257SDag-Erling Smørgrav 	    (entry->eflags & MAP_ENTRY_COW) == 0) {
39665b3e0257SDag-Erling Smørgrav 		vm_map_unlock_read(map);
39675b3e0257SDag-Erling Smørgrav 		return (KERN_PROTECTION_FAILURE);
39685b3e0257SDag-Erling Smørgrav 	}
3969df8bae1dSRodney W. Grimes 
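	/*
	 * Descriptive example of the checks above: a write fault on a
	 * read-only entry fails with KERN_PROTECTION_FAILURE, which the
	 * vm_fault() caller typically converts into SIGSEGV for a user
	 * process.
	 */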
3970df8bae1dSRodney W. Grimes 	/*
39710d94caffSDavid Greenman 	 * If this page is not pageable, we have to get it for all possible
39720d94caffSDavid Greenman 	 * accesses.
3973df8bae1dSRodney W. Grimes 	 */
397405f0fdd2SPoul-Henning Kamp 	*wired = (entry->wired_count != 0);
397505f0fdd2SPoul-Henning Kamp 	if (*wired)
3976a6d42a0dSAlan Cox 		fault_type = entry->protection;
39773364c323SKonstantin Belousov 	size = entry->end - entry->start;
3978df8bae1dSRodney W. Grimes 	/*
3979df8bae1dSRodney W. Grimes 	 * If the entry was copy-on-write, we either ...
3980df8bae1dSRodney W. Grimes 	 */
3981afa07f7eSJohn Dyson 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3982df8bae1dSRodney W. Grimes 		/*
39830d94caffSDavid Greenman 		 * If we want to write the page, we may as well handle that
3984ad5fca3bSAlan Cox 		 * now since we've got the map locked.
3985df8bae1dSRodney W. Grimes 		 *
39860d94caffSDavid Greenman 		 * If we don't need to write the page, we just demote the
39870d94caffSDavid Greenman 		 * permissions allowed.
3988df8bae1dSRodney W. Grimes 		 */
3989a6d42a0dSAlan Cox 		if ((fault_type & VM_PROT_WRITE) != 0 ||
3990a6d42a0dSAlan Cox 		    (fault_typea & VM_PROT_COPY) != 0) {
3991df8bae1dSRodney W. Grimes 			/*
39920d94caffSDavid Greenman 			 * Make a new object, and place it in the object
39930d94caffSDavid Greenman 			 * chain.  Note that no new references have appeared
3994ad5fca3bSAlan Cox 			 * -- one just moved from the map to the new
39950d94caffSDavid Greenman 			 * object.
3996df8bae1dSRodney W. Grimes 			 */
399725adb370SBrian Feldman 			if (vm_map_lock_upgrade(map))
3998df8bae1dSRodney W. Grimes 				goto RetryLookup;
39999917e010SAlan Cox 
4000ef694c1aSEdward Tomasz Napierala 			if (entry->cred == NULL) {
40013364c323SKonstantin Belousov 				/*
40023364c323SKonstantin Belousov 				 * The debugger owner is charged for
40033364c323SKonstantin Belousov 				 * the memory.
40043364c323SKonstantin Belousov 				 */
4005ef694c1aSEdward Tomasz Napierala 				cred = curthread->td_ucred;
4006ef694c1aSEdward Tomasz Napierala 				crhold(cred);
4007ef694c1aSEdward Tomasz Napierala 				if (!swap_reserve_by_cred(size, cred)) {
4008ef694c1aSEdward Tomasz Napierala 					crfree(cred);
40093364c323SKonstantin Belousov 					vm_map_unlock(map);
40103364c323SKonstantin Belousov 					return (KERN_RESOURCE_SHORTAGE);
40113364c323SKonstantin Belousov 				}
4012ef694c1aSEdward Tomasz Napierala 				entry->cred = cred;
40133364c323SKonstantin Belousov 			}
40140cc74f14SAlan Cox 			vm_object_shadow(&entry->object.vm_object,
40150cc74f14SAlan Cox 			    &entry->offset, size);
4016afa07f7eSJohn Dyson 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
40173364c323SKonstantin Belousov 			eobject = entry->object.vm_object;
4018ef694c1aSEdward Tomasz Napierala 			if (eobject->cred != NULL) {
40193364c323SKonstantin Belousov 				/*
40203364c323SKonstantin Belousov 				 * The object was not shadowed.
40213364c323SKonstantin Belousov 				 */
4022ef694c1aSEdward Tomasz Napierala 				swap_release_by_cred(size, entry->cred);
4023ef694c1aSEdward Tomasz Napierala 				crfree(entry->cred);
4024ef694c1aSEdward Tomasz Napierala 				entry->cred = NULL;
4025ef694c1aSEdward Tomasz Napierala 			} else if (entry->cred != NULL) {
402689f6b863SAttilio Rao 				VM_OBJECT_WLOCK(eobject);
4027ef694c1aSEdward Tomasz Napierala 				eobject->cred = entry->cred;
40283364c323SKonstantin Belousov 				eobject->charge = size;
402989f6b863SAttilio Rao 				VM_OBJECT_WUNLOCK(eobject);
4030ef694c1aSEdward Tomasz Napierala 				entry->cred = NULL;
40313364c323SKonstantin Belousov 			}
40329917e010SAlan Cox 
40339b09b6c7SMatthew Dillon 			vm_map_lock_downgrade(map);
40340d94caffSDavid Greenman 		} else {
4035df8bae1dSRodney W. Grimes 			/*
40360d94caffSDavid Greenman 			 * We're attempting to read a copy-on-write page --
40370d94caffSDavid Greenman 			 * don't allow writes.
4038df8bae1dSRodney W. Grimes 			 */
40392d8acc0fSJohn Dyson 			prot &= ~VM_PROT_WRITE;
4040df8bae1dSRodney W. Grimes 		}
4041df8bae1dSRodney W. Grimes 	}
40422d8acc0fSJohn Dyson 
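	/*
	 * Descriptive recap of the branch above: a read fault on a
	 * copy-on-write entry merely drops VM_PROT_WRITE from the
	 * returned protection; the actual copy is deferred until a
	 * later write fault takes the shadow-object path.
	 */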
4043df8bae1dSRodney W. Grimes 	/*
4044df8bae1dSRodney W. Grimes 	 * Create an object if necessary.
4045df8bae1dSRodney W. Grimes 	 */
40464e71e795SMatthew Dillon 	if (entry->object.vm_object == NULL &&
40474e71e795SMatthew Dillon 	    !map->system_map) {
404825adb370SBrian Feldman 		if (vm_map_lock_upgrade(map))
4049df8bae1dSRodney W. Grimes 			goto RetryLookup;
405024a1cce3SDavid Greenman 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
40513364c323SKonstantin Belousov 		    atop(size));
4052df8bae1dSRodney W. Grimes 		entry->offset = 0;
4053ef694c1aSEdward Tomasz Napierala 		if (entry->cred != NULL) {
405489f6b863SAttilio Rao 			VM_OBJECT_WLOCK(entry->object.vm_object);
4055ef694c1aSEdward Tomasz Napierala 			entry->object.vm_object->cred = entry->cred;
40563364c323SKonstantin Belousov 			entry->object.vm_object->charge = size;
405789f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(entry->object.vm_object);
4058ef694c1aSEdward Tomasz Napierala 			entry->cred = NULL;
40593364c323SKonstantin Belousov 		}
40609b09b6c7SMatthew Dillon 		vm_map_lock_downgrade(map);
4061df8bae1dSRodney W. Grimes 	}
4062b5b40fa6SJohn Dyson 
4063df8bae1dSRodney W. Grimes 	/*
40640d94caffSDavid Greenman 	 * Return the object/offset from this entry.  If the entry was
40650d94caffSDavid Greenman 	 * copy-on-write or empty, it has been fixed up.
4066df8bae1dSRodney W. Grimes 	 */
40679b09b6c7SMatthew Dillon 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4068df8bae1dSRodney W. Grimes 	*object = entry->object.vm_object;
4069df8bae1dSRodney W. Grimes 
4070df8bae1dSRodney W. Grimes 	*out_prot = prot;
4071df8bae1dSRodney W. Grimes 	return (KERN_SUCCESS);
4072df8bae1dSRodney W. Grimes }
4073df8bae1dSRodney W. Grimes 
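/*
 * Minimal usage sketch (illustrative only, error handling omitted):
 * callers pair vm_map_lookup() with vm_map_lookup_done():
 *
 *	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (result != KERN_SUCCESS)
 *		return (result);
 *	... use object and pindex while the map stays read-locked ...
 *	vm_map_lookup_done(map, entry);
 *
 * Note that map is passed by reference: a submap lookup rewrites it,
 * and the map handed to vm_map_lookup_done() must be the returned one.
 */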
4074df8bae1dSRodney W. Grimes /*
407519dc5607STor Egge  *	vm_map_lookup_locked:
407619dc5607STor Egge  *
407719dc5607STor Egge  *	Lookup the faulting address.  A version of vm_map_lookup that returns
407819dc5607STor Egge  *	KERN_FAILURE instead of blocking on map lock or memory allocation.
407919dc5607STor Egge  */
408019dc5607STor Egge int
408119dc5607STor Egge vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
408219dc5607STor Egge 		     vm_offset_t vaddr,
408319dc5607STor Egge 		     vm_prot_t fault_typea,
408419dc5607STor Egge 		     vm_map_entry_t *out_entry,	/* OUT */
408519dc5607STor Egge 		     vm_object_t *object,	/* OUT */
408619dc5607STor Egge 		     vm_pindex_t *pindex,	/* OUT */
408719dc5607STor Egge 		     vm_prot_t *out_prot,	/* OUT */
408819dc5607STor Egge 		     boolean_t *wired)		/* OUT */
408919dc5607STor Egge {
409019dc5607STor Egge 	vm_map_entry_t entry;
409119dc5607STor Egge 	vm_map_t map = *var_map;
409219dc5607STor Egge 	vm_prot_t prot;
409319dc5607STor Egge 	vm_prot_t fault_type = fault_typea;
409419dc5607STor Egge 
409519dc5607STor Egge 	/*
40964c3ef59eSAlan Cox 	 * Lookup the faulting address.
409719dc5607STor Egge 	 */
409819dc5607STor Egge 	if (!vm_map_lookup_entry(map, vaddr, out_entry))
409919dc5607STor Egge 		return (KERN_INVALID_ADDRESS);
410019dc5607STor Egge 
410119dc5607STor Egge 	entry = *out_entry;
410219dc5607STor Egge 
410319dc5607STor Egge 	/*
410419dc5607STor Egge 	 * Fail if the entry refers to a submap.
410519dc5607STor Egge 	 */
410619dc5607STor Egge 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
410719dc5607STor Egge 		return (KERN_FAILURE);
410819dc5607STor Egge 
410919dc5607STor Egge 	/*
411019dc5607STor Egge 	 * Check whether this task is allowed to have this page.
411119dc5607STor Egge 	 */
411219dc5607STor Egge 	prot = entry->protection;
411319dc5607STor Egge 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
411419dc5607STor Egge 	if ((fault_type & prot) != fault_type)
411519dc5607STor Egge 		return (KERN_PROTECTION_FAILURE);
411619dc5607STor Egge 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
411719dc5607STor Egge 	    (entry->eflags & MAP_ENTRY_COW) &&
4118a6d42a0dSAlan Cox 	    (fault_type & VM_PROT_WRITE))
411919dc5607STor Egge 		return (KERN_PROTECTION_FAILURE);
412019dc5607STor Egge 
412119dc5607STor Egge 	/*
412219dc5607STor Egge 	 * If this page is not pageable, we have to get it for all possible
412319dc5607STor Egge 	 * accesses.
412419dc5607STor Egge 	 */
412519dc5607STor Egge 	*wired = (entry->wired_count != 0);
412619dc5607STor Egge 	if (*wired)
4127a6d42a0dSAlan Cox 		fault_type = entry->protection;
412819dc5607STor Egge 
412919dc5607STor Egge 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
413019dc5607STor Egge 		/*
413119dc5607STor Egge 		 * Fail if the entry was copy-on-write for a write fault.
413219dc5607STor Egge 		 */
413319dc5607STor Egge 		if (fault_type & VM_PROT_WRITE)
413419dc5607STor Egge 			return (KERN_FAILURE);
413519dc5607STor Egge 		/*
413619dc5607STor Egge 		 * We're attempting to read a copy-on-write page --
413719dc5607STor Egge 		 * don't allow writes.
413819dc5607STor Egge 		 */
413919dc5607STor Egge 		prot &= ~VM_PROT_WRITE;
414019dc5607STor Egge 	}
414119dc5607STor Egge 
414219dc5607STor Egge 	/*
414319dc5607STor Egge 	 * Fail if an object should be created.
414419dc5607STor Egge 	 */
414519dc5607STor Egge 	if (entry->object.vm_object == NULL && !map->system_map)
414619dc5607STor Egge 		return (KERN_FAILURE);
414719dc5607STor Egge 
414819dc5607STor Egge 	/*
414919dc5607STor Egge 	 * Return the object/offset from this entry.  If the entry was
415019dc5607STor Egge 	 * copy-on-write or empty, it has been fixed up.
415119dc5607STor Egge 	 */
415219dc5607STor Egge 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
415319dc5607STor Egge 	*object = entry->object.vm_object;
415419dc5607STor Egge 
415519dc5607STor Egge 	*out_prot = prot;
415619dc5607STor Egge 	return (KERN_SUCCESS);
415719dc5607STor Egge }
415819dc5607STor Egge 
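/*
 * Descriptive note: vm_map_lookup_locked() above is the non-sleeping
 * variant for callers that already hold the map read lock; every case
 * that vm_map_lookup() resolves by upgrading the lock (COW shadowing,
 * object creation) instead fails with KERN_FAILURE.
 */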
415919dc5607STor Egge /*
4160df8bae1dSRodney W. Grimes  *	vm_map_lookup_done:
4161df8bae1dSRodney W. Grimes  *
4162df8bae1dSRodney W. Grimes  *	Releases locks acquired by a vm_map_lookup
4163df8bae1dSRodney W. Grimes  *	(according to the handle returned by that lookup).
4164df8bae1dSRodney W. Grimes  */
41650d94caffSDavid Greenman void
41661b40f8c0SMatthew Dillon vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4167df8bae1dSRodney W. Grimes {
4168df8bae1dSRodney W. Grimes 	/*
4169df8bae1dSRodney W. Grimes 	 * Unlock the main-level map
4170df8bae1dSRodney W. Grimes 	 */
4171df8bae1dSRodney W. Grimes 	vm_map_unlock_read(map);
4172df8bae1dSRodney W. Grimes }
4173df8bae1dSRodney W. Grimes 
4174c7c34a24SBruce Evans #include "opt_ddb.h"
4175c3cb3e12SDavid Greenman #ifdef DDB
4176c7c34a24SBruce Evans #include <sys/kernel.h>
4177c7c34a24SBruce Evans 
4178c7c34a24SBruce Evans #include <ddb/ddb.h>
4179c7c34a24SBruce Evans 
41802ebcd458SAttilio Rao static void
41812ebcd458SAttilio Rao vm_map_print(vm_map_t map)
4182df8bae1dSRodney W. Grimes {
4183c0877f10SJohn Dyson 	vm_map_entry_t entry;
4184c7c34a24SBruce Evans 
4185e5f251d2SAlan Cox 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4186e5f251d2SAlan Cox 	    (void *)map,
4187101eeb7fSBruce Evans 	    (void *)map->pmap, map->nentries, map->timestamp);
4188df8bae1dSRodney W. Grimes 
4189c7c34a24SBruce Evans 	db_indent += 2;
4190df8bae1dSRodney W. Grimes 	for (entry = map->header.next; entry != &map->header;
4191df8bae1dSRodney W. Grimes 	    entry = entry->next) {
4192fc62ef1fSBruce Evans 		db_iprintf("map entry %p: start=%p, end=%p\n",
4193fc62ef1fSBruce Evans 		    (void *)entry, (void *)entry->start, (void *)entry->end);
4194e5f251d2SAlan Cox 		{
4195df8bae1dSRodney W. Grimes 			static char *inheritance_name[4] =
4196df8bae1dSRodney W. Grimes 			    {"share", "copy", "none", "donate_copy"};
41970d94caffSDavid Greenman 
419895e5e988SJohn Dyson 			db_iprintf(" prot=%x/%x/%s",
4199df8bae1dSRodney W. Grimes 			    entry->protection,
4200df8bae1dSRodney W. Grimes 			    entry->max_protection,
42018aef1712SMatthew Dillon 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4202df8bae1dSRodney W. Grimes 			if (entry->wired_count != 0)
420395e5e988SJohn Dyson 				db_printf(", wired");
4204df8bae1dSRodney W. Grimes 		}
42059fdfe602SMatthew Dillon 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4206cd034a5bSMaxime Henrion 			db_printf(", share=%p, offset=0x%jx\n",
42079fdfe602SMatthew Dillon 			    (void *)entry->object.sub_map,
4208cd034a5bSMaxime Henrion 			    (uintmax_t)entry->offset);
4209df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
42109fdfe602SMatthew Dillon 			    (entry->prev->object.sub_map !=
42119fdfe602SMatthew Dillon 			    entry->object.sub_map)) {
4212c7c34a24SBruce Evans 				db_indent += 2;
42132ebcd458SAttilio Rao 				vm_map_print((vm_map_t)entry->object.sub_map);
4214c7c34a24SBruce Evans 				db_indent -= 2;
4215df8bae1dSRodney W. Grimes 			}
42160d94caffSDavid Greenman 		} else {
4217ef694c1aSEdward Tomasz Napierala 			if (entry->cred != NULL)
4218ef694c1aSEdward Tomasz Napierala 				db_printf(", ruid %d", entry->cred->cr_ruid);
4219cd034a5bSMaxime Henrion 			db_printf(", object=%p, offset=0x%jx",
4220101eeb7fSBruce Evans 			    (void *)entry->object.vm_object,
4221cd034a5bSMaxime Henrion 			    (uintmax_t)entry->offset);
4222ef694c1aSEdward Tomasz Napierala 			if (entry->object.vm_object && entry->object.vm_object->cred)
4223ef694c1aSEdward Tomasz Napierala 				db_printf(", obj ruid %d charge %jx",
4224ef694c1aSEdward Tomasz Napierala 				    entry->object.vm_object->cred->cr_ruid,
42253364c323SKonstantin Belousov 				    (uintmax_t)entry->object.vm_object->charge);
4226afa07f7eSJohn Dyson 			if (entry->eflags & MAP_ENTRY_COW)
4227c7c34a24SBruce Evans 				db_printf(", copy (%s)",
4228afa07f7eSJohn Dyson 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4229c7c34a24SBruce Evans 			db_printf("\n");
4230df8bae1dSRodney W. Grimes 
4231df8bae1dSRodney W. Grimes 			if ((entry->prev == &map->header) ||
4232df8bae1dSRodney W. Grimes 			    (entry->prev->object.vm_object !=
4233df8bae1dSRodney W. Grimes 			    entry->object.vm_object)) {
4234c7c34a24SBruce Evans 				db_indent += 2;
4235101eeb7fSBruce Evans 				vm_object_print((db_expr_t)(intptr_t)
4236101eeb7fSBruce Evans 				    entry->object.vm_object,
423744bbc3b7SKonstantin Belousov 				    0, 0, (char *)0);
4238c7c34a24SBruce Evans 				db_indent -= 2;
4239df8bae1dSRodney W. Grimes 			}
4240df8bae1dSRodney W. Grimes 		}
4241df8bae1dSRodney W. Grimes 	}
4242c7c34a24SBruce Evans 	db_indent -= 2;
4243df8bae1dSRodney W. Grimes }
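/*
 * Usage sketch (ddb; the address below is hypothetical):
 *
 *	db> show map 0xfffff800029a0000
 *	db> show procvm
 *
 * The commands defined below print a vm_map given its address, or the
 * vmspace of a process (curproc when no address is given).
 */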
424495e5e988SJohn Dyson 
42452ebcd458SAttilio Rao DB_SHOW_COMMAND(map, map)
42462ebcd458SAttilio Rao {
42472ebcd458SAttilio Rao 
42482ebcd458SAttilio Rao 	if (!have_addr) {
42492ebcd458SAttilio Rao 		db_printf("usage: show map <addr>\n");
42502ebcd458SAttilio Rao 		return;
42512ebcd458SAttilio Rao 	}
42522ebcd458SAttilio Rao 	vm_map_print((vm_map_t)addr);
42532ebcd458SAttilio Rao }
425495e5e988SJohn Dyson 
425595e5e988SJohn Dyson DB_SHOW_COMMAND(procvm, procvm)
425695e5e988SJohn Dyson {
425795e5e988SJohn Dyson 	struct proc *p;
425895e5e988SJohn Dyson 
425995e5e988SJohn Dyson 	if (have_addr) {
426095e5e988SJohn Dyson 		p = (struct proc *) addr;
426195e5e988SJohn Dyson 	} else {
426295e5e988SJohn Dyson 		p = curproc;
426395e5e988SJohn Dyson 	}
426495e5e988SJohn Dyson 
4265ac1e407bSBruce Evans 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4266ac1e407bSBruce Evans 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4267b1028ad1SLuoqi Chen 	    (void *)vmspace_pmap(p->p_vmspace));
426895e5e988SJohn Dyson 
42692ebcd458SAttilio Rao 	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
427095e5e988SJohn Dyson }
427195e5e988SJohn Dyson 
4272c7c34a24SBruce Evans #endif /* DDB */
4273