1df8bae1dSRodney W. Grimes /* 2df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 3df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 4df8bae1dSRodney W. Grimes * 5df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 6df8bae1dSRodney W. Grimes * The Mach Operating System project at Carnegie-Mellon University. 7df8bae1dSRodney W. Grimes * 8df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 9df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 10df8bae1dSRodney W. Grimes * are met: 11df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 12df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 13df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 15df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 16df8bae1dSRodney W. Grimes * 3. All advertising materials mentioning features or use of this software 17df8bae1dSRodney W. Grimes * must display the following acknowledgement: 18df8bae1dSRodney W. Grimes * This product includes software developed by the University of 19df8bae1dSRodney W. Grimes * California, Berkeley and its contributors. 20df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 21df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 22df8bae1dSRodney W. Grimes * without specific prior written permission. 23df8bae1dSRodney W. Grimes * 24df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25df8bae1dSRodney W. 
Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34df8bae1dSRodney W. Grimes * SUCH DAMAGE. 35df8bae1dSRodney W. Grimes * 363c4dd356SDavid Greenman * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 37df8bae1dSRodney W. Grimes * 38df8bae1dSRodney W. Grimes * 39df8bae1dSRodney W. Grimes * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40df8bae1dSRodney W. Grimes * All rights reserved. 41df8bae1dSRodney W. Grimes * 42df8bae1dSRodney W. Grimes * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43df8bae1dSRodney W. Grimes * 44df8bae1dSRodney W. Grimes * Permission to use, copy, modify and distribute this software and 45df8bae1dSRodney W. Grimes * its documentation is hereby granted, provided that both the copyright 46df8bae1dSRodney W. Grimes * notice and this permission notice appear in all copies of the 47df8bae1dSRodney W. Grimes * software, derivative works or modified versions, and any portions 48df8bae1dSRodney W. Grimes * thereof, and that both notices appear in supporting documentation. 49df8bae1dSRodney W. Grimes * 50df8bae1dSRodney W. 
Grimes * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51df8bae1dSRodney W. Grimes * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52df8bae1dSRodney W. Grimes * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53df8bae1dSRodney W. Grimes * 54df8bae1dSRodney W. Grimes * Carnegie Mellon requests users of this software to return to 55df8bae1dSRodney W. Grimes * 56df8bae1dSRodney W. Grimes * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57df8bae1dSRodney W. Grimes * School of Computer Science 58df8bae1dSRodney W. Grimes * Carnegie Mellon University 59df8bae1dSRodney W. Grimes * Pittsburgh PA 15213-3890 60df8bae1dSRodney W. Grimes * 61df8bae1dSRodney W. Grimes * any improvements or extensions that they make and grant Carnegie the 62df8bae1dSRodney W. Grimes * rights to redistribute these changes. 633c4dd356SDavid Greenman * 64de5f6a77SJohn Dyson * $Id: vm_map.c,v 1.33 1996/02/11 22:03:49 dyson Exp $ 65df8bae1dSRodney W. Grimes */ 66df8bae1dSRodney W. Grimes 67df8bae1dSRodney W. Grimes /* 68df8bae1dSRodney W. Grimes * Virtual memory mapping module. 69df8bae1dSRodney W. Grimes */ 700e41ee30SGarrett Wollman #include "opt_ddb.h" 71df8bae1dSRodney W. Grimes 72df8bae1dSRodney W. Grimes #include <sys/param.h> 73df8bae1dSRodney W. Grimes #include <sys/systm.h> 74df8bae1dSRodney W. Grimes #include <sys/malloc.h> 75b5e8ce9fSBruce Evans #include <sys/proc.h> 76efeaf95aSDavid Greenman #include <sys/queue.h> 77efeaf95aSDavid Greenman #include <sys/vmmeter.h> 78df8bae1dSRodney W. Grimes 79df8bae1dSRodney W. Grimes #include <vm/vm.h> 80efeaf95aSDavid Greenman #include <vm/vm_param.h> 81efeaf95aSDavid Greenman #include <vm/vm_prot.h> 82efeaf95aSDavid Greenman #include <vm/vm_inherit.h> 83efeaf95aSDavid Greenman #include <vm/lock.h> 84efeaf95aSDavid Greenman #include <vm/pmap.h> 85efeaf95aSDavid Greenman #include <vm/vm_map.h> 86df8bae1dSRodney W. Grimes #include <vm/vm_page.h> 87df8bae1dSRodney W. 
Grimes #include <vm/vm_object.h> 8826f9a767SRodney W. Grimes #include <vm/vm_kern.h> 8924a1cce3SDavid Greenman #include <vm/vm_pager.h> 90efeaf95aSDavid Greenman #include <vm/vm_extern.h> 91df8bae1dSRodney W. Grimes 92df8bae1dSRodney W. Grimes /* 93df8bae1dSRodney W. Grimes * Virtual memory maps provide for the mapping, protection, 94df8bae1dSRodney W. Grimes * and sharing of virtual memory objects. In addition, 95df8bae1dSRodney W. Grimes * this module provides for an efficient virtual copy of 96df8bae1dSRodney W. Grimes * memory from one map to another. 97df8bae1dSRodney W. Grimes * 98df8bae1dSRodney W. Grimes * Synchronization is required prior to most operations. 99df8bae1dSRodney W. Grimes * 100df8bae1dSRodney W. Grimes * Maps consist of an ordered doubly-linked list of simple 101df8bae1dSRodney W. Grimes * entries; a single hint is used to speed up lookups. 102df8bae1dSRodney W. Grimes * 103df8bae1dSRodney W. Grimes * In order to properly represent the sharing of virtual 104df8bae1dSRodney W. Grimes * memory regions among maps, the map structure is bi-level. 105df8bae1dSRodney W. Grimes * Top-level ("address") maps refer to regions of sharable 106df8bae1dSRodney W. Grimes * virtual memory. These regions are implemented as 107df8bae1dSRodney W. Grimes * ("sharing") maps, which then refer to the actual virtual 108df8bae1dSRodney W. Grimes * memory objects. When two address maps "share" memory, 109df8bae1dSRodney W. Grimes * their top-level maps both have references to the same 110df8bae1dSRodney W. Grimes * sharing map. When memory is virtual-copied from one 111df8bae1dSRodney W. Grimes * address map to another, the references in the sharing 112df8bae1dSRodney W. Grimes * maps are actually copied -- no copying occurs at the 113df8bae1dSRodney W. Grimes * virtual memory object level. 114df8bae1dSRodney W. Grimes * 115df8bae1dSRodney W. Grimes * Since portions of maps are specified by start/end addreses, 116df8bae1dSRodney W. 
Grimes * which may not align with existing map entries, all 117df8bae1dSRodney W. Grimes * routines merely "clip" entries to these start/end values. 118df8bae1dSRodney W. Grimes * [That is, an entry is split into two, bordering at a 119df8bae1dSRodney W. Grimes * start or end value.] Note that these clippings may not 120df8bae1dSRodney W. Grimes * always be necessary (as the two resulting entries are then 121df8bae1dSRodney W. Grimes * not changed); however, the clipping is done for convenience. 122df8bae1dSRodney W. Grimes * No attempt is currently made to "glue back together" two 123df8bae1dSRodney W. Grimes * abutting entries. 124df8bae1dSRodney W. Grimes * 125df8bae1dSRodney W. Grimes * As mentioned above, virtual copy operations are performed 126df8bae1dSRodney W. Grimes * by copying VM object references from one sharing map to 127df8bae1dSRodney W. Grimes * another, and then marking both regions as copy-on-write. 128df8bae1dSRodney W. Grimes * It is important to note that only one writeable reference 129df8bae1dSRodney W. Grimes * to a VM object region exists in any map -- this means that 130df8bae1dSRodney W. Grimes * shadow object creation can be delayed until a write operation 131df8bae1dSRodney W. Grimes * occurs. 132df8bae1dSRodney W. Grimes */ 133df8bae1dSRodney W. Grimes 134df8bae1dSRodney W. Grimes /* 135df8bae1dSRodney W. Grimes * vm_map_startup: 136df8bae1dSRodney W. Grimes * 137df8bae1dSRodney W. Grimes * Initialize the vm_map module. Must be called before 138df8bae1dSRodney W. Grimes * any other vm_map routines. 139df8bae1dSRodney W. Grimes * 140df8bae1dSRodney W. Grimes * Map and entry structures are allocated from the general 141df8bae1dSRodney W. Grimes * purpose memory pool with some exceptions: 142df8bae1dSRodney W. Grimes * 143df8bae1dSRodney W. Grimes * - The kernel map and kmem submap are allocated statically. 144df8bae1dSRodney W. Grimes * - Kernel map entries are allocated out of a static pool. 145df8bae1dSRodney W. 
Grimes * 146df8bae1dSRodney W. Grimes * These restrictions are necessary since malloc() uses the 147df8bae1dSRodney W. Grimes * maps and requires map entries. 148df8bae1dSRodney W. Grimes */ 149df8bae1dSRodney W. Grimes 150df8bae1dSRodney W. Grimes vm_offset_t kentry_data; 151df8bae1dSRodney W. Grimes vm_size_t kentry_data_size; 152f708ef1bSPoul-Henning Kamp static vm_map_entry_t kentry_free; 153f708ef1bSPoul-Henning Kamp static vm_map_t kmap_free; 154bd7e5f99SJohn Dyson extern char kstack[]; 155df8bae1dSRodney W. Grimes 156f708ef1bSPoul-Henning Kamp static int kentry_count; 157c3cb3e12SDavid Greenman static vm_offset_t mapvm_start, mapvm, mapvmmax; 158c3cb3e12SDavid Greenman static int mapvmpgcnt; 15926f9a767SRodney W. Grimes 160df8bae1dSRodney W. Grimes static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t)); 161df8bae1dSRodney W. Grimes static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t)); 162f708ef1bSPoul-Henning Kamp static vm_map_entry_t vm_map_entry_create __P((vm_map_t)); 163f708ef1bSPoul-Henning Kamp static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t)); 164f708ef1bSPoul-Henning Kamp static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t)); 165f708ef1bSPoul-Henning Kamp static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t)); 166f708ef1bSPoul-Henning Kamp static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, 167f708ef1bSPoul-Henning Kamp vm_map_entry_t)); 168f708ef1bSPoul-Henning Kamp #ifdef notyet 169f708ef1bSPoul-Henning Kamp static void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t)); 170f708ef1bSPoul-Henning Kamp #endif 171df8bae1dSRodney W. Grimes 1720d94caffSDavid Greenman void 1730d94caffSDavid Greenman vm_map_startup() 174df8bae1dSRodney W. Grimes { 175df8bae1dSRodney W. Grimes register int i; 176df8bae1dSRodney W. Grimes register vm_map_entry_t mep; 177df8bae1dSRodney W. Grimes vm_map_t mp; 178df8bae1dSRodney W. Grimes 179df8bae1dSRodney W. 
Grimes /* 180df8bae1dSRodney W. Grimes * Static map structures for allocation before initialization of 181df8bae1dSRodney W. Grimes * kernel map or kmem map. vm_map_create knows how to deal with them. 182df8bae1dSRodney W. Grimes */ 183df8bae1dSRodney W. Grimes kmap_free = mp = (vm_map_t) kentry_data; 184df8bae1dSRodney W. Grimes i = MAX_KMAP; 185df8bae1dSRodney W. Grimes while (--i > 0) { 186df8bae1dSRodney W. Grimes mp->header.next = (vm_map_entry_t) (mp + 1); 187df8bae1dSRodney W. Grimes mp++; 188df8bae1dSRodney W. Grimes } 189df8bae1dSRodney W. Grimes mp++->header.next = NULL; 190df8bae1dSRodney W. Grimes 191df8bae1dSRodney W. Grimes /* 1920d94caffSDavid Greenman * Form a free list of statically allocated kernel map entries with 1930d94caffSDavid Greenman * the rest. 194df8bae1dSRodney W. Grimes */ 195df8bae1dSRodney W. Grimes kentry_free = mep = (vm_map_entry_t) mp; 19666ecebedSDavid Greenman kentry_count = i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep; 197df8bae1dSRodney W. Grimes while (--i > 0) { 198df8bae1dSRodney W. Grimes mep->next = mep + 1; 199df8bae1dSRodney W. Grimes mep++; 200df8bae1dSRodney W. Grimes } 201df8bae1dSRodney W. Grimes mep->next = NULL; 202df8bae1dSRodney W. Grimes } 203df8bae1dSRodney W. Grimes 204df8bae1dSRodney W. Grimes /* 205df8bae1dSRodney W. Grimes * Allocate a vmspace structure, including a vm_map and pmap, 206df8bae1dSRodney W. Grimes * and initialize those structures. The refcnt is set to 1. 207df8bae1dSRodney W. Grimes * The remaining fields must be initialized by the caller. 208df8bae1dSRodney W. Grimes */ 209df8bae1dSRodney W. Grimes struct vmspace * 210df8bae1dSRodney W. Grimes vmspace_alloc(min, max, pageable) 211df8bae1dSRodney W. Grimes vm_offset_t min, max; 212df8bae1dSRodney W. Grimes int pageable; 213df8bae1dSRodney W. Grimes { 214df8bae1dSRodney W. 
Grimes register struct vmspace *vm; 2150d94caffSDavid Greenman 216d6a6c0f6SDavid Greenman if (mapvmpgcnt == 0 && mapvm == 0) { 217d6a6c0f6SDavid Greenman int s; 2180d94caffSDavid Greenman 219d6a6c0f6SDavid Greenman mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE; 220d6a6c0f6SDavid Greenman s = splhigh(); 22166ecebedSDavid Greenman mapvm_start = mapvm = kmem_alloc_pageable(kmem_map, mapvmpgcnt * PAGE_SIZE); 22266ecebedSDavid Greenman mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE; 223d6a6c0f6SDavid Greenman splx(s); 224d6a6c0f6SDavid Greenman if (!mapvm) 225d6a6c0f6SDavid Greenman mapvmpgcnt = 0; 226d6a6c0f6SDavid Greenman } 227df8bae1dSRodney W. Grimes MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK); 228df8bae1dSRodney W. Grimes bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm); 229df8bae1dSRodney W. Grimes vm_map_init(&vm->vm_map, min, max, pageable); 230df8bae1dSRodney W. Grimes pmap_pinit(&vm->vm_pmap); 231df8bae1dSRodney W. Grimes vm->vm_map.pmap = &vm->vm_pmap; /* XXX */ 232df8bae1dSRodney W. Grimes vm->vm_refcnt = 1; 233df8bae1dSRodney W. Grimes return (vm); 234df8bae1dSRodney W. Grimes } 235df8bae1dSRodney W. Grimes 236df8bae1dSRodney W. Grimes void 237df8bae1dSRodney W. Grimes vmspace_free(vm) 238df8bae1dSRodney W. Grimes register struct vmspace *vm; 239df8bae1dSRodney W. Grimes { 240df8bae1dSRodney W. Grimes 241a1f6d91cSDavid Greenman if (vm->vm_refcnt == 0) 242a1f6d91cSDavid Greenman panic("vmspace_free: attempt to free already freed vmspace"); 243a1f6d91cSDavid Greenman 244df8bae1dSRodney W. Grimes if (--vm->vm_refcnt == 0) { 245bd7e5f99SJohn Dyson int s, i; 246bd7e5f99SJohn Dyson 247bd7e5f99SJohn Dyson pmap_remove(&vm->vm_pmap, (vm_offset_t) kstack, (vm_offset_t) kstack+UPAGES*PAGE_SIZE); 248bd7e5f99SJohn Dyson 249df8bae1dSRodney W. Grimes /* 250df8bae1dSRodney W. Grimes * Lock the map, to wait out all other references to it. 
2510d94caffSDavid Greenman * Delete all of the mappings and pages they hold, then call 2520d94caffSDavid Greenman * the pmap module to reclaim anything left. 253df8bae1dSRodney W. Grimes */ 254df8bae1dSRodney W. Grimes vm_map_lock(&vm->vm_map); 255bd7e5f99SJohn Dyson vm_object_deallocate(vm->vm_upages_obj); 256df8bae1dSRodney W. Grimes (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, 257df8bae1dSRodney W. Grimes vm->vm_map.max_offset); 258a1f6d91cSDavid Greenman vm_map_unlock(&vm->vm_map); 259a1f6d91cSDavid Greenman while( vm->vm_map.ref_count != 1) 260a1f6d91cSDavid Greenman tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0); 261a1f6d91cSDavid Greenman --vm->vm_map.ref_count; 262df8bae1dSRodney W. Grimes pmap_release(&vm->vm_pmap); 263df8bae1dSRodney W. Grimes FREE(vm, M_VMMAP); 264df8bae1dSRodney W. Grimes } 265df8bae1dSRodney W. Grimes } 266df8bae1dSRodney W. Grimes 267df8bae1dSRodney W. Grimes /* 268df8bae1dSRodney W. Grimes * vm_map_create: 269df8bae1dSRodney W. Grimes * 270df8bae1dSRodney W. Grimes * Creates and returns a new empty VM map with 271df8bae1dSRodney W. Grimes * the given physical map structure, and having 272df8bae1dSRodney W. Grimes * the given lower and upper address bounds. 273df8bae1dSRodney W. Grimes */ 2740d94caffSDavid Greenman vm_map_t 2750d94caffSDavid Greenman vm_map_create(pmap, min, max, pageable) 276df8bae1dSRodney W. Grimes pmap_t pmap; 277df8bae1dSRodney W. Grimes vm_offset_t min, max; 278df8bae1dSRodney W. Grimes boolean_t pageable; 279df8bae1dSRodney W. Grimes { 280df8bae1dSRodney W. Grimes register vm_map_t result; 281df8bae1dSRodney W. Grimes 282df8bae1dSRodney W. Grimes if (kmem_map == NULL) { 283df8bae1dSRodney W. Grimes result = kmap_free; 284df8bae1dSRodney W. Grimes kmap_free = (vm_map_t) result->header.next; 285df8bae1dSRodney W. Grimes if (result == NULL) 286df8bae1dSRodney W. Grimes panic("vm_map_create: out of maps"); 287df8bae1dSRodney W. Grimes } else 288df8bae1dSRodney W. 
Grimes MALLOC(result, vm_map_t, sizeof(struct vm_map), 289df8bae1dSRodney W. Grimes M_VMMAP, M_WAITOK); 290df8bae1dSRodney W. Grimes 291df8bae1dSRodney W. Grimes vm_map_init(result, min, max, pageable); 292df8bae1dSRodney W. Grimes result->pmap = pmap; 293df8bae1dSRodney W. Grimes return (result); 294df8bae1dSRodney W. Grimes } 295df8bae1dSRodney W. Grimes 296df8bae1dSRodney W. Grimes /* 297df8bae1dSRodney W. Grimes * Initialize an existing vm_map structure 298df8bae1dSRodney W. Grimes * such as that in the vmspace structure. 299df8bae1dSRodney W. Grimes * The pmap is set elsewhere. 300df8bae1dSRodney W. Grimes */ 301df8bae1dSRodney W. Grimes void 302df8bae1dSRodney W. Grimes vm_map_init(map, min, max, pageable) 303df8bae1dSRodney W. Grimes register struct vm_map *map; 304df8bae1dSRodney W. Grimes vm_offset_t min, max; 305df8bae1dSRodney W. Grimes boolean_t pageable; 306df8bae1dSRodney W. Grimes { 307df8bae1dSRodney W. Grimes map->header.next = map->header.prev = &map->header; 308df8bae1dSRodney W. Grimes map->nentries = 0; 309df8bae1dSRodney W. Grimes map->size = 0; 310df8bae1dSRodney W. Grimes map->ref_count = 1; 311df8bae1dSRodney W. Grimes map->is_main_map = TRUE; 312df8bae1dSRodney W. Grimes map->min_offset = min; 313df8bae1dSRodney W. Grimes map->max_offset = max; 314df8bae1dSRodney W. Grimes map->entries_pageable = pageable; 315df8bae1dSRodney W. Grimes map->first_free = &map->header; 316df8bae1dSRodney W. Grimes map->hint = &map->header; 317df8bae1dSRodney W. Grimes map->timestamp = 0; 318df8bae1dSRodney W. Grimes lock_init(&map->lock, TRUE); 319df8bae1dSRodney W. Grimes } 320df8bae1dSRodney W. Grimes 321df8bae1dSRodney W. Grimes /* 322df8bae1dSRodney W. Grimes * vm_map_entry_create: [ internal use only ] 323df8bae1dSRodney W. Grimes * 324df8bae1dSRodney W. Grimes * Allocates a VM map entry for insertion. 325df8bae1dSRodney W. Grimes * No entry fields are filled in. This routine is 326df8bae1dSRodney W. Grimes */ 32726f9a767SRodney W. 
Grimes static struct vm_map_entry *mappool; 32826f9a767SRodney W. Grimes static int mappoolcnt; 32926f9a767SRodney W. Grimes 330f708ef1bSPoul-Henning Kamp static vm_map_entry_t 33126f9a767SRodney W. Grimes vm_map_entry_create(map) 332df8bae1dSRodney W. Grimes vm_map_t map; 333df8bae1dSRodney W. Grimes { 334df8bae1dSRodney W. Grimes vm_map_entry_t entry; 33526f9a767SRodney W. Grimes int i; 3360d94caffSDavid Greenman 33726f9a767SRodney W. Grimes #define KENTRY_LOW_WATER 64 33866ecebedSDavid Greenman #define MAPENTRY_LOW_WATER 128 339df8bae1dSRodney W. Grimes 34026f9a767SRodney W. Grimes /* 34126f9a767SRodney W. Grimes * This is a *very* nasty (and sort of incomplete) hack!!!! 34226f9a767SRodney W. Grimes */ 34326f9a767SRodney W. Grimes if (kentry_count < KENTRY_LOW_WATER) { 34426f9a767SRodney W. Grimes if (mapvmpgcnt && mapvm) { 34526f9a767SRodney W. Grimes vm_page_t m; 3460d94caffSDavid Greenman 34705f0fdd2SPoul-Henning Kamp m = vm_page_alloc(kmem_object, 348a316d390SJohn Dyson OFF_TO_IDX(mapvm - vm_map_min(kmem_map)), 3496d40c3d3SDavid Greenman (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL); 35005f0fdd2SPoul-Henning Kamp if (m) { 35126f9a767SRodney W. Grimes int newentries; 3520d94caffSDavid Greenman 353a91c5a7eSJohn Dyson newentries = (PAGE_SIZE / sizeof(struct vm_map_entry)); 35426f9a767SRodney W. Grimes vm_page_wire(m); 35526f9a767SRodney W. Grimes m->flags &= ~PG_BUSY; 356d9459480SDavid Greenman m->valid = VM_PAGE_BITS_ALL; 35726f9a767SRodney W. Grimes pmap_enter(vm_map_pmap(kmem_map), mapvm, 35826f9a767SRodney W. Grimes VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1); 359f919ebdeSDavid Greenman m->flags |= PG_WRITEABLE|PG_MAPPED; 36026f9a767SRodney W. Grimes 36126f9a767SRodney W. Grimes entry = (vm_map_entry_t) mapvm; 362a91c5a7eSJohn Dyson mapvm += PAGE_SIZE; 36326f9a767SRodney W. Grimes --mapvmpgcnt; 36426f9a767SRodney W. Grimes 36526f9a767SRodney W. Grimes for (i = 0; i < newentries; i++) { 36626f9a767SRodney W. 
Grimes vm_map_entry_dispose(kernel_map, entry); 36726f9a767SRodney W. Grimes entry++; 36826f9a767SRodney W. Grimes } 36926f9a767SRodney W. Grimes } 37026f9a767SRodney W. Grimes } 37126f9a767SRodney W. Grimes } 37226f9a767SRodney W. Grimes if (map == kernel_map || map == kmem_map || map == pager_map) { 37326f9a767SRodney W. Grimes 37405f0fdd2SPoul-Henning Kamp entry = kentry_free; 37505f0fdd2SPoul-Henning Kamp if (entry) { 37626f9a767SRodney W. Grimes kentry_free = entry->next; 37726f9a767SRodney W. Grimes --kentry_count; 37826f9a767SRodney W. Grimes return entry; 37926f9a767SRodney W. Grimes } 38005f0fdd2SPoul-Henning Kamp entry = mappool; 38105f0fdd2SPoul-Henning Kamp if (entry) { 38226f9a767SRodney W. Grimes mappool = entry->next; 38326f9a767SRodney W. Grimes --mappoolcnt; 38426f9a767SRodney W. Grimes return entry; 38526f9a767SRodney W. Grimes } 38626f9a767SRodney W. Grimes } else { 38705f0fdd2SPoul-Henning Kamp entry = mappool; 38805f0fdd2SPoul-Henning Kamp if (entry) { 38926f9a767SRodney W. Grimes mappool = entry->next; 39026f9a767SRodney W. Grimes --mappoolcnt; 39126f9a767SRodney W. Grimes return entry; 39226f9a767SRodney W. Grimes } 393df8bae1dSRodney W. Grimes MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry), 394df8bae1dSRodney W. Grimes M_VMMAPENT, M_WAITOK); 395df8bae1dSRodney W. Grimes } 396df8bae1dSRodney W. Grimes if (entry == NULL) 397df8bae1dSRodney W. Grimes panic("vm_map_entry_create: out of map entries"); 398df8bae1dSRodney W. Grimes 399df8bae1dSRodney W. Grimes return (entry); 400df8bae1dSRodney W. Grimes } 401df8bae1dSRodney W. Grimes 402df8bae1dSRodney W. Grimes /* 403df8bae1dSRodney W. Grimes * vm_map_entry_dispose: [ internal use only ] 404df8bae1dSRodney W. Grimes * 405df8bae1dSRodney W. Grimes * Inverse of vm_map_entry_create. 406df8bae1dSRodney W. Grimes */ 407f708ef1bSPoul-Henning Kamp static void 40826f9a767SRodney W. Grimes vm_map_entry_dispose(map, entry) 409df8bae1dSRodney W. Grimes vm_map_t map; 410df8bae1dSRodney W. 
Grimes vm_map_entry_t entry; 411df8bae1dSRodney W. Grimes { 412053bbc78SDavid Greenman if ((kentry_count < KENTRY_LOW_WATER) || 41366ecebedSDavid Greenman ((vm_offset_t) entry >= kentry_data && (vm_offset_t) entry < (kentry_data + kentry_data_size)) || 41466ecebedSDavid Greenman ((vm_offset_t) entry >= mapvm_start && (vm_offset_t) entry < mapvmmax)) { 415df8bae1dSRodney W. Grimes entry->next = kentry_free; 416df8bae1dSRodney W. Grimes kentry_free = entry; 41726f9a767SRodney W. Grimes ++kentry_count; 418053bbc78SDavid Greenman return; 41926f9a767SRodney W. Grimes } else { 42026f9a767SRodney W. Grimes if (mappoolcnt < MAPENTRY_LOW_WATER) { 42126f9a767SRodney W. Grimes entry->next = mappool; 42226f9a767SRodney W. Grimes mappool = entry; 42326f9a767SRodney W. Grimes ++mappoolcnt; 42426f9a767SRodney W. Grimes return; 42526f9a767SRodney W. Grimes } 42626f9a767SRodney W. Grimes FREE(entry, M_VMMAPENT); 427df8bae1dSRodney W. Grimes } 428df8bae1dSRodney W. Grimes } 429df8bae1dSRodney W. Grimes 430df8bae1dSRodney W. Grimes /* 431df8bae1dSRodney W. Grimes * vm_map_entry_{un,}link: 432df8bae1dSRodney W. Grimes * 433df8bae1dSRodney W. Grimes * Insert/remove entries from maps. 434df8bae1dSRodney W. Grimes */ 435df8bae1dSRodney W. Grimes #define vm_map_entry_link(map, after_where, entry) \ 436df8bae1dSRodney W. Grimes { \ 437df8bae1dSRodney W. Grimes (map)->nentries++; \ 438df8bae1dSRodney W. Grimes (entry)->prev = (after_where); \ 439df8bae1dSRodney W. Grimes (entry)->next = (after_where)->next; \ 440df8bae1dSRodney W. Grimes (entry)->prev->next = (entry); \ 441df8bae1dSRodney W. Grimes (entry)->next->prev = (entry); \ 442df8bae1dSRodney W. Grimes } 443df8bae1dSRodney W. Grimes #define vm_map_entry_unlink(map, entry) \ 444df8bae1dSRodney W. Grimes { \ 445df8bae1dSRodney W. Grimes (map)->nentries--; \ 446df8bae1dSRodney W. Grimes (entry)->next->prev = (entry)->prev; \ 447df8bae1dSRodney W. Grimes (entry)->prev->next = (entry)->next; \ 448df8bae1dSRodney W. 
Grimes } 449df8bae1dSRodney W. Grimes 450df8bae1dSRodney W. Grimes /* 451df8bae1dSRodney W. Grimes * vm_map_reference: 452df8bae1dSRodney W. Grimes * 453df8bae1dSRodney W. Grimes * Creates another valid reference to the given map. 454df8bae1dSRodney W. Grimes * 455df8bae1dSRodney W. Grimes */ 4560d94caffSDavid Greenman void 4570d94caffSDavid Greenman vm_map_reference(map) 458df8bae1dSRodney W. Grimes register vm_map_t map; 459df8bae1dSRodney W. Grimes { 460df8bae1dSRodney W. Grimes if (map == NULL) 461df8bae1dSRodney W. Grimes return; 462df8bae1dSRodney W. Grimes 463df8bae1dSRodney W. Grimes map->ref_count++; 464df8bae1dSRodney W. Grimes } 465df8bae1dSRodney W. Grimes 466df8bae1dSRodney W. Grimes /* 467df8bae1dSRodney W. Grimes * vm_map_deallocate: 468df8bae1dSRodney W. Grimes * 469df8bae1dSRodney W. Grimes * Removes a reference from the specified map, 470df8bae1dSRodney W. Grimes * destroying it if no references remain. 471df8bae1dSRodney W. Grimes * The map should not be locked. 472df8bae1dSRodney W. Grimes */ 4730d94caffSDavid Greenman void 4740d94caffSDavid Greenman vm_map_deallocate(map) 475df8bae1dSRodney W. Grimes register vm_map_t map; 476df8bae1dSRodney W. Grimes { 477df8bae1dSRodney W. Grimes register int c; 478df8bae1dSRodney W. Grimes 479df8bae1dSRodney W. Grimes if (map == NULL) 480df8bae1dSRodney W. Grimes return; 481df8bae1dSRodney W. Grimes 482a1f6d91cSDavid Greenman c = map->ref_count; 483df8bae1dSRodney W. Grimes 484a1f6d91cSDavid Greenman if (c == 0) 485a1f6d91cSDavid Greenman panic("vm_map_deallocate: deallocating already freed map"); 486a1f6d91cSDavid Greenman 487a1f6d91cSDavid Greenman if (c != 1) { 488a1f6d91cSDavid Greenman --map->ref_count; 48924a1cce3SDavid Greenman wakeup(&map->ref_count); 490df8bae1dSRodney W. Grimes return; 491df8bae1dSRodney W. Grimes } 492df8bae1dSRodney W. Grimes /* 4930d94caffSDavid Greenman * Lock the map, to wait out all other references to it. 494df8bae1dSRodney W. Grimes */ 495df8bae1dSRodney W. 
Grimes 496df8bae1dSRodney W. Grimes vm_map_lock(map); 497df8bae1dSRodney W. Grimes (void) vm_map_delete(map, map->min_offset, map->max_offset); 498a1f6d91cSDavid Greenman --map->ref_count; 499a1f6d91cSDavid Greenman if( map->ref_count != 0) { 500a1f6d91cSDavid Greenman vm_map_unlock(map); 501a1f6d91cSDavid Greenman return; 502a1f6d91cSDavid Greenman } 503df8bae1dSRodney W. Grimes 504df8bae1dSRodney W. Grimes pmap_destroy(map->pmap); 505df8bae1dSRodney W. Grimes FREE(map, M_VMMAP); 506df8bae1dSRodney W. Grimes } 507df8bae1dSRodney W. Grimes 508df8bae1dSRodney W. Grimes /* 509df8bae1dSRodney W. Grimes * vm_map_insert: 510df8bae1dSRodney W. Grimes * 511df8bae1dSRodney W. Grimes * Inserts the given whole VM object into the target 512df8bae1dSRodney W. Grimes * map at the specified address range. The object's 513df8bae1dSRodney W. Grimes * size should match that of the address range. 514df8bae1dSRodney W. Grimes * 515df8bae1dSRodney W. Grimes * Requires that the map be locked, and leaves it so. 516df8bae1dSRodney W. Grimes */ 517df8bae1dSRodney W. Grimes int 518bd7e5f99SJohn Dyson vm_map_insert(map, object, offset, start, end, prot, max, cow) 519df8bae1dSRodney W. Grimes vm_map_t map; 520df8bae1dSRodney W. Grimes vm_object_t object; 521a316d390SJohn Dyson vm_ooffset_t offset; 522df8bae1dSRodney W. Grimes vm_offset_t start; 523df8bae1dSRodney W. Grimes vm_offset_t end; 524bd7e5f99SJohn Dyson vm_prot_t prot, max; 525bd7e5f99SJohn Dyson int cow; 526df8bae1dSRodney W. Grimes { 527df8bae1dSRodney W. Grimes register vm_map_entry_t new_entry; 528df8bae1dSRodney W. Grimes register vm_map_entry_t prev_entry; 529df8bae1dSRodney W. Grimes vm_map_entry_t temp_entry; 530df8bae1dSRodney W. Grimes 531df8bae1dSRodney W. Grimes /* 532df8bae1dSRodney W. Grimes * Check that the start and end points are not bogus. 533df8bae1dSRodney W. Grimes */ 534df8bae1dSRodney W. Grimes 535df8bae1dSRodney W. Grimes if ((start < map->min_offset) || (end > map->max_offset) || 536df8bae1dSRodney W. 
Grimes (start >= end)) 537df8bae1dSRodney W. Grimes return (KERN_INVALID_ADDRESS); 538df8bae1dSRodney W. Grimes 539df8bae1dSRodney W. Grimes /* 5400d94caffSDavid Greenman * Find the entry prior to the proposed starting address; if it's part 5410d94caffSDavid Greenman * of an existing entry, this range is bogus. 542df8bae1dSRodney W. Grimes */ 543df8bae1dSRodney W. Grimes 544df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &temp_entry)) 545df8bae1dSRodney W. Grimes return (KERN_NO_SPACE); 546df8bae1dSRodney W. Grimes 547df8bae1dSRodney W. Grimes prev_entry = temp_entry; 548df8bae1dSRodney W. Grimes 549df8bae1dSRodney W. Grimes /* 5500d94caffSDavid Greenman * Assert that the next entry doesn't overlap the end point. 551df8bae1dSRodney W. Grimes */ 552df8bae1dSRodney W. Grimes 553df8bae1dSRodney W. Grimes if ((prev_entry->next != &map->header) && 554df8bae1dSRodney W. Grimes (prev_entry->next->start < end)) 555df8bae1dSRodney W. Grimes return (KERN_NO_SPACE); 556df8bae1dSRodney W. Grimes 557df8bae1dSRodney W. Grimes /* 5580d94caffSDavid Greenman * See if we can avoid creating a new entry by extending one of our 5590d94caffSDavid Greenman * neighbors. 560df8bae1dSRodney W. Grimes */ 561df8bae1dSRodney W. Grimes 562df8bae1dSRodney W. Grimes if (object == NULL) { 563df8bae1dSRodney W. Grimes if ((prev_entry != &map->header) && 564df8bae1dSRodney W. Grimes (prev_entry->end == start) && 565df8bae1dSRodney W. Grimes (map->is_main_map) && 566df8bae1dSRodney W. Grimes (prev_entry->is_a_map == FALSE) && 567df8bae1dSRodney W. Grimes (prev_entry->is_sub_map == FALSE) && 568df8bae1dSRodney W. Grimes (prev_entry->inheritance == VM_INHERIT_DEFAULT) && 569bd7e5f99SJohn Dyson (prev_entry->protection == prot) && 570bd7e5f99SJohn Dyson (prev_entry->max_protection == max) && 571df8bae1dSRodney W. Grimes (prev_entry->wired_count == 0)) { 572df8bae1dSRodney W. Grimes 573df8bae1dSRodney W. 
Grimes if (vm_object_coalesce(prev_entry->object.vm_object, 574a316d390SJohn Dyson OFF_TO_IDX(prev_entry->offset), 575df8bae1dSRodney W. Grimes (vm_size_t) (prev_entry->end 576df8bae1dSRodney W. Grimes - prev_entry->start), 577df8bae1dSRodney W. Grimes (vm_size_t) (end - prev_entry->end))) { 578df8bae1dSRodney W. Grimes /* 5790d94caffSDavid Greenman * Coalesced the two objects - can extend the 5800d94caffSDavid Greenman * previous map entry to include the new 5810d94caffSDavid Greenman * range. 582df8bae1dSRodney W. Grimes */ 583df8bae1dSRodney W. Grimes map->size += (end - prev_entry->end); 584df8bae1dSRodney W. Grimes prev_entry->end = end; 585df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 586df8bae1dSRodney W. Grimes } 587df8bae1dSRodney W. Grimes } 588df8bae1dSRodney W. Grimes } 589df8bae1dSRodney W. Grimes /* 590df8bae1dSRodney W. Grimes * Create a new entry 591df8bae1dSRodney W. Grimes */ 592df8bae1dSRodney W. Grimes 593df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 594df8bae1dSRodney W. Grimes new_entry->start = start; 595df8bae1dSRodney W. Grimes new_entry->end = end; 596df8bae1dSRodney W. Grimes 597df8bae1dSRodney W. Grimes new_entry->is_a_map = FALSE; 598df8bae1dSRodney W. Grimes new_entry->is_sub_map = FALSE; 599df8bae1dSRodney W. Grimes new_entry->object.vm_object = object; 600df8bae1dSRodney W. Grimes new_entry->offset = offset; 601df8bae1dSRodney W. Grimes 602bd7e5f99SJohn Dyson if (cow & MAP_COPY_NEEDED) 603bd7e5f99SJohn Dyson new_entry->needs_copy = TRUE; 604bd7e5f99SJohn Dyson else 605df8bae1dSRodney W. Grimes new_entry->needs_copy = FALSE; 606df8bae1dSRodney W. Grimes 607bd7e5f99SJohn Dyson if (cow & MAP_COPY_ON_WRITE) 608bd7e5f99SJohn Dyson new_entry->copy_on_write = TRUE; 609bd7e5f99SJohn Dyson else 610bd7e5f99SJohn Dyson new_entry->copy_on_write = FALSE; 611bd7e5f99SJohn Dyson 612df8bae1dSRodney W. Grimes if (map->is_main_map) { 613df8bae1dSRodney W. 
Grimes new_entry->inheritance = VM_INHERIT_DEFAULT; 614bd7e5f99SJohn Dyson new_entry->protection = prot; 615bd7e5f99SJohn Dyson new_entry->max_protection = max; 616df8bae1dSRodney W. Grimes new_entry->wired_count = 0; 617df8bae1dSRodney W. Grimes } 618df8bae1dSRodney W. Grimes /* 619df8bae1dSRodney W. Grimes * Insert the new entry into the list 620df8bae1dSRodney W. Grimes */ 621df8bae1dSRodney W. Grimes 622df8bae1dSRodney W. Grimes vm_map_entry_link(map, prev_entry, new_entry); 623df8bae1dSRodney W. Grimes map->size += new_entry->end - new_entry->start; 624df8bae1dSRodney W. Grimes 625df8bae1dSRodney W. Grimes /* 626df8bae1dSRodney W. Grimes * Update the free space hint 627df8bae1dSRodney W. Grimes */ 628df8bae1dSRodney W. Grimes 629bd7e5f99SJohn Dyson if ((map->first_free == prev_entry) && 630bd7e5f99SJohn Dyson (prev_entry->end >= new_entry->start)) 631df8bae1dSRodney W. Grimes map->first_free = new_entry; 632df8bae1dSRodney W. Grimes 633df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 634df8bae1dSRodney W. Grimes } 635df8bae1dSRodney W. Grimes 636df8bae1dSRodney W. Grimes /* 637df8bae1dSRodney W. Grimes * SAVE_HINT: 638df8bae1dSRodney W. Grimes * 639df8bae1dSRodney W. Grimes * Saves the specified entry as the hint for 64024a1cce3SDavid Greenman * future lookups. 641df8bae1dSRodney W. Grimes */ 642df8bae1dSRodney W. Grimes #define SAVE_HINT(map,value) \ 64324a1cce3SDavid Greenman (map)->hint = (value); 644df8bae1dSRodney W. Grimes 645df8bae1dSRodney W. Grimes /* 646df8bae1dSRodney W. Grimes * vm_map_lookup_entry: [ internal use only ] 647df8bae1dSRodney W. Grimes * 648df8bae1dSRodney W. Grimes * Finds the map entry containing (or 649df8bae1dSRodney W. Grimes * immediately preceding) the specified address 650df8bae1dSRodney W. Grimes * in the given map; the entry is returned 651df8bae1dSRodney W. Grimes * in the "entry" parameter. The boolean 652df8bae1dSRodney W. Grimes * result indicates whether the address is 653df8bae1dSRodney W. 
Grimes * actually contained in the map. 654df8bae1dSRodney W. Grimes */ 6550d94caffSDavid Greenman boolean_t 6560d94caffSDavid Greenman vm_map_lookup_entry(map, address, entry) 657df8bae1dSRodney W. Grimes register vm_map_t map; 658df8bae1dSRodney W. Grimes register vm_offset_t address; 659df8bae1dSRodney W. Grimes vm_map_entry_t *entry; /* OUT */ 660df8bae1dSRodney W. Grimes { 661df8bae1dSRodney W. Grimes register vm_map_entry_t cur; 662df8bae1dSRodney W. Grimes register vm_map_entry_t last; 663df8bae1dSRodney W. Grimes 664df8bae1dSRodney W. Grimes /* 6650d94caffSDavid Greenman * Start looking either from the head of the list, or from the hint. 666df8bae1dSRodney W. Grimes */ 667df8bae1dSRodney W. Grimes 668df8bae1dSRodney W. Grimes cur = map->hint; 669df8bae1dSRodney W. Grimes 670df8bae1dSRodney W. Grimes if (cur == &map->header) 671df8bae1dSRodney W. Grimes cur = cur->next; 672df8bae1dSRodney W. Grimes 673df8bae1dSRodney W. Grimes if (address >= cur->start) { 674df8bae1dSRodney W. Grimes /* 675df8bae1dSRodney W. Grimes * Go from hint to end of list. 676df8bae1dSRodney W. Grimes * 6770d94caffSDavid Greenman * But first, make a quick check to see if we are already looking 6780d94caffSDavid Greenman * at the entry we want (which is usually the case). Note also 6790d94caffSDavid Greenman * that we don't need to save the hint here... it is the same 6800d94caffSDavid Greenman * hint (unless we are at the header, in which case the hint 6810d94caffSDavid Greenman * didn't buy us anything anyway). 682df8bae1dSRodney W. Grimes */ 683df8bae1dSRodney W. Grimes last = &map->header; 684df8bae1dSRodney W. Grimes if ((cur != last) && (cur->end > address)) { 685df8bae1dSRodney W. Grimes *entry = cur; 686df8bae1dSRodney W. Grimes return (TRUE); 687df8bae1dSRodney W. Grimes } 6880d94caffSDavid Greenman } else { 689df8bae1dSRodney W. Grimes /* 690df8bae1dSRodney W. Grimes * Go from start to hint, *inclusively* 691df8bae1dSRodney W. Grimes */ 692df8bae1dSRodney W. 
Grimes last = cur->next; 693df8bae1dSRodney W. Grimes cur = map->header.next; 694df8bae1dSRodney W. Grimes } 695df8bae1dSRodney W. Grimes 696df8bae1dSRodney W. Grimes /* 697df8bae1dSRodney W. Grimes * Search linearly 698df8bae1dSRodney W. Grimes */ 699df8bae1dSRodney W. Grimes 700df8bae1dSRodney W. Grimes while (cur != last) { 701df8bae1dSRodney W. Grimes if (cur->end > address) { 702df8bae1dSRodney W. Grimes if (address >= cur->start) { 703df8bae1dSRodney W. Grimes /* 7040d94caffSDavid Greenman * Save this lookup for future hints, and 7050d94caffSDavid Greenman * return 706df8bae1dSRodney W. Grimes */ 707df8bae1dSRodney W. Grimes 708df8bae1dSRodney W. Grimes *entry = cur; 709df8bae1dSRodney W. Grimes SAVE_HINT(map, cur); 710df8bae1dSRodney W. Grimes return (TRUE); 711df8bae1dSRodney W. Grimes } 712df8bae1dSRodney W. Grimes break; 713df8bae1dSRodney W. Grimes } 714df8bae1dSRodney W. Grimes cur = cur->next; 715df8bae1dSRodney W. Grimes } 716df8bae1dSRodney W. Grimes *entry = cur->prev; 717df8bae1dSRodney W. Grimes SAVE_HINT(map, *entry); 718df8bae1dSRodney W. Grimes return (FALSE); 719df8bae1dSRodney W. Grimes } 720df8bae1dSRodney W. Grimes 721df8bae1dSRodney W. Grimes /* 722df8bae1dSRodney W. Grimes * Find sufficient space for `length' bytes in the given map, starting at 723df8bae1dSRodney W. Grimes * `start'. The map must be locked. Returns 0 on success, 1 on no space. 724df8bae1dSRodney W. Grimes */ 725df8bae1dSRodney W. Grimes int 726df8bae1dSRodney W. Grimes vm_map_findspace(map, start, length, addr) 727df8bae1dSRodney W. Grimes register vm_map_t map; 728df8bae1dSRodney W. Grimes register vm_offset_t start; 729df8bae1dSRodney W. Grimes vm_size_t length; 730df8bae1dSRodney W. Grimes vm_offset_t *addr; 731df8bae1dSRodney W. Grimes { 732df8bae1dSRodney W. Grimes register vm_map_entry_t entry, next; 733df8bae1dSRodney W. Grimes register vm_offset_t end; 734df8bae1dSRodney W. Grimes 735df8bae1dSRodney W. Grimes if (start < map->min_offset) 736df8bae1dSRodney W. 
Grimes start = map->min_offset; 737df8bae1dSRodney W. Grimes if (start > map->max_offset) 738df8bae1dSRodney W. Grimes return (1); 739df8bae1dSRodney W. Grimes 740df8bae1dSRodney W. Grimes /* 7410d94caffSDavid Greenman * Look for the first possible address; if there's already something 7420d94caffSDavid Greenman * at this address, we have to start after it. 743df8bae1dSRodney W. Grimes */ 744df8bae1dSRodney W. Grimes if (start == map->min_offset) { 745df8bae1dSRodney W. Grimes if ((entry = map->first_free) != &map->header) 746df8bae1dSRodney W. Grimes start = entry->end; 747df8bae1dSRodney W. Grimes } else { 748df8bae1dSRodney W. Grimes vm_map_entry_t tmp; 7490d94caffSDavid Greenman 750df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &tmp)) 751df8bae1dSRodney W. Grimes start = tmp->end; 752df8bae1dSRodney W. Grimes entry = tmp; 753df8bae1dSRodney W. Grimes } 754df8bae1dSRodney W. Grimes 755df8bae1dSRodney W. Grimes /* 7560d94caffSDavid Greenman * Look through the rest of the map, trying to fit a new region in the 7570d94caffSDavid Greenman * gap between existing regions, or after the very last region. 758df8bae1dSRodney W. Grimes */ 759df8bae1dSRodney W. Grimes for (;; start = (entry = next)->end) { 760df8bae1dSRodney W. Grimes /* 761df8bae1dSRodney W. Grimes * Find the end of the proposed new region. Be sure we didn't 762df8bae1dSRodney W. Grimes * go beyond the end of the map, or wrap around the address; 763df8bae1dSRodney W. Grimes * if so, we lose. Otherwise, if this is the last entry, or 764df8bae1dSRodney W. Grimes * if the proposed new region fits before the next entry, we 765df8bae1dSRodney W. Grimes * win. 766df8bae1dSRodney W. Grimes */ 767df8bae1dSRodney W. Grimes end = start + length; 768df8bae1dSRodney W. Grimes if (end > map->max_offset || end < start) 769df8bae1dSRodney W. Grimes return (1); 770df8bae1dSRodney W. Grimes next = entry->next; 771df8bae1dSRodney W. Grimes if (next == &map->header || next->start >= end) 772df8bae1dSRodney W. 
Grimes break; 773df8bae1dSRodney W. Grimes } 774df8bae1dSRodney W. Grimes SAVE_HINT(map, entry); 775df8bae1dSRodney W. Grimes *addr = start; 7760d94caffSDavid Greenman if (map == kernel_map && round_page(start + length) > kernel_vm_end) 7770d94caffSDavid Greenman pmap_growkernel(round_page(start + length)); 778df8bae1dSRodney W. Grimes return (0); 779df8bae1dSRodney W. Grimes } 780df8bae1dSRodney W. Grimes 781df8bae1dSRodney W. Grimes /* 782df8bae1dSRodney W. Grimes * vm_map_find finds an unallocated region in the target address 783df8bae1dSRodney W. Grimes * map with the given length. The search is defined to be 784df8bae1dSRodney W. Grimes * first-fit from the specified address; the region found is 785df8bae1dSRodney W. Grimes * returned in the same parameter. 786df8bae1dSRodney W. Grimes * 787df8bae1dSRodney W. Grimes */ 788df8bae1dSRodney W. Grimes int 789bd7e5f99SJohn Dyson vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow) 790df8bae1dSRodney W. Grimes vm_map_t map; 791df8bae1dSRodney W. Grimes vm_object_t object; 792a316d390SJohn Dyson vm_ooffset_t offset; 793df8bae1dSRodney W. Grimes vm_offset_t *addr; /* IN/OUT */ 794df8bae1dSRodney W. Grimes vm_size_t length; 795df8bae1dSRodney W. Grimes boolean_t find_space; 796bd7e5f99SJohn Dyson vm_prot_t prot, max; 797bd7e5f99SJohn Dyson int cow; 798df8bae1dSRodney W. Grimes { 799df8bae1dSRodney W. Grimes register vm_offset_t start; 8008d6e8edeSDavid Greenman int result, s = 0; 801df8bae1dSRodney W. Grimes 802df8bae1dSRodney W. Grimes start = *addr; 8038d6e8edeSDavid Greenman 8048d6e8edeSDavid Greenman if (map == kmem_map) 8058d6e8edeSDavid Greenman s = splhigh(); 8068d6e8edeSDavid Greenman 807bea41bcfSDavid Greenman vm_map_lock(map); 808df8bae1dSRodney W. Grimes if (find_space) { 809df8bae1dSRodney W. Grimes if (vm_map_findspace(map, start, length, addr)) { 810df8bae1dSRodney W. 
Grimes vm_map_unlock(map); 8118d6e8edeSDavid Greenman if (map == kmem_map) 8128d6e8edeSDavid Greenman splx(s); 813df8bae1dSRodney W. Grimes return (KERN_NO_SPACE); 814df8bae1dSRodney W. Grimes } 815df8bae1dSRodney W. Grimes start = *addr; 816df8bae1dSRodney W. Grimes } 817bd7e5f99SJohn Dyson result = vm_map_insert(map, object, offset, 818bd7e5f99SJohn Dyson start, start + length, prot, max, cow); 819df8bae1dSRodney W. Grimes vm_map_unlock(map); 8208d6e8edeSDavid Greenman 8218d6e8edeSDavid Greenman if (map == kmem_map) 8228d6e8edeSDavid Greenman splx(s); 8238d6e8edeSDavid Greenman 824df8bae1dSRodney W. Grimes return (result); 825df8bae1dSRodney W. Grimes } 826df8bae1dSRodney W. Grimes 827df8bae1dSRodney W. Grimes /* 828df8bae1dSRodney W. Grimes * vm_map_simplify_entry: [ internal use only ] 829df8bae1dSRodney W. Grimes * 830df8bae1dSRodney W. Grimes * Simplify the given map entry by: 831df8bae1dSRodney W. Grimes * removing extra sharing maps 832df8bae1dSRodney W. Grimes * [XXX maybe later] merging with a neighbor 833df8bae1dSRodney W. Grimes */ 834f708ef1bSPoul-Henning Kamp static void 8350d94caffSDavid Greenman vm_map_simplify_entry(map, entry) 836df8bae1dSRodney W. Grimes vm_map_t map; 837df8bae1dSRodney W. Grimes vm_map_entry_t entry; 838df8bae1dSRodney W. Grimes { 839de5f6a77SJohn Dyson vm_map_entry_t prev, next; 840de5f6a77SJohn Dyson vm_size_t prevsize, nextsize, esize; 841df8bae1dSRodney W. Grimes 842df8bae1dSRodney W. Grimes /* 8430d94caffSDavid Greenman * If this entry corresponds to a sharing map, then see if we can 8440d94caffSDavid Greenman * remove the level of indirection. If it's not a sharing map, then it 8450d94caffSDavid Greenman * points to a VM object, so see if we can merge with either of our 8460d94caffSDavid Greenman * neighbors. 847df8bae1dSRodney W. Grimes */ 848df8bae1dSRodney W. Grimes 849df8bae1dSRodney W. Grimes if (entry->is_sub_map) 850df8bae1dSRodney W. Grimes return; 851df8bae1dSRodney W. 
Grimes if (entry->is_a_map) { 852de5f6a77SJohn Dyson return; 8530d94caffSDavid Greenman } else { 854de5f6a77SJohn Dyson if (entry->wired_count) 855de5f6a77SJohn Dyson return; 856de5f6a77SJohn Dyson 857de5f6a77SJohn Dyson prev = entry->prev; 858de5f6a77SJohn Dyson prevsize = prev->end - prev->start; 859de5f6a77SJohn Dyson next = entry->next; 860de5f6a77SJohn Dyson nextsize = next->end - next->start; 861de5f6a77SJohn Dyson esize = entry->end - entry->start; 862de5f6a77SJohn Dyson 863de5f6a77SJohn Dyson if (prev != &map->header && 864de5f6a77SJohn Dyson prev->end == entry->start && 865de5f6a77SJohn Dyson prev->is_a_map == FALSE && 866de5f6a77SJohn Dyson prev->is_sub_map == FALSE && 867de5f6a77SJohn Dyson prev->object.vm_object == entry->object.vm_object && 868de5f6a77SJohn Dyson prev->protection == entry->protection && 869de5f6a77SJohn Dyson prev->max_protection == entry->max_protection && 870de5f6a77SJohn Dyson prev->inheritance == entry->inheritance && 871de5f6a77SJohn Dyson prev->needs_copy == entry->needs_copy && 872de5f6a77SJohn Dyson prev->copy_on_write == entry->copy_on_write && 873de5f6a77SJohn Dyson prev->offset + prevsize == entry->offset && 874de5f6a77SJohn Dyson prev->wired_count == 0) { 875de5f6a77SJohn Dyson vm_map_entry_unlink(map, prev); 876de5f6a77SJohn Dyson entry->start = prev->start; 877de5f6a77SJohn Dyson vm_object_deallocate(prev->object.vm_object); 878de5f6a77SJohn Dyson vm_map_entry_dispose(map, prev); 879de5f6a77SJohn Dyson esize = entry->end - entry->start; 880de5f6a77SJohn Dyson } 881de5f6a77SJohn Dyson 882de5f6a77SJohn Dyson if (next != &map->header && 883de5f6a77SJohn Dyson entry->end == next->start && 884de5f6a77SJohn Dyson next->is_a_map == FALSE && 885de5f6a77SJohn Dyson next->is_sub_map == FALSE && 886de5f6a77SJohn Dyson next->object.vm_object == entry->object.vm_object && 887de5f6a77SJohn Dyson next->protection == entry->protection && 888de5f6a77SJohn Dyson next->max_protection == entry->max_protection && 889de5f6a77SJohn Dyson 
next->inheritance == entry->inheritance && 890de5f6a77SJohn Dyson next->needs_copy == entry->needs_copy && 891de5f6a77SJohn Dyson next->copy_on_write == entry->copy_on_write && 892de5f6a77SJohn Dyson entry->offset + esize == next->offset && 893de5f6a77SJohn Dyson next->wired_count == 0) { 894de5f6a77SJohn Dyson vm_map_entry_unlink(map, next); 895de5f6a77SJohn Dyson entry->end = next->end; 896de5f6a77SJohn Dyson vm_object_deallocate(next->object.vm_object); 897de5f6a77SJohn Dyson vm_map_entry_dispose(map, next); 898df8bae1dSRodney W. Grimes } 899df8bae1dSRodney W. Grimes } 900de5f6a77SJohn Dyson } 901df8bae1dSRodney W. Grimes 902df8bae1dSRodney W. Grimes /* 903df8bae1dSRodney W. Grimes * vm_map_clip_start: [ internal use only ] 904df8bae1dSRodney W. Grimes * 905df8bae1dSRodney W. Grimes * Asserts that the given entry begins at or after 906df8bae1dSRodney W. Grimes * the specified address; if necessary, 907df8bae1dSRodney W. Grimes * it splits the entry into two. 908df8bae1dSRodney W. Grimes */ 909df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \ 910df8bae1dSRodney W. Grimes { \ 911df8bae1dSRodney W. Grimes if (startaddr > entry->start) \ 912df8bae1dSRodney W. Grimes _vm_map_clip_start(map, entry, startaddr); \ 913df8bae1dSRodney W. Grimes } 914df8bae1dSRodney W. Grimes 915df8bae1dSRodney W. Grimes /* 916df8bae1dSRodney W. Grimes * This routine is called only when it is known that 917df8bae1dSRodney W. Grimes * the entry must be split. 918df8bae1dSRodney W. Grimes */ 9190d94caffSDavid Greenman static void 9200d94caffSDavid Greenman _vm_map_clip_start(map, entry, start) 921df8bae1dSRodney W. Grimes register vm_map_t map; 922df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 923df8bae1dSRodney W. Grimes register vm_offset_t start; 924df8bae1dSRodney W. Grimes { 925df8bae1dSRodney W. Grimes register vm_map_entry_t new_entry; 926df8bae1dSRodney W. Grimes 927df8bae1dSRodney W. Grimes /* 928df8bae1dSRodney W. 
Grimes * See if we can simplify this entry first 929df8bae1dSRodney W. Grimes */ 930df8bae1dSRodney W. Grimes 931de5f6a77SJohn Dyson vm_map_simplify_entry(map, entry); 932df8bae1dSRodney W. Grimes 933df8bae1dSRodney W. Grimes /* 9340d94caffSDavid Greenman * Split off the front portion -- note that we must insert the new 9350d94caffSDavid Greenman * entry BEFORE this one, so that this entry has the specified 9360d94caffSDavid Greenman * starting address. 937df8bae1dSRodney W. Grimes */ 938df8bae1dSRodney W. Grimes 939df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 940df8bae1dSRodney W. Grimes *new_entry = *entry; 941df8bae1dSRodney W. Grimes 942df8bae1dSRodney W. Grimes new_entry->end = start; 943df8bae1dSRodney W. Grimes entry->offset += (start - entry->start); 944df8bae1dSRodney W. Grimes entry->start = start; 945df8bae1dSRodney W. Grimes 946df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry->prev, new_entry); 947df8bae1dSRodney W. Grimes 948df8bae1dSRodney W. Grimes if (entry->is_a_map || entry->is_sub_map) 949df8bae1dSRodney W. Grimes vm_map_reference(new_entry->object.share_map); 950df8bae1dSRodney W. Grimes else 951df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 952df8bae1dSRodney W. Grimes } 953df8bae1dSRodney W. Grimes 954df8bae1dSRodney W. Grimes /* 955df8bae1dSRodney W. Grimes * vm_map_clip_end: [ internal use only ] 956df8bae1dSRodney W. Grimes * 957df8bae1dSRodney W. Grimes * Asserts that the given entry ends at or before 958df8bae1dSRodney W. Grimes * the specified address; if necessary, 959df8bae1dSRodney W. Grimes * it splits the entry into two. 960df8bae1dSRodney W. Grimes */ 961df8bae1dSRodney W. Grimes 962df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \ 963df8bae1dSRodney W. Grimes { \ 964df8bae1dSRodney W. Grimes if (endaddr < entry->end) \ 965df8bae1dSRodney W. Grimes _vm_map_clip_end(map, entry, endaddr); \ 966df8bae1dSRodney W. Grimes } 967df8bae1dSRodney W. 
Grimes 968df8bae1dSRodney W. Grimes /* 969df8bae1dSRodney W. Grimes * This routine is called only when it is known that 970df8bae1dSRodney W. Grimes * the entry must be split. 971df8bae1dSRodney W. Grimes */ 9720d94caffSDavid Greenman static void 9730d94caffSDavid Greenman _vm_map_clip_end(map, entry, end) 974df8bae1dSRodney W. Grimes register vm_map_t map; 975df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 976df8bae1dSRodney W. Grimes register vm_offset_t end; 977df8bae1dSRodney W. Grimes { 978df8bae1dSRodney W. Grimes register vm_map_entry_t new_entry; 979df8bae1dSRodney W. Grimes 980df8bae1dSRodney W. Grimes /* 9810d94caffSDavid Greenman * Create a new entry and insert it AFTER the specified entry 982df8bae1dSRodney W. Grimes */ 983df8bae1dSRodney W. Grimes 984df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 985df8bae1dSRodney W. Grimes *new_entry = *entry; 986df8bae1dSRodney W. Grimes 987df8bae1dSRodney W. Grimes new_entry->start = entry->end = end; 988df8bae1dSRodney W. Grimes new_entry->offset += (end - entry->start); 989df8bae1dSRodney W. Grimes 990df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry, new_entry); 991df8bae1dSRodney W. Grimes 992df8bae1dSRodney W. Grimes if (entry->is_a_map || entry->is_sub_map) 993df8bae1dSRodney W. Grimes vm_map_reference(new_entry->object.share_map); 994df8bae1dSRodney W. Grimes else 995df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 996df8bae1dSRodney W. Grimes } 997df8bae1dSRodney W. Grimes 998df8bae1dSRodney W. Grimes /* 999df8bae1dSRodney W. Grimes * VM_MAP_RANGE_CHECK: [ internal use only ] 1000df8bae1dSRodney W. Grimes * 1001df8bae1dSRodney W. Grimes * Asserts that the starting and ending region 1002df8bae1dSRodney W. Grimes * addresses fall within the valid range of the map. 1003df8bae1dSRodney W. Grimes */ 1004df8bae1dSRodney W. Grimes #define VM_MAP_RANGE_CHECK(map, start, end) \ 1005df8bae1dSRodney W. Grimes { \ 1006df8bae1dSRodney W. 
Grimes if (start < vm_map_min(map)) \ 1007df8bae1dSRodney W. Grimes start = vm_map_min(map); \ 1008df8bae1dSRodney W. Grimes if (end > vm_map_max(map)) \ 1009df8bae1dSRodney W. Grimes end = vm_map_max(map); \ 1010df8bae1dSRodney W. Grimes if (start > end) \ 1011df8bae1dSRodney W. Grimes start = end; \ 1012df8bae1dSRodney W. Grimes } 1013df8bae1dSRodney W. Grimes 1014df8bae1dSRodney W. Grimes /* 1015df8bae1dSRodney W. Grimes * vm_map_submap: [ kernel use only ] 1016df8bae1dSRodney W. Grimes * 1017df8bae1dSRodney W. Grimes * Mark the given range as handled by a subordinate map. 1018df8bae1dSRodney W. Grimes * 1019df8bae1dSRodney W. Grimes * This range must have been created with vm_map_find, 1020df8bae1dSRodney W. Grimes * and no other operations may have been performed on this 1021df8bae1dSRodney W. Grimes * range prior to calling vm_map_submap. 1022df8bae1dSRodney W. Grimes * 1023df8bae1dSRodney W. Grimes * Only a limited number of operations can be performed 1024df8bae1dSRodney W. Grimes * within this rage after calling vm_map_submap: 1025df8bae1dSRodney W. Grimes * vm_fault 1026df8bae1dSRodney W. Grimes * [Don't try vm_map_copy!] 1027df8bae1dSRodney W. Grimes * 1028df8bae1dSRodney W. Grimes * To remove a submapping, one must first remove the 1029df8bae1dSRodney W. Grimes * range from the superior map, and then destroy the 1030df8bae1dSRodney W. Grimes * submap (if desired). [Better yet, don't try it.] 1031df8bae1dSRodney W. Grimes */ 1032df8bae1dSRodney W. Grimes int 1033df8bae1dSRodney W. Grimes vm_map_submap(map, start, end, submap) 1034df8bae1dSRodney W. Grimes register vm_map_t map; 1035df8bae1dSRodney W. Grimes register vm_offset_t start; 1036df8bae1dSRodney W. Grimes register vm_offset_t end; 1037df8bae1dSRodney W. Grimes vm_map_t submap; 1038df8bae1dSRodney W. Grimes { 1039df8bae1dSRodney W. Grimes vm_map_entry_t entry; 1040df8bae1dSRodney W. Grimes register int result = KERN_INVALID_ARGUMENT; 1041df8bae1dSRodney W. Grimes 1042df8bae1dSRodney W. 
Grimes vm_map_lock(map); 1043df8bae1dSRodney W. Grimes 1044df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1045df8bae1dSRodney W. Grimes 1046df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &entry)) { 1047df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 10480d94caffSDavid Greenman } else 1049df8bae1dSRodney W. Grimes entry = entry->next; 1050df8bae1dSRodney W. Grimes 1051df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1052df8bae1dSRodney W. Grimes 1053df8bae1dSRodney W. Grimes if ((entry->start == start) && (entry->end == end) && 1054df8bae1dSRodney W. Grimes (!entry->is_a_map) && 1055df8bae1dSRodney W. Grimes (entry->object.vm_object == NULL) && 1056df8bae1dSRodney W. Grimes (!entry->copy_on_write)) { 1057df8bae1dSRodney W. Grimes entry->is_a_map = FALSE; 1058df8bae1dSRodney W. Grimes entry->is_sub_map = TRUE; 1059df8bae1dSRodney W. Grimes vm_map_reference(entry->object.sub_map = submap); 1060df8bae1dSRodney W. Grimes result = KERN_SUCCESS; 1061df8bae1dSRodney W. Grimes } 1062df8bae1dSRodney W. Grimes vm_map_unlock(map); 1063df8bae1dSRodney W. Grimes 1064df8bae1dSRodney W. Grimes return (result); 1065df8bae1dSRodney W. Grimes } 1066df8bae1dSRodney W. Grimes 1067df8bae1dSRodney W. Grimes /* 1068df8bae1dSRodney W. Grimes * vm_map_protect: 1069df8bae1dSRodney W. Grimes * 1070df8bae1dSRodney W. Grimes * Sets the protection of the specified address 1071df8bae1dSRodney W. Grimes * region in the target map. If "set_max" is 1072df8bae1dSRodney W. Grimes * specified, the maximum protection is to be set; 1073df8bae1dSRodney W. Grimes * otherwise, only the current protection is affected. 1074df8bae1dSRodney W. Grimes */ 1075df8bae1dSRodney W. Grimes int 1076df8bae1dSRodney W. Grimes vm_map_protect(map, start, end, new_prot, set_max) 1077df8bae1dSRodney W. Grimes register vm_map_t map; 1078df8bae1dSRodney W. Grimes register vm_offset_t start; 1079df8bae1dSRodney W. Grimes register vm_offset_t end; 1080df8bae1dSRodney W. 
Grimes register vm_prot_t new_prot; 1081df8bae1dSRodney W. Grimes register boolean_t set_max; 1082df8bae1dSRodney W. Grimes { 1083df8bae1dSRodney W. Grimes register vm_map_entry_t current; 1084df8bae1dSRodney W. Grimes vm_map_entry_t entry; 1085df8bae1dSRodney W. Grimes 1086df8bae1dSRodney W. Grimes vm_map_lock(map); 1087df8bae1dSRodney W. Grimes 1088df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1089df8bae1dSRodney W. Grimes 1090df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &entry)) { 1091df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 10920d94caffSDavid Greenman } else 1093df8bae1dSRodney W. Grimes entry = entry->next; 1094df8bae1dSRodney W. Grimes 1095df8bae1dSRodney W. Grimes /* 10960d94caffSDavid Greenman * Make a first pass to check for protection violations. 1097df8bae1dSRodney W. Grimes */ 1098df8bae1dSRodney W. Grimes 1099df8bae1dSRodney W. Grimes current = entry; 1100df8bae1dSRodney W. Grimes while ((current != &map->header) && (current->start < end)) { 1101a1f6d91cSDavid Greenman if (current->is_sub_map) { 1102a1f6d91cSDavid Greenman vm_map_unlock(map); 1103df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 1104a1f6d91cSDavid Greenman } 1105df8bae1dSRodney W. Grimes if ((new_prot & current->max_protection) != new_prot) { 1106df8bae1dSRodney W. Grimes vm_map_unlock(map); 1107df8bae1dSRodney W. Grimes return (KERN_PROTECTION_FAILURE); 1108df8bae1dSRodney W. Grimes } 1109df8bae1dSRodney W. Grimes current = current->next; 1110df8bae1dSRodney W. Grimes } 1111df8bae1dSRodney W. Grimes 1112df8bae1dSRodney W. Grimes /* 11130d94caffSDavid Greenman * Go back and fix up protections. [Note that clipping is not 11140d94caffSDavid Greenman * necessary the second time.] 1115df8bae1dSRodney W. Grimes */ 1116df8bae1dSRodney W. Grimes 1117df8bae1dSRodney W. Grimes current = entry; 1118df8bae1dSRodney W. Grimes 1119df8bae1dSRodney W. 
Grimes while ((current != &map->header) && (current->start < end)) { 1120df8bae1dSRodney W. Grimes vm_prot_t old_prot; 1121df8bae1dSRodney W. Grimes 1122df8bae1dSRodney W. Grimes vm_map_clip_end(map, current, end); 1123df8bae1dSRodney W. Grimes 1124df8bae1dSRodney W. Grimes old_prot = current->protection; 1125df8bae1dSRodney W. Grimes if (set_max) 1126df8bae1dSRodney W. Grimes current->protection = 1127df8bae1dSRodney W. Grimes (current->max_protection = new_prot) & 1128df8bae1dSRodney W. Grimes old_prot; 1129df8bae1dSRodney W. Grimes else 1130df8bae1dSRodney W. Grimes current->protection = new_prot; 1131df8bae1dSRodney W. Grimes 1132df8bae1dSRodney W. Grimes /* 11330d94caffSDavid Greenman * Update physical map if necessary. Worry about copy-on-write 11340d94caffSDavid Greenman * here -- CHECK THIS XXX 1135df8bae1dSRodney W. Grimes */ 1136df8bae1dSRodney W. Grimes 1137df8bae1dSRodney W. Grimes if (current->protection != old_prot) { 1138df8bae1dSRodney W. Grimes 1139df8bae1dSRodney W. Grimes #define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \ 1140df8bae1dSRodney W. Grimes VM_PROT_ALL) 1141df8bae1dSRodney W. Grimes #define max(a,b) ((a) > (b) ? (a) : (b)) 1142df8bae1dSRodney W. Grimes 1143df8bae1dSRodney W. Grimes if (current->is_a_map) { 1144df8bae1dSRodney W. Grimes vm_map_entry_t share_entry; 1145df8bae1dSRodney W. Grimes vm_offset_t share_end; 1146df8bae1dSRodney W. Grimes 1147df8bae1dSRodney W. Grimes vm_map_lock(current->object.share_map); 1148df8bae1dSRodney W. Grimes (void) vm_map_lookup_entry( 1149df8bae1dSRodney W. Grimes current->object.share_map, 1150df8bae1dSRodney W. Grimes current->offset, 1151df8bae1dSRodney W. Grimes &share_entry); 1152df8bae1dSRodney W. Grimes share_end = current->offset + 1153df8bae1dSRodney W. Grimes (current->end - current->start); 1154df8bae1dSRodney W. Grimes while ((share_entry != 1155df8bae1dSRodney W. Grimes ¤t->object.share_map->header) && 1156df8bae1dSRodney W. 
Grimes (share_entry->start < share_end)) { 1157df8bae1dSRodney W. Grimes 1158df8bae1dSRodney W. Grimes pmap_protect(map->pmap, 1159df8bae1dSRodney W. Grimes (max(share_entry->start, 1160df8bae1dSRodney W. Grimes current->offset) - 1161df8bae1dSRodney W. Grimes current->offset + 1162df8bae1dSRodney W. Grimes current->start), 1163df8bae1dSRodney W. Grimes min(share_entry->end, 1164df8bae1dSRodney W. Grimes share_end) - 1165df8bae1dSRodney W. Grimes current->offset + 1166df8bae1dSRodney W. Grimes current->start, 1167df8bae1dSRodney W. Grimes current->protection & 1168df8bae1dSRodney W. Grimes MASK(share_entry)); 1169df8bae1dSRodney W. Grimes 1170df8bae1dSRodney W. Grimes share_entry = share_entry->next; 1171df8bae1dSRodney W. Grimes } 1172df8bae1dSRodney W. Grimes vm_map_unlock(current->object.share_map); 11730d94caffSDavid Greenman } else 1174df8bae1dSRodney W. Grimes pmap_protect(map->pmap, current->start, 1175df8bae1dSRodney W. Grimes current->end, 1176df8bae1dSRodney W. Grimes current->protection & MASK(entry)); 1177df8bae1dSRodney W. Grimes #undef max 1178df8bae1dSRodney W. Grimes #undef MASK 1179df8bae1dSRodney W. Grimes } 1180df8bae1dSRodney W. Grimes current = current->next; 1181df8bae1dSRodney W. Grimes } 1182df8bae1dSRodney W. Grimes 1183df8bae1dSRodney W. Grimes vm_map_unlock(map); 1184df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1185df8bae1dSRodney W. Grimes } 1186df8bae1dSRodney W. Grimes 1187df8bae1dSRodney W. Grimes /* 1188df8bae1dSRodney W. Grimes * vm_map_inherit: 1189df8bae1dSRodney W. Grimes * 1190df8bae1dSRodney W. Grimes * Sets the inheritance of the specified address 1191df8bae1dSRodney W. Grimes * range in the target map. Inheritance 1192df8bae1dSRodney W. Grimes * affects how the map will be shared with 1193df8bae1dSRodney W. Grimes * child maps at the time of vm_map_fork. 1194df8bae1dSRodney W. Grimes */ 1195df8bae1dSRodney W. Grimes int 1196df8bae1dSRodney W. Grimes vm_map_inherit(map, start, end, new_inheritance) 1197df8bae1dSRodney W. 
Grimes register vm_map_t map; 1198df8bae1dSRodney W. Grimes register vm_offset_t start; 1199df8bae1dSRodney W. Grimes register vm_offset_t end; 1200df8bae1dSRodney W. Grimes register vm_inherit_t new_inheritance; 1201df8bae1dSRodney W. Grimes { 1202df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 1203df8bae1dSRodney W. Grimes vm_map_entry_t temp_entry; 1204df8bae1dSRodney W. Grimes 1205df8bae1dSRodney W. Grimes switch (new_inheritance) { 1206df8bae1dSRodney W. Grimes case VM_INHERIT_NONE: 1207df8bae1dSRodney W. Grimes case VM_INHERIT_COPY: 1208df8bae1dSRodney W. Grimes case VM_INHERIT_SHARE: 1209df8bae1dSRodney W. Grimes break; 1210df8bae1dSRodney W. Grimes default: 1211df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 1212df8bae1dSRodney W. Grimes } 1213df8bae1dSRodney W. Grimes 1214df8bae1dSRodney W. Grimes vm_map_lock(map); 1215df8bae1dSRodney W. Grimes 1216df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1217df8bae1dSRodney W. Grimes 1218df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &temp_entry)) { 1219df8bae1dSRodney W. Grimes entry = temp_entry; 1220df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 12210d94caffSDavid Greenman } else 1222df8bae1dSRodney W. Grimes entry = temp_entry->next; 1223df8bae1dSRodney W. Grimes 1224df8bae1dSRodney W. Grimes while ((entry != &map->header) && (entry->start < end)) { 1225df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1226df8bae1dSRodney W. Grimes 1227df8bae1dSRodney W. Grimes entry->inheritance = new_inheritance; 1228df8bae1dSRodney W. Grimes 1229df8bae1dSRodney W. Grimes entry = entry->next; 1230df8bae1dSRodney W. Grimes } 1231df8bae1dSRodney W. Grimes 1232df8bae1dSRodney W. Grimes vm_map_unlock(map); 1233df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1234df8bae1dSRodney W. Grimes } 1235df8bae1dSRodney W. Grimes 1236df8bae1dSRodney W. Grimes /* 1237df8bae1dSRodney W. Grimes * vm_map_pageable: 1238df8bae1dSRodney W. Grimes * 1239df8bae1dSRodney W. 
/*
 * vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	new_pageable == TRUE  -> unwire the range
 *	new_pageable == FALSE -> wire the range down
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 *
 *	Returns KERN_SUCCESS, or KERN_INVALID_ADDRESS if the start of the
 *	range is unmapped, or KERN_INVALID_ARGUMENT if the range contains
 *	a hole or (when unwiring) an entry that is not wired.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register boolean_t new_pageable;
{
	register vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	/* First address that failed to wire; used to roll back on error. */
	register vm_offset_t failed = 0;
	int rv;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * Only one pageability change may take place at one time, since
	 * vm_fault assumes it will be called only once for each
	 * wiring/unwiring.  Therefore, we have to make sure we're actually
	 * changing the pageability for the entire region.  We do so before
	 * making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * Actions are rather different for wiring and unwiring, so we have
	 * two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 * Unwiring.  First ensure that the range to be unwired is
		 * really wired down and that there are no holes.  Nothing
		 * is modified during this validation pass.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
				(entry->next == &map->header ||
				    entry->next->start > entry->end))) {
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 *
		 * The lock is made recursive so that vm_fault_unwire can
		 * relock the map without deadlocking against us — TODO
		 * confirm against the lock package.
		 */
		lock_set_recursive(&map->lock);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			entry = entry->next;
		}
		lock_clear_recursive(&map->lock);
	} else {
		/*
		 * Wiring.  We must do this in two passes:
		 *
		 * 1. Holding the write lock, we create any shadow or zero-fill
		 * objects that need to be created. Then we clip each map
		 * entry to the region to be wired and increment its wiring
		 * count. We create objects before clipping the map entries
		 * to avoid object proliferation.
		 *
		 * 2. We downgrade to a read lock, and call vm_fault_wire to
		 * fault in the pages for any newly wired area (wired_count is
		 * 1).
		 *
		 * Downgrading to a read lock for vm_fault_wire avoids a possible
		 * deadlock with another process that may have faulted on one
		 * of the pages to be wired (it would mark the page busy,
		 * blocking us, then in turn block on the map lock that we
		 * hold). Because of problems in the recursive lock package,
		 * we cannot upgrade to a write lock in vm_map_lookup. Thus,
		 * any actions that require the write lock must be done
		 * beforehand.  Because we keep the read lock on the map, the
		 * copy-on-write status of the entries we modify here cannot
		 * change.
		 */

		/*
		 * Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {

				/*
				 * Perform actions of vm_map_lookup that need
				 * the write lock on the map: create a shadow
				 * object for a copy-on-write region, or an
				 * object for a zero-fill region.
				 *
				 * We don't have to do this for entries that
				 * point to sharing maps, because we won't
				 * hold the lock on the sharing map.
				 */
				if (!entry->is_a_map && !entry->is_sub_map) {
					if (entry->needs_copy &&
					    ((entry->protection & VM_PROT_WRITE) != 0)) {

						vm_object_shadow(&entry->object.vm_object,
						    &entry->offset,
						    OFF_TO_IDX(entry->end
							- entry->start));
						entry->needs_copy = FALSE;
					} else if (entry->object.vm_object == NULL) {
						/* Zero-fill region: give it a backing object. */
						entry->object.vm_object =
						    vm_object_allocate(OBJT_DEFAULT,
							OFF_TO_IDX(entry->end - entry->start));
						entry->offset = (vm_offset_t) 0;
					}
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 * Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
				entry->next->start > entry->end)) {
				/*
				 * Found one.  Object creation actions do not
				 * need to be undone, but the wired counts
				 * need to be restored.  Walk backwards
				 * undoing the increments made so far.
				 */
				while (entry != &map->header && entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel is well-behaved, and therefore will not do
		 * anything destructive to this region of the map while
		 * we have it unlocked.  We cannot trust user processes
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			lock_set_recursive(&map->lock);
			lock_write_to_read(&map->lock);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				rv = vm_fault_wire(map, entry->start, entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		/* Restore the locking state established before pass 2. */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		} else {
			lock_clear_recursive(&map->lock);
		}
		if (rv) {
			/*
			 * Some page failed to wire: unwire everything that
			 * was successfully wired (recursive call with
			 * new_pageable == TRUE) and report the error.
			 */
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
	}

	vm_map_unlock(map);

	return (KERN_SUCCESS);
}
/*
 * vm_map_clean
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * Only vnode-backed objects are flushed; other object types are
 * skipped (their range still advances).
 *
 * Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_clean(map, start, end, syncio, invalidate)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t syncio;
	boolean_t invalidate;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}
	/*
	 * Make a first pass to check for holes.  Submaps are rejected
	 * outright; a gap between consecutive entries is an invalid address.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->is_sub_map) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
			current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		/* Offset of `start' within this entry's backing object. */
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->is_a_map || current->is_sub_map) {
			register vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			/*
			 * Sharing map: translate (offset, size) through the
			 * shared map's entry to reach the real object.
			 */
			smap = current->object.share_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		if (object && (object->type == OBJT_VNODE)) {
			/*
			 * Flush pages if writing is allowed. XXX should we continue
			 * on an error?
			 *
			 * XXX Doing async I/O and then removing all the pages from
			 * the object before it completes is probably a very bad
			 * idea.
			 */
			if (current->protection & VM_PROT_WRITE) {
				vm_object_page_clean(object,
				    OFF_TO_IDX(offset),
				    OFF_TO_IDX(offset + size),
				    (syncio||invalidate)?1:0, TRUE);
				if (invalidate)
					vm_object_page_remove(object,
					    OFF_TO_IDX(offset),
					    OFF_TO_IDX(offset + size),
					    FALSE);
			}
		}
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}
/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 * Make the region specified by this entry pageable: release the
 * physical pages wired for it and zero its wiring count.
 *
 * The map in question should be locked.
 * [This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(map, entry)
	vm_map_t map;
	register vm_map_entry_t entry;
{
	/* Unwire the pages first; only then mark the entry as unwired. */
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 * Deallocate the given entry from the target map: unwire it if
 * necessary, unlink it, drop the reference on its backing object
 * (or sharing map), and return the entry to the free pool.
 *
 * NOTE(review): assumes the map is locked by the caller, as with
 * vm_map_entry_unwire above — confirm at call sites.
 */
static void
vm_map_entry_delete(map, entry)
	register vm_map_t map;
	register vm_map_entry_t entry;
{
	if (entry->wired_count != 0)
		vm_map_entry_unwire(map, entry);

	vm_map_entry_unlink(map, entry);
	/* The map no longer covers this entry's address range. */
	map->size -= entry->end - entry->start;

	/* Release whichever kind of backing store the entry references. */
	if (entry->is_a_map || entry->is_sub_map)
		vm_map_deallocate(entry->object.share_map);
	else
		vm_object_deallocate(entry->object.vm_object);

	vm_map_entry_dispose(map, entry);
}
/*
 * vm_map_delete: [ internal use only ]
 *
 * Deallocates the given address range from the target
 * map.
 *
 * When called with a sharing map, removes pages from
 * that region from all physical maps.
 *
 * The caller is expected to hold the map lock (this routine does not
 * take it) — NOTE(review): confirm against callers such as
 * vm_map_remove, which locks around this call.
 */
int
vm_map_delete(map, start, end)
	register vm_map_t map;
	vm_offset_t start;
	register vm_offset_t end;
{
	register vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Fix the lookup hint now, rather than each time though the
		 * loop.
		 */

		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		register vm_offset_t s, e;
		register vm_object_t object;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 * If this is a sharing map, we must remove *all* references
		 * to this data, since we can't find all of the physical maps
		 * which are sharing it.
		 */

		if (object == kernel_object || object == kmem_object)
			vm_object_page_remove(object, OFF_TO_IDX(entry->offset),
			    OFF_TO_IDX(entry->offset + (e - s)), FALSE);
		else if (!map->is_main_map)
			vm_object_pmap_remove(object,
			    OFF_TO_IDX(entry->offset),
			    OFF_TO_IDX(entry->offset + (e - s)));
		else
			pmap_remove(map->pmap, s, e);

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */

		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}
Grimes pmap_remove(map->pmap, s, e); 1688df8bae1dSRodney W. Grimes 1689df8bae1dSRodney W. Grimes /* 16900d94caffSDavid Greenman * Delete the entry (which may delete the object) only after 16910d94caffSDavid Greenman * removing all pmap entries pointing to its pages. 16920d94caffSDavid Greenman * (Otherwise, its page frames may be reallocated, and any 16930d94caffSDavid Greenman * modify bits will be set in the wrong object!) 1694df8bae1dSRodney W. Grimes */ 1695df8bae1dSRodney W. Grimes 1696df8bae1dSRodney W. Grimes vm_map_entry_delete(map, entry); 1697df8bae1dSRodney W. Grimes entry = next; 1698df8bae1dSRodney W. Grimes } 1699df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1700df8bae1dSRodney W. Grimes } 1701df8bae1dSRodney W. Grimes 1702df8bae1dSRodney W. Grimes /* 1703df8bae1dSRodney W. Grimes * vm_map_remove: 1704df8bae1dSRodney W. Grimes * 1705df8bae1dSRodney W. Grimes * Remove the given address range from the target map. 1706df8bae1dSRodney W. Grimes * This is the exported form of vm_map_delete. 1707df8bae1dSRodney W. Grimes */ 1708df8bae1dSRodney W. Grimes int 1709df8bae1dSRodney W. Grimes vm_map_remove(map, start, end) 1710df8bae1dSRodney W. Grimes register vm_map_t map; 1711df8bae1dSRodney W. Grimes register vm_offset_t start; 1712df8bae1dSRodney W. Grimes register vm_offset_t end; 1713df8bae1dSRodney W. Grimes { 17148d6e8edeSDavid Greenman register int result, s = 0; 17158d6e8edeSDavid Greenman 17168d6e8edeSDavid Greenman if (map == kmem_map) 17178d6e8edeSDavid Greenman s = splhigh(); 1718df8bae1dSRodney W. Grimes 1719df8bae1dSRodney W. Grimes vm_map_lock(map); 1720df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1721df8bae1dSRodney W. Grimes result = vm_map_delete(map, start, end); 1722df8bae1dSRodney W. Grimes vm_map_unlock(map); 1723df8bae1dSRodney W. Grimes 17248d6e8edeSDavid Greenman if (map == kmem_map) 17258d6e8edeSDavid Greenman splx(s); 17268d6e8edeSDavid Greenman 1727df8bae1dSRodney W. 
Grimes return (result); 1728df8bae1dSRodney W. Grimes } 1729df8bae1dSRodney W. Grimes 1730df8bae1dSRodney W. Grimes /* 1731df8bae1dSRodney W. Grimes * vm_map_check_protection: 1732df8bae1dSRodney W. Grimes * 1733df8bae1dSRodney W. Grimes * Assert that the target map allows the specified 1734df8bae1dSRodney W. Grimes * privilege on the entire address region given. 1735df8bae1dSRodney W. Grimes * The entire region must be allocated. 1736df8bae1dSRodney W. Grimes */ 17370d94caffSDavid Greenman boolean_t 17380d94caffSDavid Greenman vm_map_check_protection(map, start, end, protection) 1739df8bae1dSRodney W. Grimes register vm_map_t map; 1740df8bae1dSRodney W. Grimes register vm_offset_t start; 1741df8bae1dSRodney W. Grimes register vm_offset_t end; 1742df8bae1dSRodney W. Grimes register vm_prot_t protection; 1743df8bae1dSRodney W. Grimes { 1744df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 1745df8bae1dSRodney W. Grimes vm_map_entry_t tmp_entry; 1746df8bae1dSRodney W. Grimes 1747df8bae1dSRodney W. Grimes if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 1748df8bae1dSRodney W. Grimes return (FALSE); 1749df8bae1dSRodney W. Grimes } 1750df8bae1dSRodney W. Grimes entry = tmp_entry; 1751df8bae1dSRodney W. Grimes 1752df8bae1dSRodney W. Grimes while (start < end) { 1753df8bae1dSRodney W. Grimes if (entry == &map->header) { 1754df8bae1dSRodney W. Grimes return (FALSE); 1755df8bae1dSRodney W. Grimes } 1756df8bae1dSRodney W. Grimes /* 1757df8bae1dSRodney W. Grimes * No holes allowed! 1758df8bae1dSRodney W. Grimes */ 1759df8bae1dSRodney W. Grimes 1760df8bae1dSRodney W. Grimes if (start < entry->start) { 1761df8bae1dSRodney W. Grimes return (FALSE); 1762df8bae1dSRodney W. Grimes } 1763df8bae1dSRodney W. Grimes /* 1764df8bae1dSRodney W. Grimes * Check protection associated with entry. 1765df8bae1dSRodney W. Grimes */ 1766df8bae1dSRodney W. Grimes 1767df8bae1dSRodney W. Grimes if ((entry->protection & protection) != protection) { 1768df8bae1dSRodney W. 
/*
 * vm_map_copy_entry:
 *
 * Copies the contents of the source entry to the destination
 * entry.  The entries *must* be aligned properly.
 *
 * Unwired sources are shared copy-on-write; wired sources are
 * physically copied by simulating faults.
 */
static void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t src_map, dst_map;
	register vm_map_entry_t src_entry, dst_entry;
{
	vm_pindex_t temp_pindex;

	/* Submap entries are not copied here. */
	if (src_entry->is_sub_map || dst_entry->is_sub_map)
		return;

	/* The destination is expected to start with no backing object. */
	if (dst_entry->object.vm_object != NULL)
		printf("vm_map_copy_entry: dst_entry object not NULL!\n");

	/*
	 * If our destination map was wired down, unwire it now.
	 */

	if (dst_entry->wired_count != 0)
		vm_map_entry_unwire(dst_map, dst_entry);

	if (src_entry->wired_count == 0) {

		boolean_t src_needs_copy;

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if (!src_entry->needs_copy) {

			boolean_t su;

			/*
			 * If the source entry has only one mapping, we can
			 * just protect the virtual address range.
			 */
			if (!(su = src_map->is_main_map)) {
				su = (src_map->ref_count == 1);
			}
			if (su) {
				pmap_protect(src_map->pmap,
				    src_entry->start,
				    src_entry->end,
				    src_entry->protection & ~VM_PROT_WRITE);
			} else {
				/* Multiple mappings: write-protect at the object. */
				vm_object_pmap_copy(src_entry->object.vm_object,
				    OFF_TO_IDX(src_entry->offset),
				    OFF_TO_IDX(src_entry->offset + (src_entry->end
					- src_entry->start)));
			}
		}
		/*
		 * Make a copy of the object.
		 */
		temp_pindex = OFF_TO_IDX(dst_entry->offset);
		vm_object_copy(src_entry->object.vm_object,
		    OFF_TO_IDX(src_entry->offset),
		    &dst_entry->object.vm_object,
		    &temp_pindex,
		    &src_needs_copy);
		dst_entry->offset = IDX_TO_OFF(temp_pindex);
		/*
		 * If we didn't get a copy-object now, mark the source map
		 * entry so that a shadow will be created to hold its changed
		 * pages.
		 */
		if (src_needs_copy)
			src_entry->needs_copy = TRUE;

		/*
		 * The destination always needs to have a shadow created.
		 */
		dst_entry->needs_copy = TRUE;

		/*
		 * Mark the entries copy-on-write, so that write-enabling the
		 * entry won't make copy-on-write pages writable.
		 */
		src_entry->copy_on_write = TRUE;
		dst_entry->copy_on_write = TRUE;

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}
Grimes /* 18460d94caffSDavid Greenman * If we didn't get a copy-object now, mark the source map 18470d94caffSDavid Greenman * entry so that a shadow will be created to hold its changed 18480d94caffSDavid Greenman * pages. 1849df8bae1dSRodney W. Grimes */ 1850df8bae1dSRodney W. Grimes if (src_needs_copy) 1851df8bae1dSRodney W. Grimes src_entry->needs_copy = TRUE; 1852df8bae1dSRodney W. Grimes 1853df8bae1dSRodney W. Grimes /* 18540d94caffSDavid Greenman * The destination always needs to have a shadow created. 1855df8bae1dSRodney W. Grimes */ 1856df8bae1dSRodney W. Grimes dst_entry->needs_copy = TRUE; 1857df8bae1dSRodney W. Grimes 1858df8bae1dSRodney W. Grimes /* 18590d94caffSDavid Greenman * Mark the entries copy-on-write, so that write-enabling the 18600d94caffSDavid Greenman * entry won't make copy-on-write pages writable. 1861df8bae1dSRodney W. Grimes */ 1862df8bae1dSRodney W. Grimes src_entry->copy_on_write = TRUE; 1863df8bae1dSRodney W. Grimes dst_entry->copy_on_write = TRUE; 1864df8bae1dSRodney W. Grimes 1865df8bae1dSRodney W. Grimes pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 1866df8bae1dSRodney W. Grimes dst_entry->end - dst_entry->start, src_entry->start); 18670d94caffSDavid Greenman } else { 1868df8bae1dSRodney W. Grimes /* 1869df8bae1dSRodney W. Grimes * Of course, wired down pages can't be set copy-on-write. 18700d94caffSDavid Greenman * Cause wired pages to be copied into the new map by 18710d94caffSDavid Greenman * simulating faults (the new pages are pageable) 1872df8bae1dSRodney W. Grimes */ 1873df8bae1dSRodney W. Grimes vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 1874df8bae1dSRodney W. Grimes } 1875df8bae1dSRodney W. Grimes } 1876df8bae1dSRodney W. Grimes 1877df8bae1dSRodney W. Grimes /* 1878df8bae1dSRodney W. Grimes * vmspace_fork: 1879df8bae1dSRodney W. Grimes * Create a new process vmspace structure and vm_map 1880df8bae1dSRodney W. Grimes * based on those of an existing process. The new map 1881df8bae1dSRodney W. 
/*
 * vmspace_fork:
 *	Create a new process vmspace structure and vm_map
 *	based on those of an existing process.  The new map
 *	is based on the old map, according to the inheritance
 *	values on the regions in that map.
 *
 *	The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
	register struct vmspace *vm1;
{
	register struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	pmap_t new_pmap;
	vm_object_t object;
	vm_page_t p;		/* NOTE(review): declared but never used in this function */

	/*
	 * Hold the source map locked for the whole traversal so the entry
	 * list cannot change underneath us.
	 */
	vm_map_lock(old_map);

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
	    old_map->entries_pageable);
	/*
	 * Copy the "startcopy" tail of the vmspace structure (everything
	 * from vm_startcopy to the end of the struct) into the child.
	 */
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_pmap = &vm2->vm_pmap;	/* XXX */
	new_map = &vm2->vm_map;	/* XXX */

	/*
	 * Walk every entry of the parent map and clone it into the child
	 * according to its inheritance attribute.
	 */
	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->is_sub_map)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			/* Not inherited: simply skip this entry. */
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, referencing the sharing map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			/* Wiring is per-process; the child starts unwired. */
			new_entry->wired_count = 0;
			/*
			 * Both maps now reference the same backing object;
			 * take an extra reference.
			 * NOTE(review): assumes the entry has a VM object
			 * (no NULL check) -- confirm against callers.
			 */
			object = new_entry->object.vm_object;
			++object->ref_count;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */

			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);

			/*
			 * Update the physical map: copy the parent's
			 * translations for this range into the child pmap.
			 */

			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.  The child
			 * gets no object yet; vm_map_copy_entry() below sets
			 * up copy-on-write (or faults wired pages across).
			 */

			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			new_entry->is_a_map = FALSE;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	/* Virtual size is inherited wholesale from the parent. */
	new_map->size = old_map->size;
	vm_map_unlock(old_map);

	return (vm2);
}
/*
 * vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(var_map, vaddr, fault_type, out_entry,
    object, pindex, out_prot, wired, single_use)
	vm_map_t *var_map;	/* IN/OUT */
	register vm_offset_t vaddr;
	register vm_prot_t fault_type;

	vm_map_entry_t *out_entry;	/* OUT */
	vm_object_t *object;	/* OUT */
	vm_pindex_t *pindex;	/* OUT */
	vm_prot_t *out_prot;	/* OUT */
	boolean_t *wired;	/* OUT */
	boolean_t *single_use;	/* OUT */
{
	vm_map_t share_map;
	vm_offset_t share_offset;
	register vm_map_entry_t entry;
	register vm_map_t map = *var_map;
	register vm_prot_t prot;
	register boolean_t su;

RetryLookup:;

	/*
	 * Lookup the faulting address.
	 */

	vm_map_lock_read(map);

/* Early-exit helper: drop the read lock and return the given status. */
#define	RETURN(why) \
		{ \
		vm_map_unlock_read(map); \
		return(why); \
		}

	/*
	 * If the map has an interesting hint, try it before calling full
	 * blown lookup routine.
	 */

	entry = map->hint;

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}
	/*
	 * Handle submaps: descend into the submap and restart the whole
	 * lookup there (the caller's map pointer is updated in place).
	 */

	if (entry->is_sub_map) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}
	/*
	 * Check whether this task is allowed to have this page.
	 */

	prot = entry->protection;
	if ((fault_type & (prot)) != fault_type)
		RETURN(KERN_PROTECTION_FAILURE);

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */

	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If we don't already have a VM object, track it down through the
	 * share map.
	 */

	su = !entry->is_a_map;
	if (su) {
		share_map = map;
		share_offset = vaddr;
	} else {
		vm_map_entry_t share_entry;

		/*
		 * Compute the sharing map, and offset into it.
		 */

		share_map = entry->object.share_map;
		share_offset = (vaddr - entry->start) + entry->offset;

		/*
		 * Look for the backing store object and offset
		 */

		vm_map_lock_read(share_map);

		if (!vm_map_lookup_entry(share_map, share_offset,
			&share_entry)) {
			vm_map_unlock_read(share_map);
			RETURN(KERN_INVALID_ADDRESS);
		}
		entry = share_entry;
	}

	/*
	 * If the entry was copy-on-write, we either ...
	 */

	if (entry->needs_copy) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the sharing map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the share map to the new
			 * object.
			 *
			 * The read->write lock upgrade can fail; if it does
			 * the lock was lost entirely, so restart the lookup
			 * from scratch.
			 */

			if (lock_read_to_write(&share_map->lock)) {
				if (share_map != map)
					vm_map_unlock_read(map);
				goto RetryLookup;
			}
			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    OFF_TO_IDX(entry->end - entry->start));

			entry->needs_copy = FALSE;

			/* Downgrade back to a read lock for the return path. */
			lock_write_to_read(&share_map->lock);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}
	/*
	 * Create an object if necessary (same upgrade-or-retry dance as
	 * above).
	 */
	if (entry->object.vm_object == NULL) {

		if (lock_read_to_write(&share_map->lock)) {
			if (share_map != map)
				vm_map_unlock_read(map);
			goto RetryLookup;
		}
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    OFF_TO_IDX(entry->end - entry->start));
		entry->offset = 0;
		lock_write_to_read(&share_map->lock);
	}
	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */

	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */

	if (!su) {
		su = (share_map->ref_count == 1);
	}
	*out_prot = prot;
	*single_use = su;

	return (KERN_SUCCESS);

#undef	RETURN
}
/*
 * vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */

void
vm_map_lookup_done(map, entry)
	register vm_map_t map;
	vm_map_entry_t entry;
{
	/*
	 * If this entry references a share map, unlock it first
	 * (vm_map_lookup left it read-locked).
	 */

	if (entry->is_a_map)
		vm_map_unlock_read(entry->object.share_map);

	/*
	 * Unlock the main-level map
	 */

	vm_map_unlock_read(map);
}

/*
 *	Routine:	vm_map_simplify
 *	Purpose:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify(map, start)
	vm_map_t map;
	vm_offset_t start;
{
	vm_map_entry_t this_entry;
	vm_map_entry_t prev_entry;

	vm_map_lock(map);
	/*
	 * The two entries may be merged only when they are exactly
	 * adjacent and agree on every attribute, and their backing object
	 * offsets line up contiguously.
	 */
	if (
	    (vm_map_lookup_entry(map, start, &this_entry)) &&
	    ((prev_entry = this_entry->prev) != &map->header) &&

	    (prev_entry->end == start) &&
	    (map->is_main_map) &&

	    (prev_entry->is_a_map == FALSE) &&
	    (prev_entry->is_sub_map == FALSE) &&

	    (this_entry->is_a_map == FALSE) &&
	    (this_entry->is_sub_map == FALSE) &&

	    (prev_entry->inheritance == this_entry->inheritance) &&
	    (prev_entry->protection == this_entry->protection) &&
	    (prev_entry->max_protection == this_entry->max_protection) &&
	    (prev_entry->wired_count == this_entry->wired_count) &&

	    (prev_entry->copy_on_write == this_entry->copy_on_write) &&
	    (prev_entry->needs_copy == this_entry->needs_copy) &&

	    (prev_entry->object.vm_object == this_entry->object.vm_object) &&
	    ((prev_entry->offset + (prev_entry->end - prev_entry->start))
		== this_entry->offset)
	    ) {
		if (map->first_free == this_entry)
			map->first_free = prev_entry;

		/*
		 * Only merge when the object is quiescent; then absorb
		 * this_entry into prev_entry, drop the duplicate object
		 * reference, and discard the now-unused entry.
		 */
		if (!this_entry->object.vm_object->paging_in_progress) {
			SAVE_HINT(map, prev_entry);
			vm_map_entry_unlink(map, this_entry);
			prev_entry->end = this_entry->end;
			vm_object_deallocate(this_entry->object.vm_object);
			vm_map_entry_dispose(map, this_entry);
		}
	}
	vm_map_unlock(map);
}

#ifdef DDB
/*
 *	vm_map_print:	[ debug ]
 *
 *	DDB command to dump a vm_map and (with "full") each of its entries,
 *	recursing into share maps and backing objects.  Parameters follow
 *	the generic DDB command signature; only imap and full are used.
 */
void
vm_map_print(imap, full, dummy3, dummy4)
	/* db_expr_t */ int imap;
	boolean_t full;
	/* db_expr_t */ int dummy3;
	char *dummy4;
{
	register vm_map_entry_t entry;
	register vm_map_t map = (vm_map_t)imap;	/* XXX */

	iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
	    (map->is_main_map ? "Task" : "Share"),
	    (int) map, (int) (map->pmap), map->ref_count, map->nentries,
	    map->timestamp);

	/* Summary only, unless a full dump was requested at top level. */
	if (!full && indent)
		return;

	indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
		    (int) entry, (int) entry->start, (int) entry->end);
		if (map->is_main_map) {
			static char *inheritance_name[4] =
			{"share", "copy", "none", "donate_copy"};

			printf("prot=%x/%x/%s, ",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[entry->inheritance]);
			if (entry->wired_count != 0)
				printf("wired, ");
		}
		if (entry->is_a_map || entry->is_sub_map) {
			printf("share=0x%x, offset=0x%x\n",
			    (int) entry->object.share_map,
			    (int) entry->offset);
			/*
			 * Recurse into the share map, but only once per
			 * run of entries that reference the same one.
			 */
			if ((entry->prev == &map->header) ||
			    (!entry->prev->is_a_map) ||
			    (entry->prev->object.share_map !=
				entry->object.share_map)) {
				indent += 2;
				vm_map_print((int)entry->object.share_map,
				    full, 0, (char *)0);
				indent -= 2;
			}
		} else {
			printf("object=0x%x, offset=0x%x",
			    (int) entry->object.vm_object,
			    (int) entry->offset);
			if (entry->copy_on_write)
				printf(", copy (%s)",
				    entry->needs_copy ? "needed" : "done");
			printf("\n");

			/*
			 * Likewise, print each distinct backing object
			 * only once.
			 */
			if ((entry->prev == &map->header) ||
			    (entry->prev->is_a_map) ||
			    (entry->prev->object.vm_object !=
				entry->object.vm_object)) {
				indent += 2;
				vm_object_print((int)entry->object.vm_object,
				    full, 0, (char *)0);
				indent -= 2;
			}
		}
	}
	indent -= 2;
}
#endif