1df8bae1dSRodney W. Grimes /* 2df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 3df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 4df8bae1dSRodney W. Grimes * 5df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 6df8bae1dSRodney W. Grimes * The Mach Operating System project at Carnegie-Mellon University. 7df8bae1dSRodney W. Grimes * 8df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 9df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 10df8bae1dSRodney W. Grimes * are met: 11df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 12df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 13df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 15df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 16df8bae1dSRodney W. Grimes * 3. All advertising materials mentioning features or use of this software 17df8bae1dSRodney W. Grimes * must display the following acknowledgement: 18df8bae1dSRodney W. Grimes * This product includes software developed by the University of 19df8bae1dSRodney W. Grimes * California, Berkeley and its contributors. 20df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 21df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 22df8bae1dSRodney W. Grimes * without specific prior written permission. 23df8bae1dSRodney W. Grimes * 24df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25df8bae1dSRodney W. 
Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34df8bae1dSRodney W. Grimes * SUCH DAMAGE. 35df8bae1dSRodney W. Grimes * 363c4dd356SDavid Greenman * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 37df8bae1dSRodney W. Grimes * 38df8bae1dSRodney W. Grimes * 39df8bae1dSRodney W. Grimes * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40df8bae1dSRodney W. Grimes * All rights reserved. 41df8bae1dSRodney W. Grimes * 42df8bae1dSRodney W. Grimes * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43df8bae1dSRodney W. Grimes * 44df8bae1dSRodney W. Grimes * Permission to use, copy, modify and distribute this software and 45df8bae1dSRodney W. Grimes * its documentation is hereby granted, provided that both the copyright 46df8bae1dSRodney W. Grimes * notice and this permission notice appear in all copies of the 47df8bae1dSRodney W. Grimes * software, derivative works or modified versions, and any portions 48df8bae1dSRodney W. Grimes * thereof, and that both notices appear in supporting documentation. 49df8bae1dSRodney W. Grimes * 50df8bae1dSRodney W. 
Grimes * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51df8bae1dSRodney W. Grimes * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52df8bae1dSRodney W. Grimes * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53df8bae1dSRodney W. Grimes * 54df8bae1dSRodney W. Grimes * Carnegie Mellon requests users of this software to return to 55df8bae1dSRodney W. Grimes * 56df8bae1dSRodney W. Grimes * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57df8bae1dSRodney W. Grimes * School of Computer Science 58df8bae1dSRodney W. Grimes * Carnegie Mellon University 59df8bae1dSRodney W. Grimes * Pittsburgh PA 15213-3890 60df8bae1dSRodney W. Grimes * 61df8bae1dSRodney W. Grimes * any improvements or extensions that they make and grant Carnegie the 62df8bae1dSRodney W. Grimes * rights to redistribute these changes. 633c4dd356SDavid Greenman * 6430dcfc09SJohn Dyson * $Id: vm_map.c,v 1.40 1996/03/28 04:22:17 dyson Exp $ 65df8bae1dSRodney W. Grimes */ 66df8bae1dSRodney W. Grimes 67df8bae1dSRodney W. Grimes /* 68df8bae1dSRodney W. Grimes * Virtual memory mapping module. 69df8bae1dSRodney W. Grimes */ 700e41ee30SGarrett Wollman #include "opt_ddb.h" 71df8bae1dSRodney W. Grimes 72df8bae1dSRodney W. Grimes #include <sys/param.h> 73df8bae1dSRodney W. Grimes #include <sys/systm.h> 74df8bae1dSRodney W. Grimes #include <sys/malloc.h> 75b5e8ce9fSBruce Evans #include <sys/proc.h> 76efeaf95aSDavid Greenman #include <sys/queue.h> 77efeaf95aSDavid Greenman #include <sys/vmmeter.h> 78df8bae1dSRodney W. Grimes 79df8bae1dSRodney W. Grimes #include <vm/vm.h> 80efeaf95aSDavid Greenman #include <vm/vm_param.h> 81efeaf95aSDavid Greenman #include <vm/vm_prot.h> 82efeaf95aSDavid Greenman #include <vm/vm_inherit.h> 83efeaf95aSDavid Greenman #include <vm/lock.h> 84efeaf95aSDavid Greenman #include <vm/pmap.h> 85efeaf95aSDavid Greenman #include <vm/vm_map.h> 86df8bae1dSRodney W. Grimes #include <vm/vm_page.h> 87df8bae1dSRodney W. 
Grimes #include <vm/vm_object.h> 8826f9a767SRodney W. Grimes #include <vm/vm_kern.h> 8924a1cce3SDavid Greenman #include <vm/vm_pager.h> 90efeaf95aSDavid Greenman #include <vm/vm_extern.h> 91df8bae1dSRodney W. Grimes 92df8bae1dSRodney W. Grimes /* 93df8bae1dSRodney W. Grimes * Virtual memory maps provide for the mapping, protection, 94df8bae1dSRodney W. Grimes * and sharing of virtual memory objects. In addition, 95df8bae1dSRodney W. Grimes * this module provides for an efficient virtual copy of 96df8bae1dSRodney W. Grimes * memory from one map to another. 97df8bae1dSRodney W. Grimes * 98df8bae1dSRodney W. Grimes * Synchronization is required prior to most operations. 99df8bae1dSRodney W. Grimes * 100df8bae1dSRodney W. Grimes * Maps consist of an ordered doubly-linked list of simple 101df8bae1dSRodney W. Grimes * entries; a single hint is used to speed up lookups. 102df8bae1dSRodney W. Grimes * 103df8bae1dSRodney W. Grimes * In order to properly represent the sharing of virtual 104df8bae1dSRodney W. Grimes * memory regions among maps, the map structure is bi-level. 105df8bae1dSRodney W. Grimes * Top-level ("address") maps refer to regions of sharable 106df8bae1dSRodney W. Grimes * virtual memory. These regions are implemented as 107df8bae1dSRodney W. Grimes * ("sharing") maps, which then refer to the actual virtual 108df8bae1dSRodney W. Grimes * memory objects. When two address maps "share" memory, 109df8bae1dSRodney W. Grimes * their top-level maps both have references to the same 110df8bae1dSRodney W. Grimes * sharing map. When memory is virtual-copied from one 111df8bae1dSRodney W. Grimes * address map to another, the references in the sharing 112df8bae1dSRodney W. Grimes * maps are actually copied -- no copying occurs at the 113df8bae1dSRodney W. Grimes * virtual memory object level. 114df8bae1dSRodney W. Grimes * 115df8bae1dSRodney W. Grimes * Since portions of maps are specified by start/end addreses, 116df8bae1dSRodney W. 
Grimes * which may not align with existing map entries, all 117df8bae1dSRodney W. Grimes * routines merely "clip" entries to these start/end values. 118df8bae1dSRodney W. Grimes * [That is, an entry is split into two, bordering at a 119df8bae1dSRodney W. Grimes * start or end value.] Note that these clippings may not 120df8bae1dSRodney W. Grimes * always be necessary (as the two resulting entries are then 121df8bae1dSRodney W. Grimes * not changed); however, the clipping is done for convenience. 122df8bae1dSRodney W. Grimes * No attempt is currently made to "glue back together" two 123df8bae1dSRodney W. Grimes * abutting entries. 124df8bae1dSRodney W. Grimes * 125df8bae1dSRodney W. Grimes * As mentioned above, virtual copy operations are performed 126df8bae1dSRodney W. Grimes * by copying VM object references from one sharing map to 127df8bae1dSRodney W. Grimes * another, and then marking both regions as copy-on-write. 128df8bae1dSRodney W. Grimes * It is important to note that only one writeable reference 129df8bae1dSRodney W. Grimes * to a VM object region exists in any map -- this means that 130df8bae1dSRodney W. Grimes * shadow object creation can be delayed until a write operation 131df8bae1dSRodney W. Grimes * occurs. 132df8bae1dSRodney W. Grimes */ 133df8bae1dSRodney W. Grimes 134df8bae1dSRodney W. Grimes /* 135df8bae1dSRodney W. Grimes * vm_map_startup: 136df8bae1dSRodney W. Grimes * 137df8bae1dSRodney W. Grimes * Initialize the vm_map module. Must be called before 138df8bae1dSRodney W. Grimes * any other vm_map routines. 139df8bae1dSRodney W. Grimes * 140df8bae1dSRodney W. Grimes * Map and entry structures are allocated from the general 141df8bae1dSRodney W. Grimes * purpose memory pool with some exceptions: 142df8bae1dSRodney W. Grimes * 143df8bae1dSRodney W. Grimes * - The kernel map and kmem submap are allocated statically. 144df8bae1dSRodney W. Grimes * - Kernel map entries are allocated out of a static pool. 145df8bae1dSRodney W. 
Grimes * 146df8bae1dSRodney W. Grimes * These restrictions are necessary since malloc() uses the 147df8bae1dSRodney W. Grimes * maps and requires map entries. 148df8bae1dSRodney W. Grimes */ 149df8bae1dSRodney W. Grimes 150df8bae1dSRodney W. Grimes vm_offset_t kentry_data; 151df8bae1dSRodney W. Grimes vm_size_t kentry_data_size; 152f708ef1bSPoul-Henning Kamp static vm_map_entry_t kentry_free; 153f708ef1bSPoul-Henning Kamp static vm_map_t kmap_free; 154bd7e5f99SJohn Dyson extern char kstack[]; 155df8bae1dSRodney W. Grimes 156f708ef1bSPoul-Henning Kamp static int kentry_count; 157c3cb3e12SDavid Greenman static vm_offset_t mapvm_start, mapvm, mapvmmax; 158c3cb3e12SDavid Greenman static int mapvmpgcnt; 15926f9a767SRodney W. Grimes 160df8bae1dSRodney W. Grimes static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t)); 161df8bae1dSRodney W. Grimes static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t)); 162f708ef1bSPoul-Henning Kamp static vm_map_entry_t vm_map_entry_create __P((vm_map_t)); 163f708ef1bSPoul-Henning Kamp static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t)); 164f708ef1bSPoul-Henning Kamp static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t)); 165f708ef1bSPoul-Henning Kamp static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t)); 166f708ef1bSPoul-Henning Kamp static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, 167f708ef1bSPoul-Henning Kamp vm_map_entry_t)); 168f708ef1bSPoul-Henning Kamp static void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t)); 169df8bae1dSRodney W. Grimes 1700d94caffSDavid Greenman void 1710d94caffSDavid Greenman vm_map_startup() 172df8bae1dSRodney W. Grimes { 173df8bae1dSRodney W. Grimes register int i; 174df8bae1dSRodney W. Grimes register vm_map_entry_t mep; 175df8bae1dSRodney W. Grimes vm_map_t mp; 176df8bae1dSRodney W. Grimes 177df8bae1dSRodney W. Grimes /* 178df8bae1dSRodney W. 
Grimes * Static map structures for allocation before initialization of 179df8bae1dSRodney W. Grimes * kernel map or kmem map. vm_map_create knows how to deal with them. 180df8bae1dSRodney W. Grimes */ 181df8bae1dSRodney W. Grimes kmap_free = mp = (vm_map_t) kentry_data; 182df8bae1dSRodney W. Grimes i = MAX_KMAP; 183df8bae1dSRodney W. Grimes while (--i > 0) { 184df8bae1dSRodney W. Grimes mp->header.next = (vm_map_entry_t) (mp + 1); 185df8bae1dSRodney W. Grimes mp++; 186df8bae1dSRodney W. Grimes } 187df8bae1dSRodney W. Grimes mp++->header.next = NULL; 188df8bae1dSRodney W. Grimes 189df8bae1dSRodney W. Grimes /* 1900d94caffSDavid Greenman * Form a free list of statically allocated kernel map entries with 1910d94caffSDavid Greenman * the rest. 192df8bae1dSRodney W. Grimes */ 193df8bae1dSRodney W. Grimes kentry_free = mep = (vm_map_entry_t) mp; 19466ecebedSDavid Greenman kentry_count = i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep; 195df8bae1dSRodney W. Grimes while (--i > 0) { 196df8bae1dSRodney W. Grimes mep->next = mep + 1; 197df8bae1dSRodney W. Grimes mep++; 198df8bae1dSRodney W. Grimes } 199df8bae1dSRodney W. Grimes mep->next = NULL; 200df8bae1dSRodney W. Grimes } 201df8bae1dSRodney W. Grimes 202df8bae1dSRodney W. Grimes /* 203df8bae1dSRodney W. Grimes * Allocate a vmspace structure, including a vm_map and pmap, 204df8bae1dSRodney W. Grimes * and initialize those structures. The refcnt is set to 1. 205df8bae1dSRodney W. Grimes * The remaining fields must be initialized by the caller. 206df8bae1dSRodney W. Grimes */ 207df8bae1dSRodney W. Grimes struct vmspace * 208df8bae1dSRodney W. Grimes vmspace_alloc(min, max, pageable) 209df8bae1dSRodney W. Grimes vm_offset_t min, max; 210df8bae1dSRodney W. Grimes int pageable; 211df8bae1dSRodney W. Grimes { 212df8bae1dSRodney W. 
Grimes register struct vmspace *vm; 2130d94caffSDavid Greenman 214d6a6c0f6SDavid Greenman if (mapvmpgcnt == 0 && mapvm == 0) { 215d6a6c0f6SDavid Greenman int s; 2160d94caffSDavid Greenman 217d6a6c0f6SDavid Greenman mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE; 218d6a6c0f6SDavid Greenman s = splhigh(); 21966ecebedSDavid Greenman mapvm_start = mapvm = kmem_alloc_pageable(kmem_map, mapvmpgcnt * PAGE_SIZE); 22066ecebedSDavid Greenman mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE; 221d6a6c0f6SDavid Greenman splx(s); 222d6a6c0f6SDavid Greenman if (!mapvm) 223d6a6c0f6SDavid Greenman mapvmpgcnt = 0; 224d6a6c0f6SDavid Greenman } 225df8bae1dSRodney W. Grimes MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK); 226df8bae1dSRodney W. Grimes bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm); 227df8bae1dSRodney W. Grimes vm_map_init(&vm->vm_map, min, max, pageable); 228df8bae1dSRodney W. Grimes pmap_pinit(&vm->vm_pmap); 229df8bae1dSRodney W. Grimes vm->vm_map.pmap = &vm->vm_pmap; /* XXX */ 230df8bae1dSRodney W. Grimes vm->vm_refcnt = 1; 231df8bae1dSRodney W. Grimes return (vm); 232df8bae1dSRodney W. Grimes } 233df8bae1dSRodney W. Grimes 234df8bae1dSRodney W. Grimes void 235df8bae1dSRodney W. Grimes vmspace_free(vm) 236df8bae1dSRodney W. Grimes register struct vmspace *vm; 237df8bae1dSRodney W. Grimes { 238df8bae1dSRodney W. Grimes 239a1f6d91cSDavid Greenman if (vm->vm_refcnt == 0) 240a1f6d91cSDavid Greenman panic("vmspace_free: attempt to free already freed vmspace"); 241a1f6d91cSDavid Greenman 242df8bae1dSRodney W. Grimes if (--vm->vm_refcnt == 0) { 243bd7e5f99SJohn Dyson int s, i; 244bd7e5f99SJohn Dyson 24530dcfc09SJohn Dyson /* 246bd7e5f99SJohn Dyson pmap_remove(&vm->vm_pmap, (vm_offset_t) kstack, (vm_offset_t) kstack+UPAGES*PAGE_SIZE); 24730dcfc09SJohn Dyson */ 248bd7e5f99SJohn Dyson 249df8bae1dSRodney W. Grimes /* 250df8bae1dSRodney W. Grimes * Lock the map, to wait out all other references to it. 
2510d94caffSDavid Greenman * Delete all of the mappings and pages they hold, then call 2520d94caffSDavid Greenman * the pmap module to reclaim anything left. 253df8bae1dSRodney W. Grimes */ 254df8bae1dSRodney W. Grimes vm_map_lock(&vm->vm_map); 255df8bae1dSRodney W. Grimes (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, 256df8bae1dSRodney W. Grimes vm->vm_map.max_offset); 25730dcfc09SJohn Dyson vm_object_deallocate(vm->vm_upages_obj); 258a1f6d91cSDavid Greenman vm_map_unlock(&vm->vm_map); 259a1f6d91cSDavid Greenman while( vm->vm_map.ref_count != 1) 260a1f6d91cSDavid Greenman tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0); 261a1f6d91cSDavid Greenman --vm->vm_map.ref_count; 262df8bae1dSRodney W. Grimes pmap_release(&vm->vm_pmap); 263df8bae1dSRodney W. Grimes FREE(vm, M_VMMAP); 264df8bae1dSRodney W. Grimes } 265df8bae1dSRodney W. Grimes } 266df8bae1dSRodney W. Grimes 267df8bae1dSRodney W. Grimes /* 268df8bae1dSRodney W. Grimes * vm_map_create: 269df8bae1dSRodney W. Grimes * 270df8bae1dSRodney W. Grimes * Creates and returns a new empty VM map with 271df8bae1dSRodney W. Grimes * the given physical map structure, and having 272df8bae1dSRodney W. Grimes * the given lower and upper address bounds. 273df8bae1dSRodney W. Grimes */ 2740d94caffSDavid Greenman vm_map_t 2750d94caffSDavid Greenman vm_map_create(pmap, min, max, pageable) 276df8bae1dSRodney W. Grimes pmap_t pmap; 277df8bae1dSRodney W. Grimes vm_offset_t min, max; 278df8bae1dSRodney W. Grimes boolean_t pageable; 279df8bae1dSRodney W. Grimes { 280df8bae1dSRodney W. Grimes register vm_map_t result; 281df8bae1dSRodney W. Grimes 282df8bae1dSRodney W. Grimes if (kmem_map == NULL) { 283df8bae1dSRodney W. Grimes result = kmap_free; 284df8bae1dSRodney W. Grimes kmap_free = (vm_map_t) result->header.next; 285df8bae1dSRodney W. Grimes if (result == NULL) 286df8bae1dSRodney W. Grimes panic("vm_map_create: out of maps"); 287df8bae1dSRodney W. Grimes } else 288df8bae1dSRodney W. 
Grimes MALLOC(result, vm_map_t, sizeof(struct vm_map), 289df8bae1dSRodney W. Grimes M_VMMAP, M_WAITOK); 290df8bae1dSRodney W. Grimes 291df8bae1dSRodney W. Grimes vm_map_init(result, min, max, pageable); 292df8bae1dSRodney W. Grimes result->pmap = pmap; 293df8bae1dSRodney W. Grimes return (result); 294df8bae1dSRodney W. Grimes } 295df8bae1dSRodney W. Grimes 296df8bae1dSRodney W. Grimes /* 297df8bae1dSRodney W. Grimes * Initialize an existing vm_map structure 298df8bae1dSRodney W. Grimes * such as that in the vmspace structure. 299df8bae1dSRodney W. Grimes * The pmap is set elsewhere. 300df8bae1dSRodney W. Grimes */ 301df8bae1dSRodney W. Grimes void 302df8bae1dSRodney W. Grimes vm_map_init(map, min, max, pageable) 303df8bae1dSRodney W. Grimes register struct vm_map *map; 304df8bae1dSRodney W. Grimes vm_offset_t min, max; 305df8bae1dSRodney W. Grimes boolean_t pageable; 306df8bae1dSRodney W. Grimes { 307df8bae1dSRodney W. Grimes map->header.next = map->header.prev = &map->header; 308df8bae1dSRodney W. Grimes map->nentries = 0; 309df8bae1dSRodney W. Grimes map->size = 0; 310df8bae1dSRodney W. Grimes map->ref_count = 1; 311df8bae1dSRodney W. Grimes map->is_main_map = TRUE; 312df8bae1dSRodney W. Grimes map->min_offset = min; 313df8bae1dSRodney W. Grimes map->max_offset = max; 314df8bae1dSRodney W. Grimes map->entries_pageable = pageable; 315df8bae1dSRodney W. Grimes map->first_free = &map->header; 316df8bae1dSRodney W. Grimes map->hint = &map->header; 317df8bae1dSRodney W. Grimes map->timestamp = 0; 318df8bae1dSRodney W. Grimes lock_init(&map->lock, TRUE); 319df8bae1dSRodney W. Grimes } 320df8bae1dSRodney W. Grimes 321df8bae1dSRodney W. Grimes /* 322df8bae1dSRodney W. Grimes * vm_map_entry_create: [ internal use only ] 323df8bae1dSRodney W. Grimes * 324df8bae1dSRodney W. Grimes * Allocates a VM map entry for insertion. 325df8bae1dSRodney W. Grimes * No entry fields are filled in. This routine is 326df8bae1dSRodney W. Grimes */ 32726f9a767SRodney W. 
Grimes static struct vm_map_entry *mappool; 32826f9a767SRodney W. Grimes static int mappoolcnt; 32926f9a767SRodney W. Grimes 330f708ef1bSPoul-Henning Kamp static vm_map_entry_t 33126f9a767SRodney W. Grimes vm_map_entry_create(map) 332df8bae1dSRodney W. Grimes vm_map_t map; 333df8bae1dSRodney W. Grimes { 334df8bae1dSRodney W. Grimes vm_map_entry_t entry; 33526f9a767SRodney W. Grimes int i; 3360d94caffSDavid Greenman 33726f9a767SRodney W. Grimes #define KENTRY_LOW_WATER 64 33866ecebedSDavid Greenman #define MAPENTRY_LOW_WATER 128 339df8bae1dSRodney W. Grimes 34026f9a767SRodney W. Grimes /* 34126f9a767SRodney W. Grimes * This is a *very* nasty (and sort of incomplete) hack!!!! 34226f9a767SRodney W. Grimes */ 34326f9a767SRodney W. Grimes if (kentry_count < KENTRY_LOW_WATER) { 34426f9a767SRodney W. Grimes if (mapvmpgcnt && mapvm) { 34526f9a767SRodney W. Grimes vm_page_t m; 3460d94caffSDavid Greenman 34705f0fdd2SPoul-Henning Kamp m = vm_page_alloc(kmem_object, 348a316d390SJohn Dyson OFF_TO_IDX(mapvm - vm_map_min(kmem_map)), 3496d40c3d3SDavid Greenman (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL); 35005f0fdd2SPoul-Henning Kamp if (m) { 35126f9a767SRodney W. Grimes int newentries; 3520d94caffSDavid Greenman 353a91c5a7eSJohn Dyson newentries = (PAGE_SIZE / sizeof(struct vm_map_entry)); 35426f9a767SRodney W. Grimes vm_page_wire(m); 35526f9a767SRodney W. Grimes m->flags &= ~PG_BUSY; 356d9459480SDavid Greenman m->valid = VM_PAGE_BITS_ALL; 35726f9a767SRodney W. Grimes pmap_enter(vm_map_pmap(kmem_map), mapvm, 35826f9a767SRodney W. Grimes VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1); 359f919ebdeSDavid Greenman m->flags |= PG_WRITEABLE|PG_MAPPED; 36026f9a767SRodney W. Grimes 36126f9a767SRodney W. Grimes entry = (vm_map_entry_t) mapvm; 362a91c5a7eSJohn Dyson mapvm += PAGE_SIZE; 36326f9a767SRodney W. Grimes --mapvmpgcnt; 36426f9a767SRodney W. Grimes 36526f9a767SRodney W. Grimes for (i = 0; i < newentries; i++) { 36626f9a767SRodney W. 
Grimes vm_map_entry_dispose(kernel_map, entry); 36726f9a767SRodney W. Grimes entry++; 36826f9a767SRodney W. Grimes } 36926f9a767SRodney W. Grimes } 37026f9a767SRodney W. Grimes } 37126f9a767SRodney W. Grimes } 37226f9a767SRodney W. Grimes if (map == kernel_map || map == kmem_map || map == pager_map) { 37326f9a767SRodney W. Grimes 37405f0fdd2SPoul-Henning Kamp entry = kentry_free; 37505f0fdd2SPoul-Henning Kamp if (entry) { 37626f9a767SRodney W. Grimes kentry_free = entry->next; 37726f9a767SRodney W. Grimes --kentry_count; 37826f9a767SRodney W. Grimes return entry; 37926f9a767SRodney W. Grimes } 38005f0fdd2SPoul-Henning Kamp entry = mappool; 38105f0fdd2SPoul-Henning Kamp if (entry) { 38226f9a767SRodney W. Grimes mappool = entry->next; 38326f9a767SRodney W. Grimes --mappoolcnt; 38426f9a767SRodney W. Grimes return entry; 38526f9a767SRodney W. Grimes } 38626f9a767SRodney W. Grimes } else { 38705f0fdd2SPoul-Henning Kamp entry = mappool; 38805f0fdd2SPoul-Henning Kamp if (entry) { 38926f9a767SRodney W. Grimes mappool = entry->next; 39026f9a767SRodney W. Grimes --mappoolcnt; 39126f9a767SRodney W. Grimes return entry; 39226f9a767SRodney W. Grimes } 393df8bae1dSRodney W. Grimes MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry), 394df8bae1dSRodney W. Grimes M_VMMAPENT, M_WAITOK); 395df8bae1dSRodney W. Grimes } 396df8bae1dSRodney W. Grimes if (entry == NULL) 397df8bae1dSRodney W. Grimes panic("vm_map_entry_create: out of map entries"); 398df8bae1dSRodney W. Grimes 399df8bae1dSRodney W. Grimes return (entry); 400df8bae1dSRodney W. Grimes } 401df8bae1dSRodney W. Grimes 402df8bae1dSRodney W. Grimes /* 403df8bae1dSRodney W. Grimes * vm_map_entry_dispose: [ internal use only ] 404df8bae1dSRodney W. Grimes * 405df8bae1dSRodney W. Grimes * Inverse of vm_map_entry_create. 406df8bae1dSRodney W. Grimes */ 407f708ef1bSPoul-Henning Kamp static void 40826f9a767SRodney W. Grimes vm_map_entry_dispose(map, entry) 409df8bae1dSRodney W. Grimes vm_map_t map; 410df8bae1dSRodney W. 
Grimes vm_map_entry_t entry; 411df8bae1dSRodney W. Grimes { 412053bbc78SDavid Greenman if ((kentry_count < KENTRY_LOW_WATER) || 41366ecebedSDavid Greenman ((vm_offset_t) entry >= kentry_data && (vm_offset_t) entry < (kentry_data + kentry_data_size)) || 41466ecebedSDavid Greenman ((vm_offset_t) entry >= mapvm_start && (vm_offset_t) entry < mapvmmax)) { 415df8bae1dSRodney W. Grimes entry->next = kentry_free; 416df8bae1dSRodney W. Grimes kentry_free = entry; 41726f9a767SRodney W. Grimes ++kentry_count; 418053bbc78SDavid Greenman return; 41926f9a767SRodney W. Grimes } else { 42026f9a767SRodney W. Grimes if (mappoolcnt < MAPENTRY_LOW_WATER) { 42126f9a767SRodney W. Grimes entry->next = mappool; 42226f9a767SRodney W. Grimes mappool = entry; 42326f9a767SRodney W. Grimes ++mappoolcnt; 42426f9a767SRodney W. Grimes return; 42526f9a767SRodney W. Grimes } 42626f9a767SRodney W. Grimes FREE(entry, M_VMMAPENT); 427df8bae1dSRodney W. Grimes } 428df8bae1dSRodney W. Grimes } 429df8bae1dSRodney W. Grimes 430df8bae1dSRodney W. Grimes /* 431df8bae1dSRodney W. Grimes * vm_map_entry_{un,}link: 432df8bae1dSRodney W. Grimes * 433df8bae1dSRodney W. Grimes * Insert/remove entries from maps. 434df8bae1dSRodney W. Grimes */ 435df8bae1dSRodney W. Grimes #define vm_map_entry_link(map, after_where, entry) \ 436df8bae1dSRodney W. Grimes { \ 437df8bae1dSRodney W. Grimes (map)->nentries++; \ 438df8bae1dSRodney W. Grimes (entry)->prev = (after_where); \ 439df8bae1dSRodney W. Grimes (entry)->next = (after_where)->next; \ 440df8bae1dSRodney W. Grimes (entry)->prev->next = (entry); \ 441df8bae1dSRodney W. Grimes (entry)->next->prev = (entry); \ 442df8bae1dSRodney W. Grimes } 443df8bae1dSRodney W. Grimes #define vm_map_entry_unlink(map, entry) \ 444df8bae1dSRodney W. Grimes { \ 445df8bae1dSRodney W. Grimes (map)->nentries--; \ 446df8bae1dSRodney W. Grimes (entry)->next->prev = (entry)->prev; \ 447df8bae1dSRodney W. Grimes (entry)->prev->next = (entry)->next; \ 448df8bae1dSRodney W. 
Grimes } 449df8bae1dSRodney W. Grimes 450df8bae1dSRodney W. Grimes /* 451df8bae1dSRodney W. Grimes * vm_map_reference: 452df8bae1dSRodney W. Grimes * 453df8bae1dSRodney W. Grimes * Creates another valid reference to the given map. 454df8bae1dSRodney W. Grimes * 455df8bae1dSRodney W. Grimes */ 4560d94caffSDavid Greenman void 4570d94caffSDavid Greenman vm_map_reference(map) 458df8bae1dSRodney W. Grimes register vm_map_t map; 459df8bae1dSRodney W. Grimes { 460df8bae1dSRodney W. Grimes if (map == NULL) 461df8bae1dSRodney W. Grimes return; 462df8bae1dSRodney W. Grimes 463df8bae1dSRodney W. Grimes map->ref_count++; 464df8bae1dSRodney W. Grimes } 465df8bae1dSRodney W. Grimes 466df8bae1dSRodney W. Grimes /* 467df8bae1dSRodney W. Grimes * vm_map_deallocate: 468df8bae1dSRodney W. Grimes * 469df8bae1dSRodney W. Grimes * Removes a reference from the specified map, 470df8bae1dSRodney W. Grimes * destroying it if no references remain. 471df8bae1dSRodney W. Grimes * The map should not be locked. 472df8bae1dSRodney W. Grimes */ 4730d94caffSDavid Greenman void 4740d94caffSDavid Greenman vm_map_deallocate(map) 475df8bae1dSRodney W. Grimes register vm_map_t map; 476df8bae1dSRodney W. Grimes { 477df8bae1dSRodney W. Grimes register int c; 478df8bae1dSRodney W. Grimes 479df8bae1dSRodney W. Grimes if (map == NULL) 480df8bae1dSRodney W. Grimes return; 481df8bae1dSRodney W. Grimes 482a1f6d91cSDavid Greenman c = map->ref_count; 483df8bae1dSRodney W. Grimes 484a1f6d91cSDavid Greenman if (c == 0) 485a1f6d91cSDavid Greenman panic("vm_map_deallocate: deallocating already freed map"); 486a1f6d91cSDavid Greenman 487a1f6d91cSDavid Greenman if (c != 1) { 488a1f6d91cSDavid Greenman --map->ref_count; 48924a1cce3SDavid Greenman wakeup(&map->ref_count); 490df8bae1dSRodney W. Grimes return; 491df8bae1dSRodney W. Grimes } 492df8bae1dSRodney W. Grimes /* 4930d94caffSDavid Greenman * Lock the map, to wait out all other references to it. 494df8bae1dSRodney W. Grimes */ 495df8bae1dSRodney W. 
Grimes 496df8bae1dSRodney W. Grimes vm_map_lock(map); 497df8bae1dSRodney W. Grimes (void) vm_map_delete(map, map->min_offset, map->max_offset); 498a1f6d91cSDavid Greenman --map->ref_count; 499a1f6d91cSDavid Greenman if( map->ref_count != 0) { 500a1f6d91cSDavid Greenman vm_map_unlock(map); 501a1f6d91cSDavid Greenman return; 502a1f6d91cSDavid Greenman } 503df8bae1dSRodney W. Grimes 504df8bae1dSRodney W. Grimes pmap_destroy(map->pmap); 505df8bae1dSRodney W. Grimes FREE(map, M_VMMAP); 506df8bae1dSRodney W. Grimes } 507df8bae1dSRodney W. Grimes 508df8bae1dSRodney W. Grimes /* 509df8bae1dSRodney W. Grimes * SAVE_HINT: 510df8bae1dSRodney W. Grimes * 511df8bae1dSRodney W. Grimes * Saves the specified entry as the hint for 51224a1cce3SDavid Greenman * future lookups. 513df8bae1dSRodney W. Grimes */ 514df8bae1dSRodney W. Grimes #define SAVE_HINT(map,value) \ 51524a1cce3SDavid Greenman (map)->hint = (value); 516df8bae1dSRodney W. Grimes 517df8bae1dSRodney W. Grimes /* 518df8bae1dSRodney W. Grimes * vm_map_lookup_entry: [ internal use only ] 519df8bae1dSRodney W. Grimes * 520df8bae1dSRodney W. Grimes * Finds the map entry containing (or 521df8bae1dSRodney W. Grimes * immediately preceding) the specified address 522df8bae1dSRodney W. Grimes * in the given map; the entry is returned 523df8bae1dSRodney W. Grimes * in the "entry" parameter. The boolean 524df8bae1dSRodney W. Grimes * result indicates whether the address is 525df8bae1dSRodney W. Grimes * actually contained in the map. 526df8bae1dSRodney W. Grimes */ 5270d94caffSDavid Greenman boolean_t 5280d94caffSDavid Greenman vm_map_lookup_entry(map, address, entry) 529df8bae1dSRodney W. Grimes register vm_map_t map; 530df8bae1dSRodney W. Grimes register vm_offset_t address; 531df8bae1dSRodney W. Grimes vm_map_entry_t *entry; /* OUT */ 532df8bae1dSRodney W. Grimes { 533df8bae1dSRodney W. Grimes register vm_map_entry_t cur; 534df8bae1dSRodney W. Grimes register vm_map_entry_t last; 535df8bae1dSRodney W. 
Grimes 536df8bae1dSRodney W. Grimes /* 5370d94caffSDavid Greenman * Start looking either from the head of the list, or from the hint. 538df8bae1dSRodney W. Grimes */ 539df8bae1dSRodney W. Grimes 540df8bae1dSRodney W. Grimes cur = map->hint; 541df8bae1dSRodney W. Grimes 542df8bae1dSRodney W. Grimes if (cur == &map->header) 543df8bae1dSRodney W. Grimes cur = cur->next; 544df8bae1dSRodney W. Grimes 545df8bae1dSRodney W. Grimes if (address >= cur->start) { 546df8bae1dSRodney W. Grimes /* 547df8bae1dSRodney W. Grimes * Go from hint to end of list. 548df8bae1dSRodney W. Grimes * 5490d94caffSDavid Greenman * But first, make a quick check to see if we are already looking 5500d94caffSDavid Greenman * at the entry we want (which is usually the case). Note also 5510d94caffSDavid Greenman * that we don't need to save the hint here... it is the same 5520d94caffSDavid Greenman * hint (unless we are at the header, in which case the hint 5530d94caffSDavid Greenman * didn't buy us anything anyway). 554df8bae1dSRodney W. Grimes */ 555df8bae1dSRodney W. Grimes last = &map->header; 556df8bae1dSRodney W. Grimes if ((cur != last) && (cur->end > address)) { 557df8bae1dSRodney W. Grimes *entry = cur; 558df8bae1dSRodney W. Grimes return (TRUE); 559df8bae1dSRodney W. Grimes } 5600d94caffSDavid Greenman } else { 561df8bae1dSRodney W. Grimes /* 562df8bae1dSRodney W. Grimes * Go from start to hint, *inclusively* 563df8bae1dSRodney W. Grimes */ 564df8bae1dSRodney W. Grimes last = cur->next; 565df8bae1dSRodney W. Grimes cur = map->header.next; 566df8bae1dSRodney W. Grimes } 567df8bae1dSRodney W. Grimes 568df8bae1dSRodney W. Grimes /* 569df8bae1dSRodney W. Grimes * Search linearly 570df8bae1dSRodney W. Grimes */ 571df8bae1dSRodney W. Grimes 572df8bae1dSRodney W. Grimes while (cur != last) { 573df8bae1dSRodney W. Grimes if (cur->end > address) { 574df8bae1dSRodney W. Grimes if (address >= cur->start) { 575df8bae1dSRodney W. 
Grimes /* 5760d94caffSDavid Greenman * Save this lookup for future hints, and 5770d94caffSDavid Greenman * return 578df8bae1dSRodney W. Grimes */ 579df8bae1dSRodney W. Grimes 580df8bae1dSRodney W. Grimes *entry = cur; 581df8bae1dSRodney W. Grimes SAVE_HINT(map, cur); 582df8bae1dSRodney W. Grimes return (TRUE); 583df8bae1dSRodney W. Grimes } 584df8bae1dSRodney W. Grimes break; 585df8bae1dSRodney W. Grimes } 586df8bae1dSRodney W. Grimes cur = cur->next; 587df8bae1dSRodney W. Grimes } 588df8bae1dSRodney W. Grimes *entry = cur->prev; 589df8bae1dSRodney W. Grimes SAVE_HINT(map, *entry); 590df8bae1dSRodney W. Grimes return (FALSE); 591df8bae1dSRodney W. Grimes } 592df8bae1dSRodney W. Grimes 593df8bae1dSRodney W. Grimes /* 59430dcfc09SJohn Dyson * vm_map_insert: 59530dcfc09SJohn Dyson * 59630dcfc09SJohn Dyson * Inserts the given whole VM object into the target 59730dcfc09SJohn Dyson * map at the specified address range. The object's 59830dcfc09SJohn Dyson * size should match that of the address range. 59930dcfc09SJohn Dyson * 60030dcfc09SJohn Dyson * Requires that the map be locked, and leaves it so. 60130dcfc09SJohn Dyson */ 60230dcfc09SJohn Dyson int 60330dcfc09SJohn Dyson vm_map_insert(map, object, offset, start, end, prot, max, cow) 60430dcfc09SJohn Dyson vm_map_t map; 60530dcfc09SJohn Dyson vm_object_t object; 60630dcfc09SJohn Dyson vm_ooffset_t offset; 60730dcfc09SJohn Dyson vm_offset_t start; 60830dcfc09SJohn Dyson vm_offset_t end; 60930dcfc09SJohn Dyson vm_prot_t prot, max; 61030dcfc09SJohn Dyson int cow; 61130dcfc09SJohn Dyson { 61230dcfc09SJohn Dyson register vm_map_entry_t new_entry; 61330dcfc09SJohn Dyson register vm_map_entry_t prev_entry; 61430dcfc09SJohn Dyson vm_map_entry_t temp_entry; 61530dcfc09SJohn Dyson 61630dcfc09SJohn Dyson /* 61730dcfc09SJohn Dyson * Check that the start and end points are not bogus. 
61830dcfc09SJohn Dyson */ 61930dcfc09SJohn Dyson 62030dcfc09SJohn Dyson if ((start < map->min_offset) || (end > map->max_offset) || 62130dcfc09SJohn Dyson (start >= end)) 62230dcfc09SJohn Dyson return (KERN_INVALID_ADDRESS); 62330dcfc09SJohn Dyson 62430dcfc09SJohn Dyson /* 62530dcfc09SJohn Dyson * Find the entry prior to the proposed starting address; if it's part 62630dcfc09SJohn Dyson * of an existing entry, this range is bogus. 62730dcfc09SJohn Dyson */ 62830dcfc09SJohn Dyson 62930dcfc09SJohn Dyson if (vm_map_lookup_entry(map, start, &temp_entry)) 63030dcfc09SJohn Dyson return (KERN_NO_SPACE); 63130dcfc09SJohn Dyson 63230dcfc09SJohn Dyson prev_entry = temp_entry; 63330dcfc09SJohn Dyson 63430dcfc09SJohn Dyson /* 63530dcfc09SJohn Dyson * Assert that the next entry doesn't overlap the end point. 63630dcfc09SJohn Dyson */ 63730dcfc09SJohn Dyson 63830dcfc09SJohn Dyson if ((prev_entry->next != &map->header) && 63930dcfc09SJohn Dyson (prev_entry->next->start < end)) 64030dcfc09SJohn Dyson return (KERN_NO_SPACE); 64130dcfc09SJohn Dyson 64230dcfc09SJohn Dyson if ((prev_entry != &map->header) && 64330dcfc09SJohn Dyson (prev_entry->end == start) && 64430dcfc09SJohn Dyson (prev_entry->is_a_map == FALSE) && 64530dcfc09SJohn Dyson (prev_entry->is_sub_map == FALSE) && 64630dcfc09SJohn Dyson ((object == NULL) || (prev_entry->object.vm_object == object)) && 64730dcfc09SJohn Dyson (prev_entry->inheritance == VM_INHERIT_DEFAULT) && 64830dcfc09SJohn Dyson (prev_entry->protection == prot) && 64930dcfc09SJohn Dyson (prev_entry->max_protection == max) && 65030dcfc09SJohn Dyson (prev_entry->wired_count == 0)) { 65130dcfc09SJohn Dyson /* 65230dcfc09SJohn Dyson * See if we can avoid creating a new entry by extending one of our 65330dcfc09SJohn Dyson * neighbors. 
65430dcfc09SJohn Dyson */ 65530dcfc09SJohn Dyson 65630dcfc09SJohn Dyson if (object == NULL) { 65730dcfc09SJohn Dyson if (vm_object_coalesce(prev_entry->object.vm_object, 65830dcfc09SJohn Dyson OFF_TO_IDX(prev_entry->offset), 65930dcfc09SJohn Dyson (vm_size_t) (prev_entry->end 66030dcfc09SJohn Dyson - prev_entry->start), 66130dcfc09SJohn Dyson (vm_size_t) (end - prev_entry->end))) { 66230dcfc09SJohn Dyson /* 66330dcfc09SJohn Dyson * Coalesced the two objects - can extend the 66430dcfc09SJohn Dyson * previous map entry to include the new 66530dcfc09SJohn Dyson * range. 66630dcfc09SJohn Dyson */ 66730dcfc09SJohn Dyson map->size += (end - prev_entry->end); 66830dcfc09SJohn Dyson prev_entry->end = end; 66930dcfc09SJohn Dyson return (KERN_SUCCESS); 67030dcfc09SJohn Dyson } 67130dcfc09SJohn Dyson } /* else if ((object == prev_entry->object.vm_object) && 67230dcfc09SJohn Dyson (prev_entry->offset + (prev_entry->end - prev_entry->start) == offset)) { 67330dcfc09SJohn Dyson map->size += (end - prev_entry->end); 67430dcfc09SJohn Dyson prev_entry->end = end; 67530dcfc09SJohn Dyson printf("map optim 1\n"); 67630dcfc09SJohn Dyson return (KERN_SUCCESS); 67730dcfc09SJohn Dyson } */ 67830dcfc09SJohn Dyson } 67930dcfc09SJohn Dyson /* 68030dcfc09SJohn Dyson * Create a new entry 68130dcfc09SJohn Dyson */ 68230dcfc09SJohn Dyson 68330dcfc09SJohn Dyson new_entry = vm_map_entry_create(map); 68430dcfc09SJohn Dyson new_entry->start = start; 68530dcfc09SJohn Dyson new_entry->end = end; 68630dcfc09SJohn Dyson 68730dcfc09SJohn Dyson new_entry->is_a_map = FALSE; 68830dcfc09SJohn Dyson new_entry->is_sub_map = FALSE; 68930dcfc09SJohn Dyson new_entry->object.vm_object = object; 69030dcfc09SJohn Dyson new_entry->offset = offset; 69130dcfc09SJohn Dyson 69230dcfc09SJohn Dyson if (cow & MAP_COPY_NEEDED) 69330dcfc09SJohn Dyson new_entry->needs_copy = TRUE; 69430dcfc09SJohn Dyson else 69530dcfc09SJohn Dyson new_entry->needs_copy = FALSE; 69630dcfc09SJohn Dyson 69730dcfc09SJohn Dyson if (cow & 
MAP_COPY_ON_WRITE) 69830dcfc09SJohn Dyson new_entry->copy_on_write = TRUE; 69930dcfc09SJohn Dyson else 70030dcfc09SJohn Dyson new_entry->copy_on_write = FALSE; 70130dcfc09SJohn Dyson 70230dcfc09SJohn Dyson if (map->is_main_map) { 70330dcfc09SJohn Dyson new_entry->inheritance = VM_INHERIT_DEFAULT; 70430dcfc09SJohn Dyson new_entry->protection = prot; 70530dcfc09SJohn Dyson new_entry->max_protection = max; 70630dcfc09SJohn Dyson new_entry->wired_count = 0; 70730dcfc09SJohn Dyson } 70830dcfc09SJohn Dyson /* 70930dcfc09SJohn Dyson * Insert the new entry into the list 71030dcfc09SJohn Dyson */ 71130dcfc09SJohn Dyson 71230dcfc09SJohn Dyson vm_map_entry_link(map, prev_entry, new_entry); 71330dcfc09SJohn Dyson map->size += new_entry->end - new_entry->start; 71430dcfc09SJohn Dyson 71530dcfc09SJohn Dyson /* 71630dcfc09SJohn Dyson * Update the free space hint 71730dcfc09SJohn Dyson */ 71830dcfc09SJohn Dyson 71930dcfc09SJohn Dyson if ((map->first_free == prev_entry) && 72030dcfc09SJohn Dyson (prev_entry->end >= new_entry->start)) 72130dcfc09SJohn Dyson map->first_free = new_entry; 72230dcfc09SJohn Dyson 72330dcfc09SJohn Dyson return (KERN_SUCCESS); 72430dcfc09SJohn Dyson } 72530dcfc09SJohn Dyson 72630dcfc09SJohn Dyson /* 727df8bae1dSRodney W. Grimes * Find sufficient space for `length' bytes in the given map, starting at 728df8bae1dSRodney W. Grimes * `start'. The map must be locked. Returns 0 on success, 1 on no space. 729df8bae1dSRodney W. Grimes */ 730df8bae1dSRodney W. Grimes int 731df8bae1dSRodney W. Grimes vm_map_findspace(map, start, length, addr) 732df8bae1dSRodney W. Grimes register vm_map_t map; 733df8bae1dSRodney W. Grimes register vm_offset_t start; 734df8bae1dSRodney W. Grimes vm_size_t length; 735df8bae1dSRodney W. Grimes vm_offset_t *addr; 736df8bae1dSRodney W. Grimes { 737df8bae1dSRodney W. Grimes register vm_map_entry_t entry, next; 738df8bae1dSRodney W. Grimes register vm_offset_t end; 739df8bae1dSRodney W. Grimes 740df8bae1dSRodney W. 
Grimes if (start < map->min_offset) 741df8bae1dSRodney W. Grimes start = map->min_offset; 742df8bae1dSRodney W. Grimes if (start > map->max_offset) 743df8bae1dSRodney W. Grimes return (1); 744df8bae1dSRodney W. Grimes 745df8bae1dSRodney W. Grimes /* 7460d94caffSDavid Greenman * Look for the first possible address; if there's already something 7470d94caffSDavid Greenman * at this address, we have to start after it. 748df8bae1dSRodney W. Grimes */ 749df8bae1dSRodney W. Grimes if (start == map->min_offset) { 750df8bae1dSRodney W. Grimes if ((entry = map->first_free) != &map->header) 751df8bae1dSRodney W. Grimes start = entry->end; 752df8bae1dSRodney W. Grimes } else { 753df8bae1dSRodney W. Grimes vm_map_entry_t tmp; 7540d94caffSDavid Greenman 755df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &tmp)) 756df8bae1dSRodney W. Grimes start = tmp->end; 757df8bae1dSRodney W. Grimes entry = tmp; 758df8bae1dSRodney W. Grimes } 759df8bae1dSRodney W. Grimes 760df8bae1dSRodney W. Grimes /* 7610d94caffSDavid Greenman * Look through the rest of the map, trying to fit a new region in the 7620d94caffSDavid Greenman * gap between existing regions, or after the very last region. 763df8bae1dSRodney W. Grimes */ 764df8bae1dSRodney W. Grimes for (;; start = (entry = next)->end) { 765df8bae1dSRodney W. Grimes /* 766df8bae1dSRodney W. Grimes * Find the end of the proposed new region. Be sure we didn't 767df8bae1dSRodney W. Grimes * go beyond the end of the map, or wrap around the address; 768df8bae1dSRodney W. Grimes * if so, we lose. Otherwise, if this is the last entry, or 769df8bae1dSRodney W. Grimes * if the proposed new region fits before the next entry, we 770df8bae1dSRodney W. Grimes * win. 771df8bae1dSRodney W. Grimes */ 772df8bae1dSRodney W. Grimes end = start + length; 773df8bae1dSRodney W. Grimes if (end > map->max_offset || end < start) 774df8bae1dSRodney W. Grimes return (1); 775df8bae1dSRodney W. Grimes next = entry->next; 776df8bae1dSRodney W. 
Grimes if (next == &map->header || next->start >= end) 777df8bae1dSRodney W. Grimes break; 778df8bae1dSRodney W. Grimes } 779df8bae1dSRodney W. Grimes SAVE_HINT(map, entry); 780df8bae1dSRodney W. Grimes *addr = start; 7810d94caffSDavid Greenman if (map == kernel_map && round_page(start + length) > kernel_vm_end) 7820d94caffSDavid Greenman pmap_growkernel(round_page(start + length)); 783df8bae1dSRodney W. Grimes return (0); 784df8bae1dSRodney W. Grimes } 785df8bae1dSRodney W. Grimes 786df8bae1dSRodney W. Grimes /* 787df8bae1dSRodney W. Grimes * vm_map_find finds an unallocated region in the target address 788df8bae1dSRodney W. Grimes * map with the given length. The search is defined to be 789df8bae1dSRodney W. Grimes * first-fit from the specified address; the region found is 790df8bae1dSRodney W. Grimes * returned in the same parameter. 791df8bae1dSRodney W. Grimes * 792df8bae1dSRodney W. Grimes */ 793df8bae1dSRodney W. Grimes int 794bd7e5f99SJohn Dyson vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow) 795df8bae1dSRodney W. Grimes vm_map_t map; 796df8bae1dSRodney W. Grimes vm_object_t object; 797a316d390SJohn Dyson vm_ooffset_t offset; 798df8bae1dSRodney W. Grimes vm_offset_t *addr; /* IN/OUT */ 799df8bae1dSRodney W. Grimes vm_size_t length; 800df8bae1dSRodney W. Grimes boolean_t find_space; 801bd7e5f99SJohn Dyson vm_prot_t prot, max; 802bd7e5f99SJohn Dyson int cow; 803df8bae1dSRodney W. Grimes { 804df8bae1dSRodney W. Grimes register vm_offset_t start; 8058d6e8edeSDavid Greenman int result, s = 0; 806df8bae1dSRodney W. Grimes 807df8bae1dSRodney W. Grimes start = *addr; 8088d6e8edeSDavid Greenman 8098d6e8edeSDavid Greenman if (map == kmem_map) 8108d6e8edeSDavid Greenman s = splhigh(); 8118d6e8edeSDavid Greenman 812bea41bcfSDavid Greenman vm_map_lock(map); 813df8bae1dSRodney W. Grimes if (find_space) { 814df8bae1dSRodney W. Grimes if (vm_map_findspace(map, start, length, addr)) { 815df8bae1dSRodney W. 
Grimes vm_map_unlock(map); 8168d6e8edeSDavid Greenman if (map == kmem_map) 8178d6e8edeSDavid Greenman splx(s); 818df8bae1dSRodney W. Grimes return (KERN_NO_SPACE); 819df8bae1dSRodney W. Grimes } 820df8bae1dSRodney W. Grimes start = *addr; 821df8bae1dSRodney W. Grimes } 822bd7e5f99SJohn Dyson result = vm_map_insert(map, object, offset, 823bd7e5f99SJohn Dyson start, start + length, prot, max, cow); 824df8bae1dSRodney W. Grimes vm_map_unlock(map); 8258d6e8edeSDavid Greenman 8268d6e8edeSDavid Greenman if (map == kmem_map) 8278d6e8edeSDavid Greenman splx(s); 8288d6e8edeSDavid Greenman 829df8bae1dSRodney W. Grimes return (result); 830df8bae1dSRodney W. Grimes } 831df8bae1dSRodney W. Grimes 832df8bae1dSRodney W. Grimes /* 833df8bae1dSRodney W. Grimes * vm_map_simplify_entry: [ internal use only ] 834df8bae1dSRodney W. Grimes * 835df8bae1dSRodney W. Grimes * Simplify the given map entry by: 836df8bae1dSRodney W. Grimes * removing extra sharing maps 837df8bae1dSRodney W. Grimes * [XXX maybe later] merging with a neighbor 838df8bae1dSRodney W. Grimes */ 839f708ef1bSPoul-Henning Kamp static void 8400d94caffSDavid Greenman vm_map_simplify_entry(map, entry) 841df8bae1dSRodney W. Grimes vm_map_t map; 842df8bae1dSRodney W. Grimes vm_map_entry_t entry; 843df8bae1dSRodney W. Grimes { 844308c24baSJohn Dyson vm_map_entry_t next, prev; 845308c24baSJohn Dyson vm_size_t nextsize, prevsize, esize; 846df8bae1dSRodney W. Grimes 847df8bae1dSRodney W. Grimes /* 8480d94caffSDavid Greenman * If this entry corresponds to a sharing map, then see if we can 8490d94caffSDavid Greenman * remove the level of indirection. If it's not a sharing map, then it 8500d94caffSDavid Greenman * points to a VM object, so see if we can merge with either of our 8510d94caffSDavid Greenman * neighbors. 852df8bae1dSRodney W. Grimes */ 853df8bae1dSRodney W. Grimes 854308c24baSJohn Dyson if (entry->is_sub_map || entry->is_a_map || entry->wired_count) 855df8bae1dSRodney W. 
Grimes return; 856308c24baSJohn Dyson 857308c24baSJohn Dyson prev = entry->prev; 858308c24baSJohn Dyson if (prev != &map->header) { 859308c24baSJohn Dyson prevsize = prev->end - prev->start; 860f32dbbeeSJohn Dyson if ( (prev->end == entry->start) && 861f32dbbeeSJohn Dyson (prev->object.vm_object == entry->object.vm_object) && 862f32dbbeeSJohn Dyson (prev->offset + prevsize == entry->offset) && 863f32dbbeeSJohn Dyson (prev->needs_copy == entry->needs_copy) && 864f32dbbeeSJohn Dyson (prev->copy_on_write == entry->copy_on_write) && 865f32dbbeeSJohn Dyson (prev->protection == entry->protection) && 866f32dbbeeSJohn Dyson (prev->max_protection == entry->max_protection) && 867f32dbbeeSJohn Dyson (prev->inheritance == entry->inheritance) && 868f32dbbeeSJohn Dyson (prev->is_a_map == FALSE) && 869f32dbbeeSJohn Dyson (prev->is_sub_map == FALSE) && 870f32dbbeeSJohn Dyson (prev->wired_count == 0)) { 871308c24baSJohn Dyson if (map->first_free == prev) 872308c24baSJohn Dyson map->first_free = entry; 873308c24baSJohn Dyson vm_map_entry_unlink(map, prev); 874308c24baSJohn Dyson entry->start = prev->start; 875308c24baSJohn Dyson entry->offset = prev->offset; 876308c24baSJohn Dyson vm_object_deallocate(prev->object.vm_object); 877308c24baSJohn Dyson vm_map_entry_dispose(map, prev); 878308c24baSJohn Dyson } 879308c24baSJohn Dyson } 880de5f6a77SJohn Dyson 881de5f6a77SJohn Dyson next = entry->next; 882308c24baSJohn Dyson if (next != &map->header) { 883de5f6a77SJohn Dyson nextsize = next->end - next->start; 884de5f6a77SJohn Dyson esize = entry->end - entry->start; 885f32dbbeeSJohn Dyson if ((entry->end == next->start) && 886f32dbbeeSJohn Dyson (next->object.vm_object == entry->object.vm_object) && 887f32dbbeeSJohn Dyson (entry->offset + esize == next->offset) && 888f32dbbeeSJohn Dyson (next->needs_copy == entry->needs_copy) && 889f32dbbeeSJohn Dyson (next->copy_on_write == entry->copy_on_write) && 890f32dbbeeSJohn Dyson (next->protection == entry->protection) && 891f32dbbeeSJohn Dyson 
(next->max_protection == entry->max_protection) && 892f32dbbeeSJohn Dyson (next->inheritance == entry->inheritance) && 893f32dbbeeSJohn Dyson (next->is_a_map == FALSE) && 894f32dbbeeSJohn Dyson (next->is_sub_map == FALSE) && 895f32dbbeeSJohn Dyson (next->wired_count == 0)) { 896308c24baSJohn Dyson if (map->first_free == next) 897308c24baSJohn Dyson map->first_free = entry; 898de5f6a77SJohn Dyson vm_map_entry_unlink(map, next); 899de5f6a77SJohn Dyson entry->end = next->end; 900de5f6a77SJohn Dyson vm_object_deallocate(next->object.vm_object); 901de5f6a77SJohn Dyson vm_map_entry_dispose(map, next); 902df8bae1dSRodney W. Grimes } 903df8bae1dSRodney W. Grimes } 904de5f6a77SJohn Dyson } 905df8bae1dSRodney W. Grimes 906df8bae1dSRodney W. Grimes /* 907df8bae1dSRodney W. Grimes * vm_map_clip_start: [ internal use only ] 908df8bae1dSRodney W. Grimes * 909df8bae1dSRodney W. Grimes * Asserts that the given entry begins at or after 910df8bae1dSRodney W. Grimes * the specified address; if necessary, 911df8bae1dSRodney W. Grimes * it splits the entry into two. 912df8bae1dSRodney W. Grimes */ 913df8bae1dSRodney W. Grimes #define vm_map_clip_start(map, entry, startaddr) \ 914df8bae1dSRodney W. Grimes { \ 915df8bae1dSRodney W. Grimes if (startaddr > entry->start) \ 916df8bae1dSRodney W. Grimes _vm_map_clip_start(map, entry, startaddr); \ 917df8bae1dSRodney W. Grimes } 918df8bae1dSRodney W. Grimes 919df8bae1dSRodney W. Grimes /* 920df8bae1dSRodney W. Grimes * This routine is called only when it is known that 921df8bae1dSRodney W. Grimes * the entry must be split. 922df8bae1dSRodney W. Grimes */ 9230d94caffSDavid Greenman static void 9240d94caffSDavid Greenman _vm_map_clip_start(map, entry, start) 925df8bae1dSRodney W. Grimes register vm_map_t map; 926df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 927df8bae1dSRodney W. Grimes register vm_offset_t start; 928df8bae1dSRodney W. Grimes { 929df8bae1dSRodney W. Grimes register vm_map_entry_t new_entry; 930df8bae1dSRodney W. 
Grimes 931df8bae1dSRodney W. Grimes /* 9320d94caffSDavid Greenman * Split off the front portion -- note that we must insert the new 9330d94caffSDavid Greenman * entry BEFORE this one, so that this entry has the specified 9340d94caffSDavid Greenman * starting address. 935df8bae1dSRodney W. Grimes */ 936df8bae1dSRodney W. Grimes 937f32dbbeeSJohn Dyson vm_map_simplify_entry(map, entry); 938f32dbbeeSJohn Dyson 939df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 940df8bae1dSRodney W. Grimes *new_entry = *entry; 941df8bae1dSRodney W. Grimes 942df8bae1dSRodney W. Grimes new_entry->end = start; 943df8bae1dSRodney W. Grimes entry->offset += (start - entry->start); 944df8bae1dSRodney W. Grimes entry->start = start; 945df8bae1dSRodney W. Grimes 946df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry->prev, new_entry); 947df8bae1dSRodney W. Grimes 948df8bae1dSRodney W. Grimes if (entry->is_a_map || entry->is_sub_map) 949df8bae1dSRodney W. Grimes vm_map_reference(new_entry->object.share_map); 950df8bae1dSRodney W. Grimes else 951df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 952df8bae1dSRodney W. Grimes } 953df8bae1dSRodney W. Grimes 954df8bae1dSRodney W. Grimes /* 955df8bae1dSRodney W. Grimes * vm_map_clip_end: [ internal use only ] 956df8bae1dSRodney W. Grimes * 957df8bae1dSRodney W. Grimes * Asserts that the given entry ends at or before 958df8bae1dSRodney W. Grimes * the specified address; if necessary, 959df8bae1dSRodney W. Grimes * it splits the entry into two. 960df8bae1dSRodney W. Grimes */ 961df8bae1dSRodney W. Grimes 962df8bae1dSRodney W. Grimes #define vm_map_clip_end(map, entry, endaddr) \ 963df8bae1dSRodney W. Grimes { \ 964df8bae1dSRodney W. Grimes if (endaddr < entry->end) \ 965df8bae1dSRodney W. Grimes _vm_map_clip_end(map, entry, endaddr); \ 966df8bae1dSRodney W. Grimes } 967df8bae1dSRodney W. Grimes 968df8bae1dSRodney W. Grimes /* 969df8bae1dSRodney W. 
Grimes * This routine is called only when it is known that 970df8bae1dSRodney W. Grimes * the entry must be split. 971df8bae1dSRodney W. Grimes */ 9720d94caffSDavid Greenman static void 9730d94caffSDavid Greenman _vm_map_clip_end(map, entry, end) 974df8bae1dSRodney W. Grimes register vm_map_t map; 975df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 976df8bae1dSRodney W. Grimes register vm_offset_t end; 977df8bae1dSRodney W. Grimes { 978df8bae1dSRodney W. Grimes register vm_map_entry_t new_entry; 979df8bae1dSRodney W. Grimes 980df8bae1dSRodney W. Grimes /* 9810d94caffSDavid Greenman * Create a new entry and insert it AFTER the specified entry 982df8bae1dSRodney W. Grimes */ 983df8bae1dSRodney W. Grimes 984df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(map); 985df8bae1dSRodney W. Grimes *new_entry = *entry; 986df8bae1dSRodney W. Grimes 987df8bae1dSRodney W. Grimes new_entry->start = entry->end = end; 988df8bae1dSRodney W. Grimes new_entry->offset += (end - entry->start); 989df8bae1dSRodney W. Grimes 990df8bae1dSRodney W. Grimes vm_map_entry_link(map, entry, new_entry); 991df8bae1dSRodney W. Grimes 992df8bae1dSRodney W. Grimes if (entry->is_a_map || entry->is_sub_map) 993df8bae1dSRodney W. Grimes vm_map_reference(new_entry->object.share_map); 994df8bae1dSRodney W. Grimes else 995df8bae1dSRodney W. Grimes vm_object_reference(new_entry->object.vm_object); 996df8bae1dSRodney W. Grimes } 997df8bae1dSRodney W. Grimes 998df8bae1dSRodney W. Grimes /* 999df8bae1dSRodney W. Grimes * VM_MAP_RANGE_CHECK: [ internal use only ] 1000df8bae1dSRodney W. Grimes * 1001df8bae1dSRodney W. Grimes * Asserts that the starting and ending region 1002df8bae1dSRodney W. Grimes * addresses fall within the valid range of the map. 1003df8bae1dSRodney W. Grimes */ 1004df8bae1dSRodney W. Grimes #define VM_MAP_RANGE_CHECK(map, start, end) \ 1005df8bae1dSRodney W. Grimes { \ 1006df8bae1dSRodney W. Grimes if (start < vm_map_min(map)) \ 1007df8bae1dSRodney W. 
Grimes start = vm_map_min(map); \ 1008df8bae1dSRodney W. Grimes if (end > vm_map_max(map)) \ 1009df8bae1dSRodney W. Grimes end = vm_map_max(map); \ 1010df8bae1dSRodney W. Grimes if (start > end) \ 1011df8bae1dSRodney W. Grimes start = end; \ 1012df8bae1dSRodney W. Grimes } 1013df8bae1dSRodney W. Grimes 1014df8bae1dSRodney W. Grimes /* 1015df8bae1dSRodney W. Grimes * vm_map_submap: [ kernel use only ] 1016df8bae1dSRodney W. Grimes * 1017df8bae1dSRodney W. Grimes * Mark the given range as handled by a subordinate map. 1018df8bae1dSRodney W. Grimes * 1019df8bae1dSRodney W. Grimes * This range must have been created with vm_map_find, 1020df8bae1dSRodney W. Grimes * and no other operations may have been performed on this 1021df8bae1dSRodney W. Grimes * range prior to calling vm_map_submap. 1022df8bae1dSRodney W. Grimes * 1023df8bae1dSRodney W. Grimes * Only a limited number of operations can be performed 1024df8bae1dSRodney W. Grimes * within this rage after calling vm_map_submap: 1025df8bae1dSRodney W. Grimes * vm_fault 1026df8bae1dSRodney W. Grimes * [Don't try vm_map_copy!] 1027df8bae1dSRodney W. Grimes * 1028df8bae1dSRodney W. Grimes * To remove a submapping, one must first remove the 1029df8bae1dSRodney W. Grimes * range from the superior map, and then destroy the 1030df8bae1dSRodney W. Grimes * submap (if desired). [Better yet, don't try it.] 1031df8bae1dSRodney W. Grimes */ 1032df8bae1dSRodney W. Grimes int 1033df8bae1dSRodney W. Grimes vm_map_submap(map, start, end, submap) 1034df8bae1dSRodney W. Grimes register vm_map_t map; 1035df8bae1dSRodney W. Grimes register vm_offset_t start; 1036df8bae1dSRodney W. Grimes register vm_offset_t end; 1037df8bae1dSRodney W. Grimes vm_map_t submap; 1038df8bae1dSRodney W. Grimes { 1039df8bae1dSRodney W. Grimes vm_map_entry_t entry; 1040df8bae1dSRodney W. Grimes register int result = KERN_INVALID_ARGUMENT; 1041df8bae1dSRodney W. Grimes 1042df8bae1dSRodney W. Grimes vm_map_lock(map); 1043df8bae1dSRodney W. 
Grimes 1044df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1045df8bae1dSRodney W. Grimes 1046df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &entry)) { 1047df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 10480d94caffSDavid Greenman } else 1049df8bae1dSRodney W. Grimes entry = entry->next; 1050df8bae1dSRodney W. Grimes 1051df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1052df8bae1dSRodney W. Grimes 1053df8bae1dSRodney W. Grimes if ((entry->start == start) && (entry->end == end) && 1054df8bae1dSRodney W. Grimes (!entry->is_a_map) && 1055df8bae1dSRodney W. Grimes (entry->object.vm_object == NULL) && 1056df8bae1dSRodney W. Grimes (!entry->copy_on_write)) { 1057df8bae1dSRodney W. Grimes entry->is_a_map = FALSE; 1058df8bae1dSRodney W. Grimes entry->is_sub_map = TRUE; 1059df8bae1dSRodney W. Grimes vm_map_reference(entry->object.sub_map = submap); 1060df8bae1dSRodney W. Grimes result = KERN_SUCCESS; 1061df8bae1dSRodney W. Grimes } 1062df8bae1dSRodney W. Grimes vm_map_unlock(map); 1063df8bae1dSRodney W. Grimes 1064df8bae1dSRodney W. Grimes return (result); 1065df8bae1dSRodney W. Grimes } 1066df8bae1dSRodney W. Grimes 1067df8bae1dSRodney W. Grimes /* 1068df8bae1dSRodney W. Grimes * vm_map_protect: 1069df8bae1dSRodney W. Grimes * 1070df8bae1dSRodney W. Grimes * Sets the protection of the specified address 1071df8bae1dSRodney W. Grimes * region in the target map. If "set_max" is 1072df8bae1dSRodney W. Grimes * specified, the maximum protection is to be set; 1073df8bae1dSRodney W. Grimes * otherwise, only the current protection is affected. 1074df8bae1dSRodney W. Grimes */ 1075df8bae1dSRodney W. Grimes int 1076df8bae1dSRodney W. Grimes vm_map_protect(map, start, end, new_prot, set_max) 1077df8bae1dSRodney W. Grimes register vm_map_t map; 1078df8bae1dSRodney W. Grimes register vm_offset_t start; 1079df8bae1dSRodney W. Grimes register vm_offset_t end; 1080df8bae1dSRodney W. 
Grimes register vm_prot_t new_prot; 1081df8bae1dSRodney W. Grimes register boolean_t set_max; 1082df8bae1dSRodney W. Grimes { 1083df8bae1dSRodney W. Grimes register vm_map_entry_t current; 1084df8bae1dSRodney W. Grimes vm_map_entry_t entry; 1085df8bae1dSRodney W. Grimes 1086df8bae1dSRodney W. Grimes vm_map_lock(map); 1087df8bae1dSRodney W. Grimes 1088df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1089df8bae1dSRodney W. Grimes 1090df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &entry)) { 1091df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 10920d94caffSDavid Greenman } else 1093df8bae1dSRodney W. Grimes entry = entry->next; 1094df8bae1dSRodney W. Grimes 1095df8bae1dSRodney W. Grimes /* 10960d94caffSDavid Greenman * Make a first pass to check for protection violations. 1097df8bae1dSRodney W. Grimes */ 1098df8bae1dSRodney W. Grimes 1099df8bae1dSRodney W. Grimes current = entry; 1100df8bae1dSRodney W. Grimes while ((current != &map->header) && (current->start < end)) { 1101a1f6d91cSDavid Greenman if (current->is_sub_map) { 1102a1f6d91cSDavid Greenman vm_map_unlock(map); 1103df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 1104a1f6d91cSDavid Greenman } 1105df8bae1dSRodney W. Grimes if ((new_prot & current->max_protection) != new_prot) { 1106df8bae1dSRodney W. Grimes vm_map_unlock(map); 1107df8bae1dSRodney W. Grimes return (KERN_PROTECTION_FAILURE); 1108df8bae1dSRodney W. Grimes } 1109df8bae1dSRodney W. Grimes current = current->next; 1110df8bae1dSRodney W. Grimes } 1111df8bae1dSRodney W. Grimes 1112df8bae1dSRodney W. Grimes /* 11130d94caffSDavid Greenman * Go back and fix up protections. [Note that clipping is not 11140d94caffSDavid Greenman * necessary the second time.] 1115df8bae1dSRodney W. Grimes */ 1116df8bae1dSRodney W. Grimes 1117df8bae1dSRodney W. Grimes current = entry; 1118df8bae1dSRodney W. Grimes 1119df8bae1dSRodney W. 
Grimes while ((current != &map->header) && (current->start < end)) { 1120df8bae1dSRodney W. Grimes vm_prot_t old_prot; 1121df8bae1dSRodney W. Grimes 1122df8bae1dSRodney W. Grimes vm_map_clip_end(map, current, end); 1123df8bae1dSRodney W. Grimes 1124df8bae1dSRodney W. Grimes old_prot = current->protection; 1125df8bae1dSRodney W. Grimes if (set_max) 1126df8bae1dSRodney W. Grimes current->protection = 1127df8bae1dSRodney W. Grimes (current->max_protection = new_prot) & 1128df8bae1dSRodney W. Grimes old_prot; 1129df8bae1dSRodney W. Grimes else 1130df8bae1dSRodney W. Grimes current->protection = new_prot; 1131df8bae1dSRodney W. Grimes 1132df8bae1dSRodney W. Grimes /* 11330d94caffSDavid Greenman * Update physical map if necessary. Worry about copy-on-write 11340d94caffSDavid Greenman * here -- CHECK THIS XXX 1135df8bae1dSRodney W. Grimes */ 1136df8bae1dSRodney W. Grimes 1137df8bae1dSRodney W. Grimes if (current->protection != old_prot) { 1138df8bae1dSRodney W. Grimes 1139df8bae1dSRodney W. Grimes #define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \ 1140df8bae1dSRodney W. Grimes VM_PROT_ALL) 1141df8bae1dSRodney W. Grimes #define max(a,b) ((a) > (b) ? (a) : (b)) 1142df8bae1dSRodney W. Grimes 1143df8bae1dSRodney W. Grimes if (current->is_a_map) { 1144df8bae1dSRodney W. Grimes vm_map_entry_t share_entry; 1145df8bae1dSRodney W. Grimes vm_offset_t share_end; 1146df8bae1dSRodney W. Grimes 1147df8bae1dSRodney W. Grimes vm_map_lock(current->object.share_map); 1148df8bae1dSRodney W. Grimes (void) vm_map_lookup_entry( 1149df8bae1dSRodney W. Grimes current->object.share_map, 1150df8bae1dSRodney W. Grimes current->offset, 1151df8bae1dSRodney W. Grimes &share_entry); 1152df8bae1dSRodney W. Grimes share_end = current->offset + 1153df8bae1dSRodney W. Grimes (current->end - current->start); 1154df8bae1dSRodney W. Grimes while ((share_entry != 1155df8bae1dSRodney W. Grimes ¤t->object.share_map->header) && 1156df8bae1dSRodney W. 
Grimes (share_entry->start < share_end)) { 1157df8bae1dSRodney W. Grimes 1158df8bae1dSRodney W. Grimes pmap_protect(map->pmap, 1159df8bae1dSRodney W. Grimes (max(share_entry->start, 1160df8bae1dSRodney W. Grimes current->offset) - 1161df8bae1dSRodney W. Grimes current->offset + 1162df8bae1dSRodney W. Grimes current->start), 1163df8bae1dSRodney W. Grimes min(share_entry->end, 1164df8bae1dSRodney W. Grimes share_end) - 1165df8bae1dSRodney W. Grimes current->offset + 1166df8bae1dSRodney W. Grimes current->start, 1167df8bae1dSRodney W. Grimes current->protection & 1168df8bae1dSRodney W. Grimes MASK(share_entry)); 1169df8bae1dSRodney W. Grimes 1170df8bae1dSRodney W. Grimes share_entry = share_entry->next; 1171df8bae1dSRodney W. Grimes } 1172df8bae1dSRodney W. Grimes vm_map_unlock(current->object.share_map); 11730d94caffSDavid Greenman } else 1174df8bae1dSRodney W. Grimes pmap_protect(map->pmap, current->start, 1175df8bae1dSRodney W. Grimes current->end, 1176df8bae1dSRodney W. Grimes current->protection & MASK(entry)); 1177df8bae1dSRodney W. Grimes #undef max 1178df8bae1dSRodney W. Grimes #undef MASK 1179df8bae1dSRodney W. Grimes } 1180df8bae1dSRodney W. Grimes current = current->next; 1181df8bae1dSRodney W. Grimes } 1182df8bae1dSRodney W. Grimes 1183f32dbbeeSJohn Dyson vm_map_simplify_entry(map, entry); 1184df8bae1dSRodney W. Grimes vm_map_unlock(map); 1185df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1186df8bae1dSRodney W. Grimes } 1187df8bae1dSRodney W. Grimes 1188df8bae1dSRodney W. Grimes /* 1189df8bae1dSRodney W. Grimes * vm_map_inherit: 1190df8bae1dSRodney W. Grimes * 1191df8bae1dSRodney W. Grimes * Sets the inheritance of the specified address 1192df8bae1dSRodney W. Grimes * range in the target map. Inheritance 1193df8bae1dSRodney W. Grimes * affects how the map will be shared with 1194df8bae1dSRodney W. Grimes * child maps at the time of vm_map_fork. 1195df8bae1dSRodney W. Grimes */ 1196df8bae1dSRodney W. Grimes int 1197df8bae1dSRodney W. 
Grimes vm_map_inherit(map, start, end, new_inheritance) 1198df8bae1dSRodney W. Grimes register vm_map_t map; 1199df8bae1dSRodney W. Grimes register vm_offset_t start; 1200df8bae1dSRodney W. Grimes register vm_offset_t end; 1201df8bae1dSRodney W. Grimes register vm_inherit_t new_inheritance; 1202df8bae1dSRodney W. Grimes { 1203df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 1204df8bae1dSRodney W. Grimes vm_map_entry_t temp_entry; 1205df8bae1dSRodney W. Grimes 1206df8bae1dSRodney W. Grimes switch (new_inheritance) { 1207df8bae1dSRodney W. Grimes case VM_INHERIT_NONE: 1208df8bae1dSRodney W. Grimes case VM_INHERIT_COPY: 1209df8bae1dSRodney W. Grimes case VM_INHERIT_SHARE: 1210df8bae1dSRodney W. Grimes break; 1211df8bae1dSRodney W. Grimes default: 1212df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 1213df8bae1dSRodney W. Grimes } 1214df8bae1dSRodney W. Grimes 1215df8bae1dSRodney W. Grimes vm_map_lock(map); 1216df8bae1dSRodney W. Grimes 1217df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1218df8bae1dSRodney W. Grimes 1219df8bae1dSRodney W. Grimes if (vm_map_lookup_entry(map, start, &temp_entry)) { 1220df8bae1dSRodney W. Grimes entry = temp_entry; 1221df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 12220d94caffSDavid Greenman } else 1223df8bae1dSRodney W. Grimes entry = temp_entry->next; 1224df8bae1dSRodney W. Grimes 1225df8bae1dSRodney W. Grimes while ((entry != &map->header) && (entry->start < end)) { 1226df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1227df8bae1dSRodney W. Grimes 1228df8bae1dSRodney W. Grimes entry->inheritance = new_inheritance; 1229df8bae1dSRodney W. Grimes 1230df8bae1dSRodney W. Grimes entry = entry->next; 1231df8bae1dSRodney W. Grimes } 1232df8bae1dSRodney W. Grimes 1233f32dbbeeSJohn Dyson vm_map_simplify_entry(map, temp_entry); 1234df8bae1dSRodney W. Grimes vm_map_unlock(map); 1235df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1236df8bae1dSRodney W. Grimes } 1237df8bae1dSRodney W. 
Grimes 1238df8bae1dSRodney W. Grimes /* 1239df8bae1dSRodney W. Grimes * vm_map_pageable: 1240df8bae1dSRodney W. Grimes * 1241df8bae1dSRodney W. Grimes * Sets the pageability of the specified address 1242df8bae1dSRodney W. Grimes * range in the target map. Regions specified 1243df8bae1dSRodney W. Grimes * as not pageable require locked-down physical 1244df8bae1dSRodney W. Grimes * memory and physical page maps. 1245df8bae1dSRodney W. Grimes * 1246df8bae1dSRodney W. Grimes * The map must not be locked, but a reference 1247df8bae1dSRodney W. Grimes * must remain to the map throughout the call. 1248df8bae1dSRodney W. Grimes */ 1249df8bae1dSRodney W. Grimes int 1250df8bae1dSRodney W. Grimes vm_map_pageable(map, start, end, new_pageable) 1251df8bae1dSRodney W. Grimes register vm_map_t map; 1252df8bae1dSRodney W. Grimes register vm_offset_t start; 1253df8bae1dSRodney W. Grimes register vm_offset_t end; 1254df8bae1dSRodney W. Grimes register boolean_t new_pageable; 1255df8bae1dSRodney W. Grimes { 1256df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 1257df8bae1dSRodney W. Grimes vm_map_entry_t start_entry; 125826f9a767SRodney W. Grimes register vm_offset_t failed = 0; 1259df8bae1dSRodney W. Grimes int rv; 1260df8bae1dSRodney W. Grimes 1261df8bae1dSRodney W. Grimes vm_map_lock(map); 1262df8bae1dSRodney W. Grimes 1263df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1264df8bae1dSRodney W. Grimes 1265df8bae1dSRodney W. Grimes /* 12660d94caffSDavid Greenman * Only one pageability change may take place at one time, since 12670d94caffSDavid Greenman * vm_fault assumes it will be called only once for each 12680d94caffSDavid Greenman * wiring/unwiring. Therefore, we have to make sure we're actually 12690d94caffSDavid Greenman * changing the pageability for the entire region. We do so before 12700d94caffSDavid Greenman * making any changes. 1271df8bae1dSRodney W. Grimes */ 1272df8bae1dSRodney W. Grimes 1273df8bae1dSRodney W. 
Grimes if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1274df8bae1dSRodney W. Grimes vm_map_unlock(map); 1275df8bae1dSRodney W. Grimes return (KERN_INVALID_ADDRESS); 1276df8bae1dSRodney W. Grimes } 1277df8bae1dSRodney W. Grimes entry = start_entry; 1278df8bae1dSRodney W. Grimes 1279df8bae1dSRodney W. Grimes /* 12800d94caffSDavid Greenman * Actions are rather different for wiring and unwiring, so we have 12810d94caffSDavid Greenman * two separate cases. 1282df8bae1dSRodney W. Grimes */ 1283df8bae1dSRodney W. Grimes 1284df8bae1dSRodney W. Grimes if (new_pageable) { 1285df8bae1dSRodney W. Grimes 1286df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 1287df8bae1dSRodney W. Grimes 1288df8bae1dSRodney W. Grimes /* 12890d94caffSDavid Greenman * Unwiring. First ensure that the range to be unwired is 12900d94caffSDavid Greenman * really wired down and that there are no holes. 1291df8bae1dSRodney W. Grimes */ 1292df8bae1dSRodney W. Grimes while ((entry != &map->header) && (entry->start < end)) { 1293df8bae1dSRodney W. Grimes 1294df8bae1dSRodney W. Grimes if (entry->wired_count == 0 || 1295df8bae1dSRodney W. Grimes (entry->end < end && 1296df8bae1dSRodney W. Grimes (entry->next == &map->header || 1297df8bae1dSRodney W. Grimes entry->next->start > entry->end))) { 1298df8bae1dSRodney W. Grimes vm_map_unlock(map); 1299df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 1300df8bae1dSRodney W. Grimes } 1301df8bae1dSRodney W. Grimes entry = entry->next; 1302df8bae1dSRodney W. Grimes } 1303df8bae1dSRodney W. Grimes 1304df8bae1dSRodney W. Grimes /* 13050d94caffSDavid Greenman * Now decrement the wiring count for each region. If a region 13060d94caffSDavid Greenman * becomes completely unwired, unwire its physical pages and 13070d94caffSDavid Greenman * mappings. 1308df8bae1dSRodney W. Grimes */ 1309df8bae1dSRodney W. Grimes lock_set_recursive(&map->lock); 1310df8bae1dSRodney W. Grimes 1311df8bae1dSRodney W. 
Grimes entry = start_entry; 1312df8bae1dSRodney W. Grimes while ((entry != &map->header) && (entry->start < end)) { 1313df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1314df8bae1dSRodney W. Grimes 1315df8bae1dSRodney W. Grimes entry->wired_count--; 1316df8bae1dSRodney W. Grimes if (entry->wired_count == 0) 1317df8bae1dSRodney W. Grimes vm_fault_unwire(map, entry->start, entry->end); 1318df8bae1dSRodney W. Grimes 1319df8bae1dSRodney W. Grimes entry = entry->next; 1320df8bae1dSRodney W. Grimes } 1321f32dbbeeSJohn Dyson vm_map_simplify_entry(map, start_entry); 1322df8bae1dSRodney W. Grimes lock_clear_recursive(&map->lock); 13230d94caffSDavid Greenman } else { 1324df8bae1dSRodney W. Grimes /* 1325df8bae1dSRodney W. Grimes * Wiring. We must do this in two passes: 1326df8bae1dSRodney W. Grimes * 13270d94caffSDavid Greenman * 1. Holding the write lock, we create any shadow or zero-fill 13280d94caffSDavid Greenman * objects that need to be created. Then we clip each map 13290d94caffSDavid Greenman * entry to the region to be wired and increment its wiring 13300d94caffSDavid Greenman * count. We create objects before clipping the map entries 1331df8bae1dSRodney W. Grimes * to avoid object proliferation. 1332df8bae1dSRodney W. Grimes * 13330d94caffSDavid Greenman * 2. We downgrade to a read lock, and call vm_fault_wire to 13340d94caffSDavid Greenman * fault in the pages for any newly wired area (wired_count is 13350d94caffSDavid Greenman * 1). 1336df8bae1dSRodney W. Grimes * 13370d94caffSDavid Greenman * Downgrading to a read lock for vm_fault_wire avoids a possible 133824a1cce3SDavid Greenman * deadlock with another process that may have faulted on one 13390d94caffSDavid Greenman * of the pages to be wired (it would mark the page busy, 13400d94caffSDavid Greenman * blocking us, then in turn block on the map lock that we 13410d94caffSDavid Greenman * hold). 
Because of problems in the recursive lock package, 13420d94caffSDavid Greenman * we cannot upgrade to a write lock in vm_map_lookup. Thus, 13430d94caffSDavid Greenman * any actions that require the write lock must be done 13440d94caffSDavid Greenman * beforehand. Because we keep the read lock on the map, the 13450d94caffSDavid Greenman * copy-on-write status of the entries we modify here cannot 13460d94caffSDavid Greenman * change. 1347df8bae1dSRodney W. Grimes */ 1348df8bae1dSRodney W. Grimes 1349df8bae1dSRodney W. Grimes /* 1350df8bae1dSRodney W. Grimes * Pass 1. 1351df8bae1dSRodney W. Grimes */ 1352df8bae1dSRodney W. Grimes while ((entry != &map->header) && (entry->start < end)) { 1353df8bae1dSRodney W. Grimes if (entry->wired_count == 0) { 1354df8bae1dSRodney W. Grimes 1355df8bae1dSRodney W. Grimes /* 1356df8bae1dSRodney W. Grimes * Perform actions of vm_map_lookup that need 1357df8bae1dSRodney W. Grimes * the write lock on the map: create a shadow 1358df8bae1dSRodney W. Grimes * object for a copy-on-write region, or an 1359df8bae1dSRodney W. Grimes * object for a zero-fill region. 1360df8bae1dSRodney W. Grimes * 1361df8bae1dSRodney W. Grimes * We don't have to do this for entries that 13620d94caffSDavid Greenman * point to sharing maps, because we won't 13630d94caffSDavid Greenman * hold the lock on the sharing map. 1364df8bae1dSRodney W. Grimes */ 1365bf4bd9bdSDavid Greenman if (!entry->is_a_map && !entry->is_sub_map) { 1366df8bae1dSRodney W. Grimes if (entry->needs_copy && 1367df8bae1dSRodney W. Grimes ((entry->protection & VM_PROT_WRITE) != 0)) { 1368df8bae1dSRodney W. Grimes 1369df8bae1dSRodney W. Grimes vm_object_shadow(&entry->object.vm_object, 1370df8bae1dSRodney W. Grimes &entry->offset, 1371a316d390SJohn Dyson OFF_TO_IDX(entry->end 1372df8bae1dSRodney W. Grimes - entry->start)); 1373df8bae1dSRodney W. Grimes entry->needs_copy = FALSE; 13740d94caffSDavid Greenman } else if (entry->object.vm_object == NULL) { 1375df8bae1dSRodney W. 
Grimes entry->object.vm_object = 1376a316d390SJohn Dyson vm_object_allocate(OBJT_DEFAULT, 1377a316d390SJohn Dyson OFF_TO_IDX(entry->end - entry->start)); 1378df8bae1dSRodney W. Grimes entry->offset = (vm_offset_t) 0; 1379df8bae1dSRodney W. Grimes } 1380df8bae1dSRodney W. Grimes } 1381df8bae1dSRodney W. Grimes } 1382df8bae1dSRodney W. Grimes vm_map_clip_start(map, entry, start); 1383df8bae1dSRodney W. Grimes vm_map_clip_end(map, entry, end); 1384df8bae1dSRodney W. Grimes entry->wired_count++; 1385df8bae1dSRodney W. Grimes 1386df8bae1dSRodney W. Grimes /* 1387df8bae1dSRodney W. Grimes * Check for holes 1388df8bae1dSRodney W. Grimes */ 1389df8bae1dSRodney W. Grimes if (entry->end < end && 1390df8bae1dSRodney W. Grimes (entry->next == &map->header || 1391df8bae1dSRodney W. Grimes entry->next->start > entry->end)) { 1392df8bae1dSRodney W. Grimes /* 13930d94caffSDavid Greenman * Found one. Object creation actions do not 13940d94caffSDavid Greenman * need to be undone, but the wired counts 13950d94caffSDavid Greenman * need to be restored. 1396df8bae1dSRodney W. Grimes */ 1397df8bae1dSRodney W. Grimes while (entry != &map->header && entry->end > start) { 1398df8bae1dSRodney W. Grimes entry->wired_count--; 1399df8bae1dSRodney W. Grimes entry = entry->prev; 1400df8bae1dSRodney W. Grimes } 1401df8bae1dSRodney W. Grimes vm_map_unlock(map); 1402df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 1403df8bae1dSRodney W. Grimes } 1404df8bae1dSRodney W. Grimes entry = entry->next; 1405df8bae1dSRodney W. Grimes } 1406df8bae1dSRodney W. Grimes 1407df8bae1dSRodney W. Grimes /* 1408df8bae1dSRodney W. Grimes * Pass 2. 1409df8bae1dSRodney W. Grimes */ 1410df8bae1dSRodney W. Grimes 1411df8bae1dSRodney W. Grimes /* 1412df8bae1dSRodney W. Grimes * HACK HACK HACK HACK 1413df8bae1dSRodney W. Grimes * 141424a1cce3SDavid Greenman * If we are wiring in the kernel map or a submap of it, 141524a1cce3SDavid Greenman * unlock the map to avoid deadlocks. 
We trust that the 141624a1cce3SDavid Greenman * kernel is well-behaved, and therefore will not do 141724a1cce3SDavid Greenman * anything destructive to this region of the map while 141824a1cce3SDavid Greenman * we have it unlocked. We cannot trust user processes 141924a1cce3SDavid Greenman * to do the same. 1420df8bae1dSRodney W. Grimes * 1421df8bae1dSRodney W. Grimes * HACK HACK HACK HACK 1422df8bae1dSRodney W. Grimes */ 1423df8bae1dSRodney W. Grimes if (vm_map_pmap(map) == kernel_pmap) { 1424df8bae1dSRodney W. Grimes vm_map_unlock(map); /* trust me ... */ 14250d94caffSDavid Greenman } else { 1426df8bae1dSRodney W. Grimes lock_set_recursive(&map->lock); 1427df8bae1dSRodney W. Grimes lock_write_to_read(&map->lock); 1428df8bae1dSRodney W. Grimes } 1429df8bae1dSRodney W. Grimes 1430df8bae1dSRodney W. Grimes rv = 0; 1431df8bae1dSRodney W. Grimes entry = start_entry; 1432df8bae1dSRodney W. Grimes while (entry != &map->header && entry->start < end) { 1433df8bae1dSRodney W. Grimes /* 14340d94caffSDavid Greenman * If vm_fault_wire fails for any page we need to undo 14350d94caffSDavid Greenman * what has been done. We decrement the wiring count 14360d94caffSDavid Greenman * for those pages which have not yet been wired (now) 14370d94caffSDavid Greenman * and unwire those that have (later). 1438df8bae1dSRodney W. Grimes * 1439df8bae1dSRodney W. Grimes * XXX this violates the locking protocol on the map, 1440df8bae1dSRodney W. Grimes * needs to be fixed. 1441df8bae1dSRodney W. Grimes */ 1442df8bae1dSRodney W. Grimes if (rv) 1443df8bae1dSRodney W. Grimes entry->wired_count--; 1444df8bae1dSRodney W. Grimes else if (entry->wired_count == 1) { 1445df8bae1dSRodney W. Grimes rv = vm_fault_wire(map, entry->start, entry->end); 1446df8bae1dSRodney W. Grimes if (rv) { 1447df8bae1dSRodney W. Grimes failed = entry->start; 1448df8bae1dSRodney W. Grimes entry->wired_count--; 1449df8bae1dSRodney W. Grimes } 1450df8bae1dSRodney W. Grimes } 1451df8bae1dSRodney W. 
Grimes entry = entry->next; 1452df8bae1dSRodney W. Grimes } 1453df8bae1dSRodney W. Grimes 1454df8bae1dSRodney W. Grimes if (vm_map_pmap(map) == kernel_pmap) { 1455df8bae1dSRodney W. Grimes vm_map_lock(map); 14560d94caffSDavid Greenman } else { 1457df8bae1dSRodney W. Grimes lock_clear_recursive(&map->lock); 1458df8bae1dSRodney W. Grimes } 1459df8bae1dSRodney W. Grimes if (rv) { 1460df8bae1dSRodney W. Grimes vm_map_unlock(map); 1461df8bae1dSRodney W. Grimes (void) vm_map_pageable(map, start, failed, TRUE); 1462df8bae1dSRodney W. Grimes return (rv); 1463df8bae1dSRodney W. Grimes } 1464df8bae1dSRodney W. Grimes } 1465df8bae1dSRodney W. Grimes 1466df8bae1dSRodney W. Grimes vm_map_unlock(map); 1467df8bae1dSRodney W. Grimes 1468df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1469df8bae1dSRodney W. Grimes } 1470df8bae1dSRodney W. Grimes 1471df8bae1dSRodney W. Grimes /* 1472df8bae1dSRodney W. Grimes * vm_map_clean 1473df8bae1dSRodney W. Grimes * 1474df8bae1dSRodney W. Grimes * Push any dirty cached pages in the address range to their pager. 1475df8bae1dSRodney W. Grimes * If syncio is TRUE, dirty pages are written synchronously. 1476df8bae1dSRodney W. Grimes * If invalidate is TRUE, any cached pages are freed as well. 1477df8bae1dSRodney W. Grimes * 1478df8bae1dSRodney W. Grimes * Returns an error if any part of the specified range is not mapped. 1479df8bae1dSRodney W. Grimes */ 1480df8bae1dSRodney W. Grimes int 1481df8bae1dSRodney W. Grimes vm_map_clean(map, start, end, syncio, invalidate) 1482df8bae1dSRodney W. Grimes vm_map_t map; 1483df8bae1dSRodney W. Grimes vm_offset_t start; 1484df8bae1dSRodney W. Grimes vm_offset_t end; 1485df8bae1dSRodney W. Grimes boolean_t syncio; 1486df8bae1dSRodney W. Grimes boolean_t invalidate; 1487df8bae1dSRodney W. Grimes { 1488df8bae1dSRodney W. Grimes register vm_map_entry_t current; 1489df8bae1dSRodney W. Grimes vm_map_entry_t entry; 1490df8bae1dSRodney W. Grimes vm_size_t size; 1491df8bae1dSRodney W. 
Grimes vm_object_t object; 1492a316d390SJohn Dyson vm_ooffset_t offset; 1493df8bae1dSRodney W. Grimes 1494df8bae1dSRodney W. Grimes vm_map_lock_read(map); 1495df8bae1dSRodney W. Grimes VM_MAP_RANGE_CHECK(map, start, end); 1496df8bae1dSRodney W. Grimes if (!vm_map_lookup_entry(map, start, &entry)) { 1497df8bae1dSRodney W. Grimes vm_map_unlock_read(map); 1498df8bae1dSRodney W. Grimes return (KERN_INVALID_ADDRESS); 1499df8bae1dSRodney W. Grimes } 1500df8bae1dSRodney W. Grimes /* 1501df8bae1dSRodney W. Grimes * Make a first pass to check for holes. 1502df8bae1dSRodney W. Grimes */ 1503df8bae1dSRodney W. Grimes for (current = entry; current->start < end; current = current->next) { 1504df8bae1dSRodney W. Grimes if (current->is_sub_map) { 1505df8bae1dSRodney W. Grimes vm_map_unlock_read(map); 1506df8bae1dSRodney W. Grimes return (KERN_INVALID_ARGUMENT); 1507df8bae1dSRodney W. Grimes } 1508df8bae1dSRodney W. Grimes if (end > current->end && 1509df8bae1dSRodney W. Grimes (current->next == &map->header || 1510df8bae1dSRodney W. Grimes current->end != current->next->start)) { 1511df8bae1dSRodney W. Grimes vm_map_unlock_read(map); 1512df8bae1dSRodney W. Grimes return (KERN_INVALID_ADDRESS); 1513df8bae1dSRodney W. Grimes } 1514df8bae1dSRodney W. Grimes } 1515df8bae1dSRodney W. Grimes 1516df8bae1dSRodney W. Grimes /* 1517df8bae1dSRodney W. Grimes * Make a second pass, cleaning/uncaching pages from the indicated 1518df8bae1dSRodney W. Grimes * objects as we go. 1519df8bae1dSRodney W. Grimes */ 1520df8bae1dSRodney W. Grimes for (current = entry; current->start < end; current = current->next) { 1521df8bae1dSRodney W. Grimes offset = current->offset + (start - current->start); 1522df8bae1dSRodney W. Grimes size = (end <= current->end ? end : current->end) - start; 1523bf4bd9bdSDavid Greenman if (current->is_a_map || current->is_sub_map) { 1524df8bae1dSRodney W. Grimes register vm_map_t smap; 1525df8bae1dSRodney W. Grimes vm_map_entry_t tentry; 1526df8bae1dSRodney W. 
Grimes vm_size_t tsize; 1527df8bae1dSRodney W. Grimes 1528df8bae1dSRodney W. Grimes smap = current->object.share_map; 1529df8bae1dSRodney W. Grimes vm_map_lock_read(smap); 1530df8bae1dSRodney W. Grimes (void) vm_map_lookup_entry(smap, offset, &tentry); 1531df8bae1dSRodney W. Grimes tsize = tentry->end - offset; 1532df8bae1dSRodney W. Grimes if (tsize < size) 1533df8bae1dSRodney W. Grimes size = tsize; 1534df8bae1dSRodney W. Grimes object = tentry->object.vm_object; 1535df8bae1dSRodney W. Grimes offset = tentry->offset + (offset - tentry->start); 1536df8bae1dSRodney W. Grimes vm_map_unlock_read(smap); 1537df8bae1dSRodney W. Grimes } else { 1538df8bae1dSRodney W. Grimes object = current->object.vm_object; 1539df8bae1dSRodney W. Grimes } 15408a02c104SJohn Dyson /* 15418a02c104SJohn Dyson * Note that there is absolutely no sense in writing out 15428a02c104SJohn Dyson * anonymous objects, so we track down the vnode object 15438a02c104SJohn Dyson * to write out. 15448a02c104SJohn Dyson * We invalidate (remove) all pages from the address space 15458a02c104SJohn Dyson * anyway, for semantic correctness. 15468a02c104SJohn Dyson */ 15478a02c104SJohn Dyson while (object->backing_object) { 15488a02c104SJohn Dyson object = object->backing_object; 15498a02c104SJohn Dyson offset += object->backing_object_offset; 15508a02c104SJohn Dyson if (object->size < OFF_TO_IDX( offset + size)) 15518a02c104SJohn Dyson size = IDX_TO_OFF(object->size) - offset; 15528a02c104SJohn Dyson } 15538a02c104SJohn Dyson if (invalidate) 15548a02c104SJohn Dyson pmap_remove(vm_map_pmap(map), current->start, 155567cc64f4SJohn Dyson current->start + size); 155624a1cce3SDavid Greenman if (object && (object->type == OBJT_VNODE)) { 1557df8bae1dSRodney W. Grimes /* 15580d94caffSDavid Greenman * Flush pages if writing is allowed. XXX should we continue 15590d94caffSDavid Greenman * on an error? 
1560f5cf85d4SDavid Greenman * 1561f5cf85d4SDavid Greenman * XXX Doing async I/O and then removing all the pages from 1562f5cf85d4SDavid Greenman * the object before it completes is probably a very bad 1563f5cf85d4SDavid Greenman * idea. 1564df8bae1dSRodney W. Grimes */ 1565a02051c3SJohn Dyson if (current->protection & VM_PROT_WRITE) { 1566a316d390SJohn Dyson vm_object_page_clean(object, 1567a316d390SJohn Dyson OFF_TO_IDX(offset), 1568a316d390SJohn Dyson OFF_TO_IDX(offset + size), 1569a02051c3SJohn Dyson (syncio||invalidate)?1:0, TRUE); 1570df8bae1dSRodney W. Grimes if (invalidate) 1571a316d390SJohn Dyson vm_object_page_remove(object, 1572a316d390SJohn Dyson OFF_TO_IDX(offset), 1573a316d390SJohn Dyson OFF_TO_IDX(offset + size), 1574a316d390SJohn Dyson FALSE); 1575bf4bd9bdSDavid Greenman } 1576a02051c3SJohn Dyson } 1577df8bae1dSRodney W. Grimes start += size; 1578df8bae1dSRodney W. Grimes } 1579df8bae1dSRodney W. Grimes 1580df8bae1dSRodney W. Grimes vm_map_unlock_read(map); 1581df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1582df8bae1dSRodney W. Grimes } 1583df8bae1dSRodney W. Grimes 1584df8bae1dSRodney W. Grimes /* 1585df8bae1dSRodney W. Grimes * vm_map_entry_unwire: [ internal use only ] 1586df8bae1dSRodney W. Grimes * 1587df8bae1dSRodney W. Grimes * Make the region specified by this entry pageable. 1588df8bae1dSRodney W. Grimes * 1589df8bae1dSRodney W. Grimes * The map in question should be locked. 1590df8bae1dSRodney W. Grimes * [This is the reason for this routine's existence.] 1591df8bae1dSRodney W. Grimes */ 1592f708ef1bSPoul-Henning Kamp static void 15930d94caffSDavid Greenman vm_map_entry_unwire(map, entry) 1594df8bae1dSRodney W. Grimes vm_map_t map; 1595df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 1596df8bae1dSRodney W. Grimes { 1597df8bae1dSRodney W. Grimes vm_fault_unwire(map, entry->start, entry->end); 1598df8bae1dSRodney W. Grimes entry->wired_count = 0; 1599df8bae1dSRodney W. Grimes } 1600df8bae1dSRodney W. Grimes 1601df8bae1dSRodney W. 
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map: unlink it, drop the
 *	reference it holds on its object or sharing map, and return the
 *	entry structure to the free pool.  The map must be locked.
 */
static void
vm_map_entry_delete(map, entry)
	register vm_map_t map;
	register vm_map_entry_t entry;
{
	/* A wired entry must be unwired before its pages can go away. */
	if (entry->wired_count != 0)
		vm_map_entry_unwire(map, entry);

	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	/* Drop the reference on whatever the entry points at. */
	if (entry->is_a_map || entry->is_sub_map)
		vm_map_deallocate(entry->object.share_map);
	else
		vm_object_deallocate(entry->object.vm_object);

	vm_map_entry_dispose(map, entry);
}

/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 *
 *	When called with a sharing map, removes pages from
 *	that region from all physical maps.
 *
 *	Caller must hold the map lock (vm_map_remove is the locked
 *	exported wrapper).  Always returns KERN_SUCCESS.
 */
int
vm_map_delete(map, start, end)
	register vm_map_t map;
	vm_offset_t start;
	register vm_offset_t end;
{
	register vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		/* start is in a hole; begin deleting at the next entry. */
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Fix the lookup hint now, rather than each time though the
		 * loop.
		 */

		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		register vm_offset_t s, e;
		register vm_object_t object;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 * If this is a sharing map, we must remove *all* references
		 * to this data, since we can't find all of the physical maps
		 * which are sharing it.
		 */

		if (object == kernel_object || object == kmem_object)
			vm_object_page_remove(object, OFF_TO_IDX(entry->offset),
			    OFF_TO_IDX(entry->offset + (e - s)), FALSE);
		else if (!map->is_main_map)
			vm_object_pmap_remove(object,
			    OFF_TO_IDX(entry->offset),
			    OFF_TO_IDX(entry->offset + (e - s)));
		else
			pmap_remove(map->pmap, s, e);

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */

		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
int
vm_map_remove(map, start, end)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register int result, s = 0;

	/*
	 * kmem_map is also manipulated from interrupt context, so block
	 * interrupts around the deletion to avoid re-entry.
	 */
	if (map == kmem_map)
		s = splhigh();

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}

/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 *
 *	Returns TRUE only if every entry covering [start, end) grants all
 *	bits in `protection' and there are no holes.
 *	NOTE(review): this routine does not take the map lock -- presumably
 *	all callers hold it; verify before reusing elsewhere.
 */
boolean_t
vm_map_check_protection(map, start, end, protection)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register vm_prot_t protection;
{
	register vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return (FALSE);
	}
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return (FALSE);
		}
		/*
		 * No holes allowed!
		 */

		if (start < entry->start) {
			return (FALSE);
		}
		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			return (FALSE);
		}
		/* go to next entry */

		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}

/*
 * vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t src_map, dst_map;
	register vm_map_entry_t src_entry, dst_entry;
{
	vm_pindex_t temp_pindex;

	/* Sub-map entries cannot be copied through this path. */
	if (src_entry->is_sub_map || dst_entry->is_sub_map)
		return;

	/* The destination is expected to be freshly created and empty. */
	if (dst_entry->object.vm_object != NULL)
		printf("vm_map_copy_entry: dst_entry object not NULL!\n");

	/*
	 * If our destination map was wired down, unwire it now.
	 */

	if (dst_entry->wired_count != 0)
		vm_map_entry_unwire(dst_map, dst_entry);

	if (src_entry->wired_count == 0) {

		boolean_t src_needs_copy;

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if (!src_entry->needs_copy) {

			boolean_t su;

			/*
			 * If the source entry has only one mapping, we can
			 * just protect the virtual address range.
			 */
			if (!(su = src_map->is_main_map)) {
				su = (src_map->ref_count == 1);
			}
			if (su) {
				/* Sole mapping: write-protect the pmap range directly. */
				pmap_protect(src_map->pmap,
				    src_entry->start,
				    src_entry->end,
				    src_entry->protection & ~VM_PROT_WRITE);
			} else {
				/* Shared: write-protect via the object's page list. */
				vm_object_pmap_copy(src_entry->object.vm_object,
				    OFF_TO_IDX(src_entry->offset),
				    OFF_TO_IDX(src_entry->offset + (src_entry->end
					- src_entry->start)));
			}
		}
		/*
		 * Make a copy of the object.
		 */
		temp_pindex = OFF_TO_IDX(dst_entry->offset);
		vm_object_copy(src_entry->object.vm_object,
		    OFF_TO_IDX(src_entry->offset),
		    &dst_entry->object.vm_object,
		    &temp_pindex,
		    &src_needs_copy);
		dst_entry->offset = IDX_TO_OFF(temp_pindex);
		/*
		 * If we didn't get a copy-object now, mark the source map
		 * entry so that a shadow will be created to hold its changed
		 * pages.
		 */
		if (src_needs_copy)
			src_entry->needs_copy = TRUE;

		/*
		 * The destination always needs to have a shadow created.
		 */
		dst_entry->needs_copy = TRUE;

		/*
		 * Mark the entries copy-on-write, so that write-enabling the
		 * entry won't make copy-on-write pages writable.
		 */
		src_entry->copy_on_write = TRUE;
		dst_entry->copy_on_write = TRUE;

		/* Pre-load the destination pmap with the (read-only) mappings. */
		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}

/*
 * vmspace_fork:
 *	Create a new process vmspace structure and vm_map
 *	based on those of an existing process.  The new map
 *	is based on the old map, according to the inheritance
 *	values on the regions in that map.
 *
 *	The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
	register struct vmspace *vm1;
{
	register struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
Grimes vm_map_entry_t old_entry; 1913df8bae1dSRodney W. Grimes vm_map_entry_t new_entry; 1914df8bae1dSRodney W. Grimes pmap_t new_pmap; 1915de5f6a77SJohn Dyson vm_object_t object; 1916de5f6a77SJohn Dyson vm_page_t p; 1917df8bae1dSRodney W. Grimes 1918df8bae1dSRodney W. Grimes vm_map_lock(old_map); 1919df8bae1dSRodney W. Grimes 1920df8bae1dSRodney W. Grimes vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, 1921df8bae1dSRodney W. Grimes old_map->entries_pageable); 1922df8bae1dSRodney W. Grimes bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 1923df8bae1dSRodney W. Grimes (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); 1924df8bae1dSRodney W. Grimes new_pmap = &vm2->vm_pmap; /* XXX */ 1925df8bae1dSRodney W. Grimes new_map = &vm2->vm_map; /* XXX */ 1926df8bae1dSRodney W. Grimes 1927df8bae1dSRodney W. Grimes old_entry = old_map->header.next; 1928df8bae1dSRodney W. Grimes 1929df8bae1dSRodney W. Grimes while (old_entry != &old_map->header) { 1930df8bae1dSRodney W. Grimes if (old_entry->is_sub_map) 1931df8bae1dSRodney W. Grimes panic("vm_map_fork: encountered a submap"); 1932df8bae1dSRodney W. Grimes 1933df8bae1dSRodney W. Grimes switch (old_entry->inheritance) { 1934df8bae1dSRodney W. Grimes case VM_INHERIT_NONE: 1935df8bae1dSRodney W. Grimes break; 1936df8bae1dSRodney W. Grimes 1937df8bae1dSRodney W. Grimes case VM_INHERIT_SHARE: 1938df8bae1dSRodney W. Grimes /* 1939df8bae1dSRodney W. Grimes * Clone the entry, referencing the sharing map. 1940df8bae1dSRodney W. Grimes */ 1941df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(new_map); 1942df8bae1dSRodney W. Grimes *new_entry = *old_entry; 1943df8bae1dSRodney W. Grimes new_entry->wired_count = 0; 1944de5f6a77SJohn Dyson object = new_entry->object.vm_object; 1945de5f6a77SJohn Dyson ++object->ref_count; 1946df8bae1dSRodney W. Grimes 1947df8bae1dSRodney W. 
Grimes /* 19480d94caffSDavid Greenman * Insert the entry into the new map -- we know we're 19490d94caffSDavid Greenman * inserting at the end of the new map. 1950df8bae1dSRodney W. Grimes */ 1951df8bae1dSRodney W. Grimes 1952df8bae1dSRodney W. Grimes vm_map_entry_link(new_map, new_map->header.prev, 1953df8bae1dSRodney W. Grimes new_entry); 1954df8bae1dSRodney W. Grimes 1955df8bae1dSRodney W. Grimes /* 1956df8bae1dSRodney W. Grimes * Update the physical map 1957df8bae1dSRodney W. Grimes */ 1958df8bae1dSRodney W. Grimes 1959df8bae1dSRodney W. Grimes pmap_copy(new_map->pmap, old_map->pmap, 1960df8bae1dSRodney W. Grimes new_entry->start, 1961df8bae1dSRodney W. Grimes (old_entry->end - old_entry->start), 1962df8bae1dSRodney W. Grimes old_entry->start); 1963df8bae1dSRodney W. Grimes break; 1964df8bae1dSRodney W. Grimes 1965df8bae1dSRodney W. Grimes case VM_INHERIT_COPY: 1966df8bae1dSRodney W. Grimes /* 1967df8bae1dSRodney W. Grimes * Clone the entry and link into the map. 1968df8bae1dSRodney W. Grimes */ 1969df8bae1dSRodney W. Grimes 1970df8bae1dSRodney W. Grimes new_entry = vm_map_entry_create(new_map); 1971df8bae1dSRodney W. Grimes *new_entry = *old_entry; 1972df8bae1dSRodney W. Grimes new_entry->wired_count = 0; 1973df8bae1dSRodney W. Grimes new_entry->object.vm_object = NULL; 1974df8bae1dSRodney W. Grimes new_entry->is_a_map = FALSE; 1975df8bae1dSRodney W. Grimes vm_map_entry_link(new_map, new_map->header.prev, 1976df8bae1dSRodney W. Grimes new_entry); 1977bd7e5f99SJohn Dyson vm_map_copy_entry(old_map, new_map, old_entry, 1978bd7e5f99SJohn Dyson new_entry); 1979df8bae1dSRodney W. Grimes break; 1980df8bae1dSRodney W. Grimes } 1981df8bae1dSRodney W. Grimes old_entry = old_entry->next; 1982df8bae1dSRodney W. Grimes } 1983df8bae1dSRodney W. Grimes 1984df8bae1dSRodney W. Grimes new_map->size = old_map->size; 1985df8bae1dSRodney W. Grimes vm_map_unlock(old_map); 1986df8bae1dSRodney W. Grimes 1987df8bae1dSRodney W. Grimes return (vm2); 1988df8bae1dSRodney W. 
Grimes } 1989df8bae1dSRodney W. Grimes 1990df8bae1dSRodney W. Grimes /* 1991df8bae1dSRodney W. Grimes * vm_map_lookup: 1992df8bae1dSRodney W. Grimes * 1993df8bae1dSRodney W. Grimes * Finds the VM object, offset, and 1994df8bae1dSRodney W. Grimes * protection for a given virtual address in the 1995df8bae1dSRodney W. Grimes * specified map, assuming a page fault of the 1996df8bae1dSRodney W. Grimes * type specified. 1997df8bae1dSRodney W. Grimes * 1998df8bae1dSRodney W. Grimes * Leaves the map in question locked for read; return 1999df8bae1dSRodney W. Grimes * values are guaranteed until a vm_map_lookup_done 2000df8bae1dSRodney W. Grimes * call is performed. Note that the map argument 2001df8bae1dSRodney W. Grimes * is in/out; the returned map must be used in 2002df8bae1dSRodney W. Grimes * the call to vm_map_lookup_done. 2003df8bae1dSRodney W. Grimes * 2004df8bae1dSRodney W. Grimes * A handle (out_entry) is returned for use in 2005df8bae1dSRodney W. Grimes * vm_map_lookup_done, to make that fast. 2006df8bae1dSRodney W. Grimes * 2007df8bae1dSRodney W. Grimes * If a lookup is requested with "write protection" 2008df8bae1dSRodney W. Grimes * specified, the map may be changed to perform virtual 2009df8bae1dSRodney W. Grimes * copying operations, although the data referenced will 2010df8bae1dSRodney W. Grimes * remain the same. 2011df8bae1dSRodney W. Grimes */ 2012df8bae1dSRodney W. Grimes int 2013df8bae1dSRodney W. Grimes vm_map_lookup(var_map, vaddr, fault_type, out_entry, 2014a316d390SJohn Dyson object, pindex, out_prot, wired, single_use) 2015df8bae1dSRodney W. Grimes vm_map_t *var_map; /* IN/OUT */ 2016df8bae1dSRodney W. Grimes register vm_offset_t vaddr; 2017df8bae1dSRodney W. Grimes register vm_prot_t fault_type; 2018df8bae1dSRodney W. Grimes 2019df8bae1dSRodney W. Grimes vm_map_entry_t *out_entry; /* OUT */ 2020df8bae1dSRodney W. Grimes vm_object_t *object; /* OUT */ 2021a316d390SJohn Dyson vm_pindex_t *pindex; /* OUT */ 2022df8bae1dSRodney W. 
Grimes vm_prot_t *out_prot; /* OUT */ 2023df8bae1dSRodney W. Grimes boolean_t *wired; /* OUT */ 2024df8bae1dSRodney W. Grimes boolean_t *single_use; /* OUT */ 2025df8bae1dSRodney W. Grimes { 2026df8bae1dSRodney W. Grimes vm_map_t share_map; 2027df8bae1dSRodney W. Grimes vm_offset_t share_offset; 2028df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 2029df8bae1dSRodney W. Grimes register vm_map_t map = *var_map; 2030df8bae1dSRodney W. Grimes register vm_prot_t prot; 2031df8bae1dSRodney W. Grimes register boolean_t su; 2032df8bae1dSRodney W. Grimes 2033df8bae1dSRodney W. Grimes RetryLookup:; 2034df8bae1dSRodney W. Grimes 2035df8bae1dSRodney W. Grimes /* 2036df8bae1dSRodney W. Grimes * Lookup the faulting address. 2037df8bae1dSRodney W. Grimes */ 2038df8bae1dSRodney W. Grimes 2039df8bae1dSRodney W. Grimes vm_map_lock_read(map); 2040df8bae1dSRodney W. Grimes 2041df8bae1dSRodney W. Grimes #define RETURN(why) \ 2042df8bae1dSRodney W. Grimes { \ 2043df8bae1dSRodney W. Grimes vm_map_unlock_read(map); \ 2044df8bae1dSRodney W. Grimes return(why); \ 2045df8bae1dSRodney W. Grimes } 2046df8bae1dSRodney W. Grimes 2047df8bae1dSRodney W. Grimes /* 20480d94caffSDavid Greenman * If the map has an interesting hint, try it before calling full 20490d94caffSDavid Greenman * blown lookup routine. 2050df8bae1dSRodney W. Grimes */ 2051df8bae1dSRodney W. Grimes 2052df8bae1dSRodney W. Grimes entry = map->hint; 2053df8bae1dSRodney W. Grimes 2054df8bae1dSRodney W. Grimes *out_entry = entry; 2055df8bae1dSRodney W. Grimes 2056df8bae1dSRodney W. Grimes if ((entry == &map->header) || 2057df8bae1dSRodney W. Grimes (vaddr < entry->start) || (vaddr >= entry->end)) { 2058df8bae1dSRodney W. Grimes vm_map_entry_t tmp_entry; 2059df8bae1dSRodney W. Grimes 2060df8bae1dSRodney W. Grimes /* 20610d94caffSDavid Greenman * Entry was either not a valid hint, or the vaddr was not 20620d94caffSDavid Greenman * contained in the entry, so do a full lookup. 2063df8bae1dSRodney W. 
Grimes */ 2064df8bae1dSRodney W. Grimes if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) 2065df8bae1dSRodney W. Grimes RETURN(KERN_INVALID_ADDRESS); 2066df8bae1dSRodney W. Grimes 2067df8bae1dSRodney W. Grimes entry = tmp_entry; 2068df8bae1dSRodney W. Grimes *out_entry = entry; 2069df8bae1dSRodney W. Grimes } 2070df8bae1dSRodney W. Grimes /* 2071df8bae1dSRodney W. Grimes * Handle submaps. 2072df8bae1dSRodney W. Grimes */ 2073df8bae1dSRodney W. Grimes 2074df8bae1dSRodney W. Grimes if (entry->is_sub_map) { 2075df8bae1dSRodney W. Grimes vm_map_t old_map = map; 2076df8bae1dSRodney W. Grimes 2077df8bae1dSRodney W. Grimes *var_map = map = entry->object.sub_map; 2078df8bae1dSRodney W. Grimes vm_map_unlock_read(old_map); 2079df8bae1dSRodney W. Grimes goto RetryLookup; 2080df8bae1dSRodney W. Grimes } 2081df8bae1dSRodney W. Grimes /* 20820d94caffSDavid Greenman * Check whether this task is allowed to have this page. 2083df8bae1dSRodney W. Grimes */ 2084df8bae1dSRodney W. Grimes 2085df8bae1dSRodney W. Grimes prot = entry->protection; 2086df8bae1dSRodney W. Grimes if ((fault_type & (prot)) != fault_type) 2087df8bae1dSRodney W. Grimes RETURN(KERN_PROTECTION_FAILURE); 2088df8bae1dSRodney W. Grimes 2089df8bae1dSRodney W. Grimes /* 20900d94caffSDavid Greenman * If this page is not pageable, we have to get it for all possible 20910d94caffSDavid Greenman * accesses. 2092df8bae1dSRodney W. Grimes */ 2093df8bae1dSRodney W. Grimes 209405f0fdd2SPoul-Henning Kamp *wired = (entry->wired_count != 0); 209505f0fdd2SPoul-Henning Kamp if (*wired) 2096df8bae1dSRodney W. Grimes prot = fault_type = entry->protection; 2097df8bae1dSRodney W. Grimes 2098df8bae1dSRodney W. Grimes /* 20990d94caffSDavid Greenman * If we don't already have a VM object, track it down. 2100df8bae1dSRodney W. Grimes */ 2101df8bae1dSRodney W. Grimes 210205f0fdd2SPoul-Henning Kamp su = !entry->is_a_map; 210305f0fdd2SPoul-Henning Kamp if (su) { 2104df8bae1dSRodney W. Grimes share_map = map; 2105df8bae1dSRodney W. 
Grimes share_offset = vaddr; 21060d94caffSDavid Greenman } else { 2107df8bae1dSRodney W. Grimes vm_map_entry_t share_entry; 2108df8bae1dSRodney W. Grimes 2109df8bae1dSRodney W. Grimes /* 2110df8bae1dSRodney W. Grimes * Compute the sharing map, and offset into it. 2111df8bae1dSRodney W. Grimes */ 2112df8bae1dSRodney W. Grimes 2113df8bae1dSRodney W. Grimes share_map = entry->object.share_map; 2114df8bae1dSRodney W. Grimes share_offset = (vaddr - entry->start) + entry->offset; 2115df8bae1dSRodney W. Grimes 2116df8bae1dSRodney W. Grimes /* 2117df8bae1dSRodney W. Grimes * Look for the backing store object and offset 2118df8bae1dSRodney W. Grimes */ 2119df8bae1dSRodney W. Grimes 2120df8bae1dSRodney W. Grimes vm_map_lock_read(share_map); 2121df8bae1dSRodney W. Grimes 2122df8bae1dSRodney W. Grimes if (!vm_map_lookup_entry(share_map, share_offset, 2123df8bae1dSRodney W. Grimes &share_entry)) { 2124df8bae1dSRodney W. Grimes vm_map_unlock_read(share_map); 2125df8bae1dSRodney W. Grimes RETURN(KERN_INVALID_ADDRESS); 2126df8bae1dSRodney W. Grimes } 2127df8bae1dSRodney W. Grimes entry = share_entry; 2128df8bae1dSRodney W. Grimes } 2129df8bae1dSRodney W. Grimes 2130df8bae1dSRodney W. Grimes /* 2131df8bae1dSRodney W. Grimes * If the entry was copy-on-write, we either ... 2132df8bae1dSRodney W. Grimes */ 2133df8bae1dSRodney W. Grimes 2134df8bae1dSRodney W. Grimes if (entry->needs_copy) { 2135df8bae1dSRodney W. Grimes /* 21360d94caffSDavid Greenman * If we want to write the page, we may as well handle that 21370d94caffSDavid Greenman * now since we've got the sharing map locked. 2138df8bae1dSRodney W. Grimes * 21390d94caffSDavid Greenman * If we don't need to write the page, we just demote the 21400d94caffSDavid Greenman * permissions allowed. 2141df8bae1dSRodney W. Grimes */ 2142df8bae1dSRodney W. Grimes 2143df8bae1dSRodney W. Grimes if (fault_type & VM_PROT_WRITE) { 2144df8bae1dSRodney W. 
Grimes /* 21450d94caffSDavid Greenman * Make a new object, and place it in the object 21460d94caffSDavid Greenman * chain. Note that no new references have appeared 21470d94caffSDavid Greenman * -- one just moved from the share map to the new 21480d94caffSDavid Greenman * object. 2149df8bae1dSRodney W. Grimes */ 2150df8bae1dSRodney W. Grimes 2151df8bae1dSRodney W. Grimes if (lock_read_to_write(&share_map->lock)) { 2152df8bae1dSRodney W. Grimes if (share_map != map) 2153df8bae1dSRodney W. Grimes vm_map_unlock_read(map); 2154df8bae1dSRodney W. Grimes goto RetryLookup; 2155df8bae1dSRodney W. Grimes } 2156df8bae1dSRodney W. Grimes vm_object_shadow( 2157df8bae1dSRodney W. Grimes &entry->object.vm_object, 2158df8bae1dSRodney W. Grimes &entry->offset, 2159a316d390SJohn Dyson OFF_TO_IDX(entry->end - entry->start)); 2160df8bae1dSRodney W. Grimes 2161df8bae1dSRodney W. Grimes entry->needs_copy = FALSE; 2162df8bae1dSRodney W. Grimes 2163df8bae1dSRodney W. Grimes lock_write_to_read(&share_map->lock); 21640d94caffSDavid Greenman } else { 2165df8bae1dSRodney W. Grimes /* 21660d94caffSDavid Greenman * We're attempting to read a copy-on-write page -- 21670d94caffSDavid Greenman * don't allow writes. 2168df8bae1dSRodney W. Grimes */ 2169df8bae1dSRodney W. Grimes 2170df8bae1dSRodney W. Grimes prot &= (~VM_PROT_WRITE); 2171df8bae1dSRodney W. Grimes } 2172df8bae1dSRodney W. Grimes } 2173df8bae1dSRodney W. Grimes /* 2174df8bae1dSRodney W. Grimes * Create an object if necessary. 2175df8bae1dSRodney W. Grimes */ 2176df8bae1dSRodney W. Grimes if (entry->object.vm_object == NULL) { 2177df8bae1dSRodney W. Grimes 2178df8bae1dSRodney W. Grimes if (lock_read_to_write(&share_map->lock)) { 2179df8bae1dSRodney W. Grimes if (share_map != map) 2180df8bae1dSRodney W. Grimes vm_map_unlock_read(map); 2181df8bae1dSRodney W. Grimes goto RetryLookup; 2182df8bae1dSRodney W. 
Grimes } 218324a1cce3SDavid Greenman entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 2184a316d390SJohn Dyson OFF_TO_IDX(entry->end - entry->start)); 2185df8bae1dSRodney W. Grimes entry->offset = 0; 2186df8bae1dSRodney W. Grimes lock_write_to_read(&share_map->lock); 2187df8bae1dSRodney W. Grimes } 2188df8bae1dSRodney W. Grimes /* 21890d94caffSDavid Greenman * Return the object/offset from this entry. If the entry was 21900d94caffSDavid Greenman * copy-on-write or empty, it has been fixed up. 2191df8bae1dSRodney W. Grimes */ 2192df8bae1dSRodney W. Grimes 2193a316d390SJohn Dyson *pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset); 2194df8bae1dSRodney W. Grimes *object = entry->object.vm_object; 2195df8bae1dSRodney W. Grimes 2196df8bae1dSRodney W. Grimes /* 2197df8bae1dSRodney W. Grimes * Return whether this is the only map sharing this data. 2198df8bae1dSRodney W. Grimes */ 2199df8bae1dSRodney W. Grimes 2200df8bae1dSRodney W. Grimes if (!su) { 2201df8bae1dSRodney W. Grimes su = (share_map->ref_count == 1); 2202df8bae1dSRodney W. Grimes } 2203df8bae1dSRodney W. Grimes *out_prot = prot; 2204df8bae1dSRodney W. Grimes *single_use = su; 2205df8bae1dSRodney W. Grimes 2206df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 2207df8bae1dSRodney W. Grimes 2208df8bae1dSRodney W. Grimes #undef RETURN 2209df8bae1dSRodney W. Grimes } 2210df8bae1dSRodney W. Grimes 2211df8bae1dSRodney W. Grimes /* 2212df8bae1dSRodney W. Grimes * vm_map_lookup_done: 2213df8bae1dSRodney W. Grimes * 2214df8bae1dSRodney W. Grimes * Releases locks acquired by a vm_map_lookup 2215df8bae1dSRodney W. Grimes * (according to the handle returned by that lookup). 2216df8bae1dSRodney W. Grimes */ 2217df8bae1dSRodney W. Grimes 22180d94caffSDavid Greenman void 22190d94caffSDavid Greenman vm_map_lookup_done(map, entry) 2220df8bae1dSRodney W. Grimes register vm_map_t map; 2221df8bae1dSRodney W. Grimes vm_map_entry_t entry; 2222df8bae1dSRodney W. Grimes { 2223df8bae1dSRodney W. 
Grimes /* 2224df8bae1dSRodney W. Grimes * If this entry references a map, unlock it first. 2225df8bae1dSRodney W. Grimes */ 2226df8bae1dSRodney W. Grimes 2227df8bae1dSRodney W. Grimes if (entry->is_a_map) 2228df8bae1dSRodney W. Grimes vm_map_unlock_read(entry->object.share_map); 2229df8bae1dSRodney W. Grimes 2230df8bae1dSRodney W. Grimes /* 2231df8bae1dSRodney W. Grimes * Unlock the main-level map 2232df8bae1dSRodney W. Grimes */ 2233df8bae1dSRodney W. Grimes 2234df8bae1dSRodney W. Grimes vm_map_unlock_read(map); 2235df8bae1dSRodney W. Grimes } 2236df8bae1dSRodney W. Grimes 2237df8bae1dSRodney W. Grimes /* 2238df8bae1dSRodney W. Grimes * Routine: vm_map_simplify 2239df8bae1dSRodney W. Grimes * Purpose: 2240df8bae1dSRodney W. Grimes * Attempt to simplify the map representation in 2241df8bae1dSRodney W. Grimes * the vicinity of the given starting address. 2242df8bae1dSRodney W. Grimes * Note: 2243df8bae1dSRodney W. Grimes * This routine is intended primarily to keep the 2244df8bae1dSRodney W. Grimes * kernel maps more compact -- they generally don't 2245df8bae1dSRodney W. Grimes * benefit from the "expand a map entry" technology 2246df8bae1dSRodney W. Grimes * at allocation time because the adjacent entry 2247df8bae1dSRodney W. Grimes * is often wired down. 2248df8bae1dSRodney W. Grimes */ 22490d94caffSDavid Greenman void 22500d94caffSDavid Greenman vm_map_simplify(map, start) 2251df8bae1dSRodney W. Grimes vm_map_t map; 2252df8bae1dSRodney W. Grimes vm_offset_t start; 2253df8bae1dSRodney W. Grimes { 2254df8bae1dSRodney W. Grimes vm_map_entry_t this_entry; 2255df8bae1dSRodney W. Grimes vm_map_entry_t prev_entry; 2256df8bae1dSRodney W. Grimes 2257df8bae1dSRodney W. Grimes vm_map_lock(map); 2258df8bae1dSRodney W. Grimes if ( 2259df8bae1dSRodney W. Grimes (vm_map_lookup_entry(map, start, &this_entry)) && 2260df8bae1dSRodney W. Grimes ((prev_entry = this_entry->prev) != &map->header) && 2261df8bae1dSRodney W. Grimes 2262df8bae1dSRodney W. 
Grimes (prev_entry->end == start) && 2263df8bae1dSRodney W. Grimes (map->is_main_map) && 2264df8bae1dSRodney W. Grimes 2265df8bae1dSRodney W. Grimes (prev_entry->is_a_map == FALSE) && 2266df8bae1dSRodney W. Grimes (prev_entry->is_sub_map == FALSE) && 2267df8bae1dSRodney W. Grimes 2268df8bae1dSRodney W. Grimes (this_entry->is_a_map == FALSE) && 2269df8bae1dSRodney W. Grimes (this_entry->is_sub_map == FALSE) && 2270df8bae1dSRodney W. Grimes 2271df8bae1dSRodney W. Grimes (prev_entry->inheritance == this_entry->inheritance) && 2272df8bae1dSRodney W. Grimes (prev_entry->protection == this_entry->protection) && 2273df8bae1dSRodney W. Grimes (prev_entry->max_protection == this_entry->max_protection) && 2274df8bae1dSRodney W. Grimes (prev_entry->wired_count == this_entry->wired_count) && 2275df8bae1dSRodney W. Grimes 2276df8bae1dSRodney W. Grimes (prev_entry->copy_on_write == this_entry->copy_on_write) && 2277df8bae1dSRodney W. Grimes (prev_entry->needs_copy == this_entry->needs_copy) && 2278df8bae1dSRodney W. Grimes 2279df8bae1dSRodney W. Grimes (prev_entry->object.vm_object == this_entry->object.vm_object) && 2280df8bae1dSRodney W. Grimes ((prev_entry->offset + (prev_entry->end - prev_entry->start)) 2281df8bae1dSRodney W. Grimes == this_entry->offset) 2282df8bae1dSRodney W. Grimes ) { 2283df8bae1dSRodney W. Grimes if (map->first_free == this_entry) 2284df8bae1dSRodney W. Grimes map->first_free = prev_entry; 2285df8bae1dSRodney W. Grimes SAVE_HINT(map, prev_entry); 2286df8bae1dSRodney W. Grimes vm_map_entry_unlink(map, this_entry); 2287df8bae1dSRodney W. Grimes prev_entry->end = this_entry->end; 2288df8bae1dSRodney W. Grimes vm_object_deallocate(this_entry->object.vm_object); 2289df8bae1dSRodney W. Grimes vm_map_entry_dispose(map, this_entry); 2290df8bae1dSRodney W. Grimes } 2291df8bae1dSRodney W. Grimes vm_map_unlock(map); 2292df8bae1dSRodney W. Grimes } 2293df8bae1dSRodney W. Grimes 2294c3cb3e12SDavid Greenman #ifdef DDB 2295df8bae1dSRodney W. 
Grimes /* 2296df8bae1dSRodney W. Grimes * vm_map_print: [ debug ] 2297df8bae1dSRodney W. Grimes */ 22980d94caffSDavid Greenman void 2299914181e7SBruce Evans vm_map_print(imap, full, dummy3, dummy4) 2300914181e7SBruce Evans /* db_expr_t */ int imap; 2301df8bae1dSRodney W. Grimes boolean_t full; 2302914181e7SBruce Evans /* db_expr_t */ int dummy3; 2303914181e7SBruce Evans char *dummy4; 2304df8bae1dSRodney W. Grimes { 2305df8bae1dSRodney W. Grimes register vm_map_entry_t entry; 2306914181e7SBruce Evans register vm_map_t map = (vm_map_t)imap; /* XXX */ 2307df8bae1dSRodney W. Grimes 2308df8bae1dSRodney W. Grimes iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n", 2309df8bae1dSRodney W. Grimes (map->is_main_map ? "Task" : "Share"), 2310df8bae1dSRodney W. Grimes (int) map, (int) (map->pmap), map->ref_count, map->nentries, 2311df8bae1dSRodney W. Grimes map->timestamp); 2312df8bae1dSRodney W. Grimes 2313df8bae1dSRodney W. Grimes if (!full && indent) 2314df8bae1dSRodney W. Grimes return; 2315df8bae1dSRodney W. Grimes 2316df8bae1dSRodney W. Grimes indent += 2; 2317df8bae1dSRodney W. Grimes for (entry = map->header.next; entry != &map->header; 2318df8bae1dSRodney W. Grimes entry = entry->next) { 2319df8bae1dSRodney W. Grimes iprintf("map entry 0x%x: start=0x%x, end=0x%x, ", 2320df8bae1dSRodney W. Grimes (int) entry, (int) entry->start, (int) entry->end); 2321df8bae1dSRodney W. Grimes if (map->is_main_map) { 2322df8bae1dSRodney W. Grimes static char *inheritance_name[4] = 2323df8bae1dSRodney W. Grimes {"share", "copy", "none", "donate_copy"}; 23240d94caffSDavid Greenman 2325df8bae1dSRodney W. Grimes printf("prot=%x/%x/%s, ", 2326df8bae1dSRodney W. Grimes entry->protection, 2327df8bae1dSRodney W. Grimes entry->max_protection, 2328df8bae1dSRodney W. Grimes inheritance_name[entry->inheritance]); 2329df8bae1dSRodney W. Grimes if (entry->wired_count != 0) 2330df8bae1dSRodney W. Grimes printf("wired, "); 2331df8bae1dSRodney W. Grimes } 2332df8bae1dSRodney W. 
Grimes if (entry->is_a_map || entry->is_sub_map) { 2333df8bae1dSRodney W. Grimes printf("share=0x%x, offset=0x%x\n", 2334df8bae1dSRodney W. Grimes (int) entry->object.share_map, 2335df8bae1dSRodney W. Grimes (int) entry->offset); 2336df8bae1dSRodney W. Grimes if ((entry->prev == &map->header) || 2337df8bae1dSRodney W. Grimes (!entry->prev->is_a_map) || 2338df8bae1dSRodney W. Grimes (entry->prev->object.share_map != 2339df8bae1dSRodney W. Grimes entry->object.share_map)) { 2340df8bae1dSRodney W. Grimes indent += 2; 2341914181e7SBruce Evans vm_map_print((int)entry->object.share_map, 2342914181e7SBruce Evans full, 0, (char *)0); 2343df8bae1dSRodney W. Grimes indent -= 2; 2344df8bae1dSRodney W. Grimes } 23450d94caffSDavid Greenman } else { 2346df8bae1dSRodney W. Grimes printf("object=0x%x, offset=0x%x", 2347df8bae1dSRodney W. Grimes (int) entry->object.vm_object, 2348df8bae1dSRodney W. Grimes (int) entry->offset); 2349df8bae1dSRodney W. Grimes if (entry->copy_on_write) 2350df8bae1dSRodney W. Grimes printf(", copy (%s)", 2351df8bae1dSRodney W. Grimes entry->needs_copy ? "needed" : "done"); 2352df8bae1dSRodney W. Grimes printf("\n"); 2353df8bae1dSRodney W. Grimes 2354df8bae1dSRodney W. Grimes if ((entry->prev == &map->header) || 2355df8bae1dSRodney W. Grimes (entry->prev->is_a_map) || 2356df8bae1dSRodney W. Grimes (entry->prev->object.vm_object != 2357df8bae1dSRodney W. Grimes entry->object.vm_object)) { 2358df8bae1dSRodney W. Grimes indent += 2; 2359914181e7SBruce Evans vm_object_print((int)entry->object.vm_object, 2360914181e7SBruce Evans full, 0, (char *)0); 2361df8bae1dSRodney W. Grimes indent -= 2; 2362df8bae1dSRodney W. Grimes } 2363df8bae1dSRodney W. Grimes } 2364df8bae1dSRodney W. Grimes } 2365df8bae1dSRodney W. Grimes indent -= 2; 2366df8bae1dSRodney W. Grimes } 2367c3cb3e12SDavid Greenman #endif 2368