1df8bae1dSRodney W. Grimes /* 2df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 3df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 4df8bae1dSRodney W. Grimes * 5df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 6df8bae1dSRodney W. Grimes * The Mach Operating System project at Carnegie-Mellon University. 7df8bae1dSRodney W. Grimes * 8df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 9df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 10df8bae1dSRodney W. Grimes * are met: 11df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 12df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 13df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 15df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 16df8bae1dSRodney W. Grimes * 3. All advertising materials mentioning features or use of this software 17df8bae1dSRodney W. Grimes * must display the following acknowledgement: 18df8bae1dSRodney W. Grimes * This product includes software developed by the University of 19df8bae1dSRodney W. Grimes * California, Berkeley and its contributors. 20df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 21df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 22df8bae1dSRodney W. Grimes * without specific prior written permission. 23df8bae1dSRodney W. Grimes * 24df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34df8bae1dSRodney W. Grimes * SUCH DAMAGE. 35df8bae1dSRodney W. Grimes * 363c4dd356SDavid Greenman * from: @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 37df8bae1dSRodney W. Grimes * 38df8bae1dSRodney W. Grimes * 39df8bae1dSRodney W. Grimes * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40df8bae1dSRodney W. Grimes * All rights reserved. 41df8bae1dSRodney W. Grimes * 42df8bae1dSRodney W. Grimes * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43df8bae1dSRodney W. Grimes * 44df8bae1dSRodney W. Grimes * Permission to use, copy, modify and distribute this software and 45df8bae1dSRodney W. Grimes * its documentation is hereby granted, provided that both the copyright 46df8bae1dSRodney W. Grimes * notice and this permission notice appear in all copies of the 47df8bae1dSRodney W. Grimes * software, derivative works or modified versions, and any portions 48df8bae1dSRodney W. Grimes * thereof, and that both notices appear in supporting documentation. 49df8bae1dSRodney W. Grimes * 50df8bae1dSRodney W. Grimes * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51df8bae1dSRodney W. Grimes * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52df8bae1dSRodney W. Grimes * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53df8bae1dSRodney W. Grimes * 54df8bae1dSRodney W. Grimes * Carnegie Mellon requests users of this software to return to 55df8bae1dSRodney W. Grimes * 56df8bae1dSRodney W. Grimes * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57df8bae1dSRodney W. Grimes * School of Computer Science 58df8bae1dSRodney W. Grimes * Carnegie Mellon University 59df8bae1dSRodney W. Grimes * Pittsburgh PA 15213-3890 60df8bae1dSRodney W. Grimes * 61df8bae1dSRodney W. Grimes * any improvements or extensions that they make and grant Carnegie the 62df8bae1dSRodney W. Grimes * rights to redistribute these changes. 633c4dd356SDavid Greenman * 64a1f6d91cSDavid Greenman * $Id: vm_kern.c,v 1.9 1995/01/24 10:12:51 davidg Exp $ 65df8bae1dSRodney W. Grimes */ 66df8bae1dSRodney W. Grimes 67df8bae1dSRodney W. Grimes /* 68df8bae1dSRodney W. Grimes * Kernel memory management. 69df8bae1dSRodney W. Grimes */ 70df8bae1dSRodney W. Grimes 71df8bae1dSRodney W. Grimes #include <sys/param.h> 72df8bae1dSRodney W. Grimes #include <sys/systm.h> 73f23b4c91SGarrett Wollman #include <sys/kernel.h> 74f23b4c91SGarrett Wollman #include <sys/proc.h> 75a1f6d91cSDavid Greenman #include <sys/malloc.h> 76df8bae1dSRodney W. Grimes 77df8bae1dSRodney W. Grimes #include <vm/vm.h> 78df8bae1dSRodney W. Grimes #include <vm/vm_page.h> 79df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h> 80df8bae1dSRodney W. Grimes #include <vm/vm_kern.h> 81df8bae1dSRodney W. Grimes 82f23b4c91SGarrett Wollman vm_map_t buffer_map; 83f23b4c91SGarrett Wollman vm_map_t kernel_map; 84f23b4c91SGarrett Wollman vm_map_t kmem_map; 85f23b4c91SGarrett Wollman vm_map_t mb_map; 86f23b4c91SGarrett Wollman vm_map_t io_map; 87f23b4c91SGarrett Wollman vm_map_t clean_map; 88f23b4c91SGarrett Wollman vm_map_t pager_map; 89f23b4c91SGarrett Wollman vm_map_t phys_map; 900d94caffSDavid Greenman vm_map_t exec_map; 910d94caffSDavid Greenman vm_map_t u_map; 92f23b4c91SGarrett Wollman 93df8bae1dSRodney W. Grimes /* 94df8bae1dSRodney W. Grimes * kmem_alloc_pageable: 95df8bae1dSRodney W. Grimes * 96df8bae1dSRodney W. Grimes * Allocate pageable memory to the kernel's address map. 97df8bae1dSRodney W. Grimes * map must be "kernel_map" below. 98df8bae1dSRodney W. Grimes */ 99df8bae1dSRodney W. Grimes 1000d94caffSDavid Greenman vm_offset_t 1010d94caffSDavid Greenman kmem_alloc_pageable(map, size) 102df8bae1dSRodney W. Grimes vm_map_t map; 103df8bae1dSRodney W. Grimes register vm_size_t size; 104df8bae1dSRodney W. Grimes { 105df8bae1dSRodney W. Grimes vm_offset_t addr; 106df8bae1dSRodney W. Grimes register int result; 107df8bae1dSRodney W. Grimes 108df8bae1dSRodney W. Grimes #if 0 109df8bae1dSRodney W. Grimes if (map != kernel_map) 110df8bae1dSRodney W. Grimes panic("kmem_alloc_pageable: not called with kernel_map"); 111df8bae1dSRodney W. Grimes #endif 112df8bae1dSRodney W. Grimes 113df8bae1dSRodney W. Grimes size = round_page(size); 114df8bae1dSRodney W. Grimes 115df8bae1dSRodney W. Grimes addr = vm_map_min(map); 116df8bae1dSRodney W. Grimes result = vm_map_find(map, NULL, (vm_offset_t) 0, 117df8bae1dSRodney W. Grimes &addr, size, TRUE); 118df8bae1dSRodney W. Grimes if (result != KERN_SUCCESS) { 119df8bae1dSRodney W. Grimes return (0); 120df8bae1dSRodney W. Grimes } 121df8bae1dSRodney W. Grimes return (addr); 122df8bae1dSRodney W. Grimes } 123df8bae1dSRodney W. Grimes 124df8bae1dSRodney W. Grimes /* 125df8bae1dSRodney W. Grimes * Allocate wired-down memory in the kernel's address map 126df8bae1dSRodney W. Grimes * or a submap. 127df8bae1dSRodney W. Grimes */ 1280d94caffSDavid Greenman vm_offset_t 1290d94caffSDavid Greenman kmem_alloc(map, size) 130df8bae1dSRodney W. Grimes register vm_map_t map; 131df8bae1dSRodney W. Grimes register vm_size_t size; 132df8bae1dSRodney W. Grimes { 133df8bae1dSRodney W. Grimes vm_offset_t addr; 134df8bae1dSRodney W. Grimes register vm_offset_t offset; 135df8bae1dSRodney W. Grimes vm_offset_t i; 136df8bae1dSRodney W. Grimes 137df8bae1dSRodney W. Grimes size = round_page(size); 138df8bae1dSRodney W. Grimes 139df8bae1dSRodney W. Grimes /* 1400d94caffSDavid Greenman * Use the kernel object for wired-down kernel pages. Assume that no 1410d94caffSDavid Greenman * region of the kernel object is referenced more than once. 142df8bae1dSRodney W. Grimes */ 143df8bae1dSRodney W. Grimes 144df8bae1dSRodney W. Grimes /* 1450d94caffSDavid Greenman * Locate sufficient space in the map. This will give us the final 1460d94caffSDavid Greenman * virtual address for the new memory, and thus will tell us the 1470d94caffSDavid Greenman * offset within the kernel map. 148df8bae1dSRodney W. Grimes */ 149df8bae1dSRodney W. Grimes vm_map_lock(map); 150df8bae1dSRodney W. Grimes if (vm_map_findspace(map, 0, size, &addr)) { 151df8bae1dSRodney W. Grimes vm_map_unlock(map); 152df8bae1dSRodney W. Grimes return (0); 153df8bae1dSRodney W. Grimes } 154df8bae1dSRodney W. Grimes offset = addr - VM_MIN_KERNEL_ADDRESS; 155df8bae1dSRodney W. Grimes vm_object_reference(kernel_object); 156df8bae1dSRodney W. Grimes vm_map_insert(map, kernel_object, offset, addr, addr + size); 157df8bae1dSRodney W. Grimes vm_map_unlock(map); 158df8bae1dSRodney W. Grimes 159df8bae1dSRodney W. Grimes /* 1600d94caffSDavid Greenman * Guarantee that there are pages already in this object before 1610d94caffSDavid Greenman * calling vm_map_pageable. This is to prevent the following 1620d94caffSDavid Greenman * scenario: 163df8bae1dSRodney W. Grimes * 1640d94caffSDavid Greenman * 1) Threads have swapped out, so that there is a pager for the 1650d94caffSDavid Greenman * kernel_object. 2) The kmsg zone is empty, and so we are 1660d94caffSDavid Greenman * kmem_allocing a new page for it. 3) vm_map_pageable calls vm_fault; 1670d94caffSDavid Greenman * there is no page, but there is a pager, so we call 1680d94caffSDavid Greenman * pager_data_request. But the kmsg zone is empty, so we must 1690d94caffSDavid Greenman * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when 1700d94caffSDavid Greenman * we get the data back from the pager, it will be (very stale) 1710d94caffSDavid Greenman * non-zero data. kmem_alloc is defined to return zero-filled memory. 172df8bae1dSRodney W. Grimes * 1730d94caffSDavid Greenman * We're intentionally not activating the pages we allocate to prevent a 1740d94caffSDavid Greenman * race with page-out. vm_map_pageable will wire the pages. 175df8bae1dSRodney W. Grimes */ 176df8bae1dSRodney W. Grimes 177df8bae1dSRodney W. Grimes vm_object_lock(kernel_object); 178df8bae1dSRodney W. Grimes for (i = 0; i < size; i += PAGE_SIZE) { 179df8bae1dSRodney W. Grimes vm_page_t mem; 180df8bae1dSRodney W. Grimes 1816d40c3d3SDavid Greenman while ((mem = vm_page_alloc(kernel_object, offset + i, VM_ALLOC_NORMAL)) == NULL) { 182df8bae1dSRodney W. Grimes vm_object_unlock(kernel_object); 183df8bae1dSRodney W. Grimes VM_WAIT; 184df8bae1dSRodney W. Grimes vm_object_lock(kernel_object); 185df8bae1dSRodney W. Grimes } 186df8bae1dSRodney W. Grimes vm_page_zero_fill(mem); 187df8bae1dSRodney W. Grimes mem->flags &= ~PG_BUSY; 1880d94caffSDavid Greenman mem->valid |= VM_PAGE_BITS_ALL; 189df8bae1dSRodney W. Grimes } 190df8bae1dSRodney W. Grimes vm_object_unlock(kernel_object); 191df8bae1dSRodney W. Grimes 192df8bae1dSRodney W. Grimes /* 193df8bae1dSRodney W. Grimes * And finally, mark the data as non-pageable. 194df8bae1dSRodney W. Grimes */ 195df8bae1dSRodney W. Grimes 196df8bae1dSRodney W. Grimes (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE); 197df8bae1dSRodney W. Grimes 198df8bae1dSRodney W. Grimes /* 199df8bae1dSRodney W. Grimes * Try to coalesce the map 200df8bae1dSRodney W. Grimes */ 201df8bae1dSRodney W. Grimes vm_map_simplify(map, addr); 202df8bae1dSRodney W. Grimes 203df8bae1dSRodney W. Grimes return (addr); 204df8bae1dSRodney W. Grimes } 205df8bae1dSRodney W. Grimes 206df8bae1dSRodney W. Grimes /* 207df8bae1dSRodney W. Grimes * kmem_free: 208df8bae1dSRodney W. Grimes * 209df8bae1dSRodney W. Grimes * Release a region of kernel virtual memory allocated 210df8bae1dSRodney W. Grimes * with kmem_alloc, and return the physical pages 211df8bae1dSRodney W. Grimes * associated with that region. 212df8bae1dSRodney W. Grimes */ 2130d94caffSDavid Greenman void 2140d94caffSDavid Greenman kmem_free(map, addr, size) 215df8bae1dSRodney W. Grimes vm_map_t map; 216df8bae1dSRodney W. Grimes register vm_offset_t addr; 217df8bae1dSRodney W. Grimes vm_size_t size; 218df8bae1dSRodney W. Grimes { 219df8bae1dSRodney W. Grimes (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size)); 220df8bae1dSRodney W. Grimes } 221df8bae1dSRodney W. Grimes 222df8bae1dSRodney W. Grimes /* 223df8bae1dSRodney W. Grimes * kmem_suballoc: 224df8bae1dSRodney W. Grimes * 225df8bae1dSRodney W. Grimes * Allocates a map to manage a subrange 226df8bae1dSRodney W. Grimes * of the kernel virtual address space. 227df8bae1dSRodney W. Grimes * 228df8bae1dSRodney W. Grimes * Arguments are as follows: 229df8bae1dSRodney W. Grimes * 230df8bae1dSRodney W. Grimes * parent Map to take range from 231df8bae1dSRodney W. Grimes * size Size of range to find 232df8bae1dSRodney W. Grimes * min, max Returned endpoints of map 233df8bae1dSRodney W. Grimes * pageable Can the region be paged 234df8bae1dSRodney W. Grimes */ 2350d94caffSDavid Greenman vm_map_t 2360d94caffSDavid Greenman kmem_suballoc(parent, min, max, size, pageable) 237df8bae1dSRodney W. Grimes register vm_map_t parent; 238df8bae1dSRodney W. Grimes vm_offset_t *min, *max; 239df8bae1dSRodney W. Grimes register vm_size_t size; 240df8bae1dSRodney W. Grimes boolean_t pageable; 241df8bae1dSRodney W. Grimes { 242df8bae1dSRodney W. Grimes register int ret; 243df8bae1dSRodney W. Grimes vm_map_t result; 244df8bae1dSRodney W. Grimes 245df8bae1dSRodney W. Grimes size = round_page(size); 246df8bae1dSRodney W. Grimes 247df8bae1dSRodney W. Grimes *min = (vm_offset_t) vm_map_min(parent); 248df8bae1dSRodney W. Grimes ret = vm_map_find(parent, NULL, (vm_offset_t) 0, 249df8bae1dSRodney W. Grimes min, size, TRUE); 250df8bae1dSRodney W. Grimes if (ret != KERN_SUCCESS) { 251df8bae1dSRodney W. Grimes printf("kmem_suballoc: bad status return of %d.\n", ret); 252df8bae1dSRodney W. Grimes panic("kmem_suballoc"); 253df8bae1dSRodney W. Grimes } 254df8bae1dSRodney W. Grimes *max = *min + size; 255df8bae1dSRodney W. Grimes pmap_reference(vm_map_pmap(parent)); 256df8bae1dSRodney W. Grimes result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable); 257df8bae1dSRodney W. Grimes if (result == NULL) 258df8bae1dSRodney W. Grimes panic("kmem_suballoc: cannot create submap"); 259df8bae1dSRodney W. Grimes if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS) 260df8bae1dSRodney W. Grimes panic("kmem_suballoc: unable to change range to submap"); 261df8bae1dSRodney W. Grimes return (result); 262df8bae1dSRodney W. Grimes } 263df8bae1dSRodney W. Grimes 264df8bae1dSRodney W. Grimes /* 265df8bae1dSRodney W. Grimes * Allocate wired-down memory in the kernel's address map for the higher 266df8bae1dSRodney W. Grimes * level kernel memory allocator (kern/kern_malloc.c). We cannot use 267df8bae1dSRodney W. Grimes * kmem_alloc() because we may need to allocate memory at interrupt 268df8bae1dSRodney W. Grimes * level where we cannot block (canwait == FALSE). 269df8bae1dSRodney W. Grimes * 270df8bae1dSRodney W. Grimes * This routine has its own private kernel submap (kmem_map) and object 271df8bae1dSRodney W. Grimes * (kmem_object). This, combined with the fact that only malloc uses 272df8bae1dSRodney W. Grimes * this routine, ensures that we will never block in map or object waits. 273df8bae1dSRodney W. Grimes * 274df8bae1dSRodney W. Grimes * Note that this still only works in a uni-processor environment and 275df8bae1dSRodney W. Grimes * when called at splhigh(). 276df8bae1dSRodney W. Grimes * 277df8bae1dSRodney W. Grimes * We don't worry about expanding the map (adding entries) since entries 278df8bae1dSRodney W. Grimes * for wired maps are statically allocated. 279df8bae1dSRodney W. Grimes */ 280df8bae1dSRodney W. Grimes vm_offset_t 281a1f6d91cSDavid Greenman kmem_malloc(map, size, waitflag) 282df8bae1dSRodney W. Grimes register vm_map_t map; 283df8bae1dSRodney W. Grimes register vm_size_t size; 284a1f6d91cSDavid Greenman boolean_t waitflag; 285df8bae1dSRodney W. Grimes { 286df8bae1dSRodney W. Grimes register vm_offset_t offset, i; 287df8bae1dSRodney W. Grimes vm_map_entry_t entry; 288df8bae1dSRodney W. Grimes vm_offset_t addr; 289df8bae1dSRodney W. Grimes vm_page_t m; 290df8bae1dSRodney W. Grimes 291df8bae1dSRodney W. Grimes if (map != kmem_map && map != mb_map) 292df8bae1dSRodney W. Grimes panic("kern_malloc_alloc: map != {kmem,mb}_map"); 293df8bae1dSRodney W. Grimes 294df8bae1dSRodney W. Grimes size = round_page(size); 295df8bae1dSRodney W. Grimes addr = vm_map_min(map); 296df8bae1dSRodney W. Grimes 297df8bae1dSRodney W. Grimes /* 2980d94caffSDavid Greenman * Locate sufficient space in the map. This will give us the final 2990d94caffSDavid Greenman * virtual address for the new memory, and thus will tell us the 3000d94caffSDavid Greenman * offset within the kernel map. 301df8bae1dSRodney W. Grimes */ 302df8bae1dSRodney W. Grimes vm_map_lock(map); 303df8bae1dSRodney W. Grimes if (vm_map_findspace(map, 0, size, &addr)) { 304df8bae1dSRodney W. Grimes vm_map_unlock(map); 30526f9a767SRodney W. Grimes #if 0 306df8bae1dSRodney W. Grimes if (canwait) /* XXX should wait */ 307df8bae1dSRodney W. Grimes panic("kmem_malloc: %s too small", 308df8bae1dSRodney W. Grimes map == kmem_map ? "kmem_map" : "mb_map"); 30926f9a767SRodney W. Grimes #endif 310a1f6d91cSDavid Greenman if (waitflag == M_WAITOK) 31126f9a767SRodney W. Grimes panic("kmem_malloc: map too small"); 312df8bae1dSRodney W. Grimes return (0); 313df8bae1dSRodney W. Grimes } 314df8bae1dSRodney W. Grimes offset = addr - vm_map_min(kmem_map); 315df8bae1dSRodney W. Grimes vm_object_reference(kmem_object); 316df8bae1dSRodney W. Grimes vm_map_insert(map, kmem_object, offset, addr, addr + size); 317df8bae1dSRodney W. Grimes 318df8bae1dSRodney W. Grimes /* 3190d94caffSDavid Greenman * If we can wait, just mark the range as wired (will fault pages as 3200d94caffSDavid Greenman * necessary). 321df8bae1dSRodney W. Grimes */ 322a1f6d91cSDavid Greenman if (waitflag == M_WAITOK) { 323df8bae1dSRodney W. Grimes vm_map_unlock(map); 324df8bae1dSRodney W. Grimes (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, 325df8bae1dSRodney W. Grimes FALSE); 326df8bae1dSRodney W. Grimes vm_map_simplify(map, addr); 327df8bae1dSRodney W. Grimes return (addr); 328df8bae1dSRodney W. Grimes } 329df8bae1dSRodney W. Grimes /* 330df8bae1dSRodney W. Grimes * If we cannot wait then we must allocate all memory up front, 331df8bae1dSRodney W. Grimes * pulling it off the active queue to prevent pageout. 332df8bae1dSRodney W. Grimes */ 333df8bae1dSRodney W. Grimes vm_object_lock(kmem_object); 334df8bae1dSRodney W. Grimes for (i = 0; i < size; i += PAGE_SIZE) { 335a1f6d91cSDavid Greenman m = vm_page_alloc(kmem_object, offset + i, 336a1f6d91cSDavid Greenman (waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM); 337df8bae1dSRodney W. Grimes 338df8bae1dSRodney W. Grimes /* 3390d94caffSDavid Greenman * Ran out of space, free everything up and return. Don't need 3400d94caffSDavid Greenman * to lock page queues here as we know that the pages we got 3410d94caffSDavid Greenman * aren't on any queues. 342df8bae1dSRodney W. Grimes */ 343df8bae1dSRodney W. Grimes if (m == NULL) { 344df8bae1dSRodney W. Grimes while (i != 0) { 345df8bae1dSRodney W. Grimes i -= PAGE_SIZE; 346df8bae1dSRodney W. Grimes m = vm_page_lookup(kmem_object, offset + i); 347df8bae1dSRodney W. Grimes vm_page_free(m); 348df8bae1dSRodney W. Grimes } 349df8bae1dSRodney W. Grimes vm_object_unlock(kmem_object); 350df8bae1dSRodney W. Grimes vm_map_delete(map, addr, addr + size); 351df8bae1dSRodney W. Grimes vm_map_unlock(map); 352df8bae1dSRodney W. Grimes return (0); 353df8bae1dSRodney W. Grimes } 354df8bae1dSRodney W. Grimes #if 0 355df8bae1dSRodney W. Grimes vm_page_zero_fill(m); 356df8bae1dSRodney W. Grimes #endif 357df8bae1dSRodney W. Grimes m->flags &= ~PG_BUSY; 3580d94caffSDavid Greenman m->valid |= VM_PAGE_BITS_ALL; 359df8bae1dSRodney W. Grimes } 360df8bae1dSRodney W. Grimes vm_object_unlock(kmem_object); 361df8bae1dSRodney W. Grimes 362df8bae1dSRodney W. Grimes /* 3630d94caffSDavid Greenman * Mark map entry as non-pageable. Assert: vm_map_insert() will never 3640d94caffSDavid Greenman * be able to extend the previous entry so there will be a new entry 3650d94caffSDavid Greenman * exactly corresponding to this address range and it will have 3660d94caffSDavid Greenman * wired_count == 0. 367df8bae1dSRodney W. Grimes */ 368df8bae1dSRodney W. Grimes if (!vm_map_lookup_entry(map, addr, &entry) || 369df8bae1dSRodney W. Grimes entry->start != addr || entry->end != addr + size || 370df8bae1dSRodney W. Grimes entry->wired_count) 371df8bae1dSRodney W. Grimes panic("kmem_malloc: entry not found or misaligned"); 372df8bae1dSRodney W. Grimes entry->wired_count++; 373df8bae1dSRodney W. Grimes 374df8bae1dSRodney W. Grimes /* 3750d94caffSDavid Greenman * Loop thru pages, entering them in the pmap. (We cannot add them to 3760d94caffSDavid Greenman * the wired count without wrapping the vm_page_queue_lock in 3770d94caffSDavid Greenman * splimp...) 378df8bae1dSRodney W. Grimes */ 379df8bae1dSRodney W. Grimes for (i = 0; i < size; i += PAGE_SIZE) { 380df8bae1dSRodney W. Grimes vm_object_lock(kmem_object); 381df8bae1dSRodney W. Grimes m = vm_page_lookup(kmem_object, offset + i); 382df8bae1dSRodney W. Grimes vm_object_unlock(kmem_object); 38316f62314SDavid Greenman pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m)); 384df8bae1dSRodney W. Grimes } 385df8bae1dSRodney W. Grimes vm_map_unlock(map); 386df8bae1dSRodney W. Grimes 387df8bae1dSRodney W. Grimes vm_map_simplify(map, addr); 388df8bae1dSRodney W. Grimes return (addr); 389df8bae1dSRodney W. Grimes } 390df8bae1dSRodney W. Grimes 391df8bae1dSRodney W. Grimes /* 392df8bae1dSRodney W. Grimes * kmem_alloc_wait 393df8bae1dSRodney W. Grimes * 394df8bae1dSRodney W. Grimes * Allocates pageable memory from a sub-map of the kernel. If the submap 395df8bae1dSRodney W. Grimes * has no room, the caller sleeps waiting for more memory in the submap. 396df8bae1dSRodney W. Grimes * 397df8bae1dSRodney W. Grimes */ 3980d94caffSDavid Greenman vm_offset_t 3990d94caffSDavid Greenman kmem_alloc_wait(map, size) 400df8bae1dSRodney W. Grimes vm_map_t map; 401df8bae1dSRodney W. Grimes vm_size_t size; 402df8bae1dSRodney W. Grimes { 403df8bae1dSRodney W. Grimes vm_offset_t addr; 404df8bae1dSRodney W. Grimes 405df8bae1dSRodney W. Grimes size = round_page(size); 406df8bae1dSRodney W. Grimes 407df8bae1dSRodney W. Grimes for (;;) { 408df8bae1dSRodney W. Grimes /* 4090d94caffSDavid Greenman * To make this work for more than one map, use the map's lock 4100d94caffSDavid Greenman * to lock out sleepers/wakers. 411df8bae1dSRodney W. Grimes */ 412df8bae1dSRodney W. Grimes vm_map_lock(map); 413df8bae1dSRodney W. Grimes if (vm_map_findspace(map, 0, size, &addr) == 0) 414df8bae1dSRodney W. Grimes break; 415df8bae1dSRodney W. Grimes /* no space now; see if we can ever get space */ 416df8bae1dSRodney W. Grimes if (vm_map_max(map) - vm_map_min(map) < size) { 417df8bae1dSRodney W. Grimes vm_map_unlock(map); 418df8bae1dSRodney W. Grimes return (0); 419df8bae1dSRodney W. Grimes } 420df8bae1dSRodney W. Grimes assert_wait((int) map, TRUE); 421df8bae1dSRodney W. Grimes vm_map_unlock(map); 42226f9a767SRodney W. Grimes thread_block("kmaw"); 423df8bae1dSRodney W. Grimes } 424df8bae1dSRodney W. Grimes vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size); 425df8bae1dSRodney W. Grimes vm_map_unlock(map); 426df8bae1dSRodney W. Grimes return (addr); 427df8bae1dSRodney W. Grimes } 428df8bae1dSRodney W. Grimes 429df8bae1dSRodney W. Grimes /* 430df8bae1dSRodney W. Grimes * kmem_free_wakeup 431df8bae1dSRodney W. Grimes * 432df8bae1dSRodney W. Grimes * Returns memory to a submap of the kernel, and wakes up any threads 433df8bae1dSRodney W. Grimes * waiting for memory in that map. 434df8bae1dSRodney W. Grimes */ 4350d94caffSDavid Greenman void 4360d94caffSDavid Greenman kmem_free_wakeup(map, addr, size) 437df8bae1dSRodney W. Grimes vm_map_t map; 438df8bae1dSRodney W. Grimes vm_offset_t addr; 439df8bae1dSRodney W. Grimes vm_size_t size; 440df8bae1dSRodney W. Grimes { 441df8bae1dSRodney W. Grimes vm_map_lock(map); 442df8bae1dSRodney W. Grimes (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size)); 443df8bae1dSRodney W. Grimes thread_wakeup((int) map); 444df8bae1dSRodney W. Grimes vm_map_unlock(map); 445df8bae1dSRodney W. Grimes } 446df8bae1dSRodney W. Grimes 447df8bae1dSRodney W. Grimes /* 448df8bae1dSRodney W. Grimes * Create the kernel map; insert a mapping covering kernel text, data, bss, 449df8bae1dSRodney W. Grimes * and all space allocated thus far (`boostrap' data). The new map will thus 450df8bae1dSRodney W. Grimes * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and 451df8bae1dSRodney W. Grimes * the range between `start' and `end' as free. 452df8bae1dSRodney W. Grimes */ 4530d94caffSDavid Greenman void 4540d94caffSDavid Greenman kmem_init(start, end) 455df8bae1dSRodney W. Grimes vm_offset_t start, end; 456df8bae1dSRodney W. Grimes { 457df8bae1dSRodney W. Grimes register vm_map_t m; 458df8bae1dSRodney W. Grimes 459df8bae1dSRodney W. Grimes m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE); 460df8bae1dSRodney W. Grimes vm_map_lock(m); 461df8bae1dSRodney W. Grimes /* N.B.: cannot use kgdb to debug, starting with this assignment ... */ 462df8bae1dSRodney W. Grimes kernel_map = m; 463df8bae1dSRodney W. Grimes (void) vm_map_insert(m, NULL, (vm_offset_t) 0, 464df8bae1dSRodney W. Grimes VM_MIN_KERNEL_ADDRESS, start); 465df8bae1dSRodney W. Grimes /* ... and ending with the completion of the above `insert' */ 466df8bae1dSRodney W. Grimes vm_map_unlock(m); 467df8bae1dSRodney W. Grimes } 468