/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.22 1996/01/31 12:05:52 davidg Exp $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

vm_map_t buffer_map;
vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t mb_map;
int mb_map_full;
vm_map_t io_map;
vm_map_t clean_map;
vm_map_t phys_map;
vm_map_t exec_map;
vm_map_t u_map;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
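/*
 * Usage sketch (illustrative only; the caller and its "nbytes" variable are
 * hypothetical, not defined in this file): a subsystem that can tolerate its
 * memory being paged out reserves kernel VA this way, checking for a zero
 * return since the map may be out of space:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, round_page(nbytes));
 *	if (va == 0)
 *		return (ENOMEM);
 */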
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map. This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable. This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		while ((mem = vm_page_alloc(kernel_object,
			OFF_TO_IDX(offset + i), VM_ALLOC_ZERO)) == NULL) {
			VM_WAIT;
		}
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->flags &= ~(PG_BUSY|PG_ZERO);
		mem->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	/*
	 * Try to coalesce the map
	 */
	vm_map_simplify(map, addr);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
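/*
 * Usage sketch (illustrative only; "nbytes" is a hypothetical caller
 * variable): wired, zero-filled kernel memory comes from kmem_alloc() and is
 * returned with kmem_free() using the same map and the same rounded size:
 *
 *	vm_offset_t va;
 *	vm_size_t sz = round_page(nbytes);
 *
 *	va = kmem_alloc(kernel_map, sz);
 *	if (va == 0)
 *		panic("out of kernel virtual address space");
 *	...
 *	kmem_free(kernel_map, va, sz);
 */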
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *	pageable	Can the region be paged
 */
vm_map_t
kmem_suballoc(parent, min, max, size, pageable)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
	boolean_t pageable;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
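/*
 * Usage sketch (illustrative only; the submap size shown is hypothetical):
 * boot-time code carves dedicated submaps, such as exec_map, out of
 * kernel_map this way; the endpoints of the new map are returned through
 * min/max:
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE, TRUE);
 */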
/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, waitflag)
	register vm_map_t map;
	register vm_size_t size;
	boolean_t waitflag;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map. This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			log(LOG_ERR, "Out of mbuf clusters - increase maxusers!\n");
			return (0);
		}
		if (waitflag == M_WAITOK)
			panic("kmem_malloc: kmem_map too small");
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * If we can wait, just mark the range as wired (will fault pages as
	 * necessary).
	 */
	if (waitflag == M_WAITOK) {
		vm_map_unlock(map);
		(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
		    FALSE);
		vm_map_simplify(map, addr);
		return (addr);
	}
	/*
	 * If we cannot wait then we must allocate all memory up front,
	 * pulling it off the active queue to prevent pageout.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    (waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		m->flags &= ~(PG_BUSY|PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
	}
	vm_map_unlock(map);

	vm_map_simplify(map, addr);
	return (addr);
}
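/*
 * Usage sketch (illustrative only; "allocsize" is a hypothetical variable):
 * kmem_malloc() is meant for kern/kern_malloc.c, which passes kmem_map (or
 * mb_map for mbuf clusters) plus a wait flag.  A non-sleeping caller must be
 * prepared for failure:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(allocsize), M_NOWAIT);
 *	if (va == 0)
 *		return (NULL);
 */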
/*
 *	kmem_alloc_wait
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, 0, size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
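/*
 * Usage sketch (illustrative only; exec_map and "len" are just an example):
 * callers that may sleep pair these two routines, so that freeing space in
 * the submap wakes up anyone blocked in kmem_alloc_wait():
 *
 *	vm_offset_t va;
 *	vm_size_t sz = round_page(len);
 *
 *	va = kmem_alloc_wait(exec_map, sz);	(sleeps until space exists)
 *	...
 *	kmem_free_wakeup(exec_map, va, sz);
 */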
/*
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will thus
 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
 * the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
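/*
 * Usage sketch (illustrative only; the variable names follow the usual VM
 * start-up convention and are not defined in this file): kmem_init() is
 * called once during VM initialization, after pmap bootstrap has fixed the
 * limits of usable kernel virtual address space:
 *
 *	kmem_init(virtual_avail, virtual_end);
 */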