/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_kern.c     8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>         /* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map = 0;
vm_map_t kmem_map = 0;
vm_map_t exec_map = 0;
vm_map_t pipe_map;
vm_map_t buffer_map = 0;

/*
 *      kmem_alloc_nofault:
 *
 *      Allocate a virtual address range with no underlying object and
 *      no initial mapping to physical memory.  Any mapping from this
 *      range to physical memory must be explicitly created prior to
 *      its use, typically with pmap_qenter().  Any attempt to create
 *      a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, 0,
            &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}
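
/*
 * Illustrative sketch (an addition, not part of the original file):
 * backing a range from kmem_alloc_nofault() with an explicitly created
 * mapping, as the comment above describes.  The helper name and the
 * VM_ALLOC_NOOBJ/VM_ALLOC_WIRED flag combination are assumptions about
 * the contemporary vm_page API; the block is kept under #if 0 so it is
 * not compiled.
 */
#if 0
static vm_offset_t
kmem_nofault_example(void)
{
        vm_offset_t va;
        vm_page_t m;

        /* Reserve one page of KVA; nothing is mapped yet. */
        va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
        if (va == 0)
                return (0);
        /* Allocate a wired page that belongs to no VM object. */
        m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
        if (m == NULL) {
                kmem_free(kernel_map, va, PAGE_SIZE);
                return (0);
        }
        /* Create the mapping by hand; a vm_fault() here would panic. */
        pmap_qenter(va, &m, 1);
        return (va);
}
#endif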

/*
 *      Allocate wired-down memory in the kernel's address map
 *      or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        vm_offset_t offset;
        vm_offset_t i;

        size = round_page(size);

        /*
         * Use the kernel object for wired-down kernel pages.  Assume that no
         * region of the kernel object is referenced more than once.
         */

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kernel_object);
        vm_map_insert(map, kernel_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);

        /*
         * Guarantee that there are pages already in this object before
         * calling vm_map_wire.  This is to prevent the following
         * scenario:
         *
         * 1) Threads have swapped out, so that there is a pager for the
         *    kernel_object.
         * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
         *    for it.
         * 3) vm_map_wire calls vm_fault; there is no page, but there is a
         *    pager, so we call pager_data_request.  But the kmsg zone is
         *    empty, so we must kmem_alloc.
         * 4) goto 1
         * 5) Even if the kmsg zone is not empty: when we get the data back
         *    from the pager, it will be (very stale) non-zero data.
         *    kmem_alloc is defined to return zero-filled memory.
         *
         * We're intentionally not activating the pages we allocate to
         * prevent a race with page-out.  vm_map_wire will wire the pages.
         */
        VM_OBJECT_LOCK(kernel_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                vm_page_t mem;

                mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
                    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                mem->valid = VM_PAGE_BITS_ALL;
                vm_page_lock_queues();
                vm_page_unmanage(mem);
                vm_page_wakeup(mem);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(kernel_object);

        /*
         * And finally, mark the data as non-pageable.
         */
        (void) vm_map_wire(map, addr, addr + size,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

        return (addr);
}
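
/*
 * Illustrative sketch (an addition): the usual kmem_alloc()/kmem_free()
 * pairing for a wired scratch buffer.  The helper name and buffer size
 * are placeholders.
 */
#if 0
static void
kmem_scratch_example(void)
{
        vm_offset_t buf;
        vm_size_t len = 4 * PAGE_SIZE;

        /* Returns zero-filled, wired, non-pageable memory, or 0. */
        buf = kmem_alloc(kernel_map, len);
        if (buf == 0)
                return;
        /* ... use the buffer ... */
        kmem_free(kernel_map, buf, len);
}
#endif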

/*
 *      kmem_free:
 *
 *      Release a region of kernel virtual memory allocated
 *      with kmem_alloc, and return the physical pages
 *      associated with that region.
 *
 *      This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{

        (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *      kmem_suballoc:
 *
 *      Allocates a map to manage a subrange
 *      of the kernel virtual address space.
 *
 *      Arguments are as follows:
 *
 *      parent          Map to take range from
 *      min, max        Returned endpoints of map
 *      size            Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
        vm_map_t parent;
        vm_offset_t *min, *max;
        vm_size_t size;
{
        int ret;
        vm_map_t result;

        size = round_page(size);

        *min = (vm_offset_t) vm_map_min(parent);
        ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
            min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (ret != KERN_SUCCESS) {
                printf("kmem_suballoc: bad status return of %d.\n", ret);
                panic("kmem_suballoc");
        }
        *max = *min + size;
        result = vm_map_create(vm_map_pmap(parent), *min, *max);
        if (result == NULL)
                panic("kmem_suballoc: cannot create submap");
        if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
        return (result);
}
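
/*
 * Illustrative sketch (an addition): how a submap such as exec_map is
 * typically carved out of kernel_map during startup.  The size shown
 * is an arbitrary placeholder, not the value the real startup code uses.
 */
#if 0
static void
submap_example(void)
{
        vm_offset_t minaddr, maxaddr;

        /* exec_map now owns [minaddr, maxaddr) within kernel_map. */
        exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
            16 * PAGE_SIZE);
}
#endif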

/*
 *      kmem_malloc:
 *
 *      Allocate wired-down memory in the kernel's address map for the higher
 *      level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *      kmem_alloc() because we may need to allocate memory at interrupt
 *      level where we cannot block (canwait == FALSE).
 *
 *      This routine has its own private kernel submap (kmem_map) and object
 *      (kmem_object).  This, combined with the fact that only malloc uses
 *      this routine, ensures that we will never block in map or object waits.
 *
 *      Note that this still only works in a uni-processor environment and
 *      when called at splhigh().
 *
 *      We don't worry about expanding the map (adding entries) since entries
 *      for wired maps are statically allocated.
 *
 *      NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *      I have not verified that it actually does not block.
 *
 *      `map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *      which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
        vm_map_t map;
        vm_size_t size;
        int flags;
{
        vm_offset_t offset, i;
        vm_map_entry_t entry;
        vm_offset_t addr;
        vm_page_t m;
        int pflags;

        size = round_page(size);
        addr = vm_map_min(map);

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                if ((flags & M_NOWAIT) == 0)
                        panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
                            (long)size, (long)map->size);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kmem_object);
        vm_map_insert(map, kmem_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);

        /*
         * Note: if M_NOWAIT is specified alone, allocate from the
         * interrupt-safe queues only (just the free list).  If
         * M_USE_RESERVE is also specified, we can also allocate from the
         * cache.  Neither of the latter two flags may be specified from an
         * interrupt since interrupts are not allowed to mess with the cache
         * queue.
         */
        if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
                pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
        else
                pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

        if (flags & M_ZERO)
                pflags |= VM_ALLOC_ZERO;

        VM_OBJECT_LOCK(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
retry:
                m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

                /*
                 * Ran out of space, free everything up and return.  Don't
                 * need to lock page queues here as we know that the pages we
                 * got aren't on any queues.
                 */
                if (m == NULL) {
                        if ((flags & M_NOWAIT) == 0) {
                                VM_OBJECT_UNLOCK(kmem_object);
                                vm_map_unlock(map);
                                VM_WAIT;
                                vm_map_lock(map);
                                VM_OBJECT_LOCK(kmem_object);
                                goto retry;
                        }
                        /*
                         * Free the pages before removing the map entry.
                         * They are already marked busy.  Calling
                         * vm_map_delete before the pages have been freed or
                         * unbusied will cause a deadlock.
                         */
                        while (i != 0) {
                                i -= PAGE_SIZE;
                                m = vm_page_lookup(kmem_object,
                                    OFF_TO_IDX(offset + i));
                                vm_page_lock_queues();
                                vm_page_unwire(m, 0);
                                vm_page_free(m);
                                vm_page_unlock_queues();
                        }
                        VM_OBJECT_UNLOCK(kmem_object);
                        vm_map_delete(map, addr, addr + size);
                        vm_map_unlock(map);
                        return (0);
                }
                if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_lock_queues();
                vm_page_unmanage(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(kmem_object);

        /*
         * Mark map entry as non-pageable.  Assert: vm_map_insert() will
         * never be able to extend the previous entry so there will be a new
         * entry exactly corresponding to this address range and it will have
         * wired_count == 0.
         */
        if (!vm_map_lookup_entry(map, addr, &entry) ||
            entry->start != addr || entry->end != addr + size ||
            entry->wired_count != 0)
                panic("kmem_malloc: entry not found or misaligned");
        entry->wired_count = 1;

        /*
         * At this point, the kmem_object must be unlocked because
         * vm_map_simplify_entry() calls vm_object_deallocate(), which
         * locks the kmem_object.
         */
        vm_map_simplify_entry(map, entry);

        /*
         * Loop thru pages, entering them in the pmap.  (We cannot add them
         * to the wired count without wrapping the vm_page_queue_lock in
         * splimp...)
         */
        VM_OBJECT_LOCK(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
                /*
                 * Because this is kernel_pmap, this call will not block.
                 */
                pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
                vm_page_lock_queues();
                vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(kmem_object);
        vm_map_unlock(map);

        return (addr);
}
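
/*
 * Illustrative sketch (an addition): how the kern_malloc() layer the
 * comment above refers to might call kmem_malloc() for an allocation
 * it backs directly with kmem_map.  The helper name is hypothetical;
 * only the flag behavior (M_NOWAIT returns 0 instead of sleeping in
 * VM_WAIT) is taken from the function above.
 */
#if 0
static void *
large_alloc_example(vm_size_t size, int flags)
{
        /* Non-blocking: may return 0 rather than wait for pages. */
        return ((void *)kmem_malloc(kmem_map, size, flags | M_NOWAIT));
}
#endif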

/*
 *      kmem_alloc_wait:
 *
 *      Allocates pageable memory from a sub-map of the kernel.  If the
 *      submap has no room, the caller sleeps waiting for more memory in the
 *      submap.
 *
 *      This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;

        size = round_page(size);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's
                 * lock to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        return (0);
                }
                map->needs_wakeup = TRUE;
                vm_map_unlock_and_wait(map, FALSE);
        }
        vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
            VM_PROT_ALL, 0);
        vm_map_unlock(map);
        return (addr);
}

/*
 *      kmem_free_wakeup:
 *
 *      Returns memory to a submap of the kernel, and wakes up any processes
 *      waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{

        vm_map_lock(map);
        (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
        if (map->needs_wakeup) {
                map->needs_wakeup = FALSE;
                vm_map_wakeup(map);
        }
        vm_map_unlock(map);
}

/*
 *      kmem_init:
 *
 *      Create the kernel map; insert a mapping covering kernel text,
 *      data, bss, and all space allocated thus far (`bootstrap' data).  The
 *      new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *      `start' as allocated, and the range between `start' and `end' as
 *      free.
 */
void
kmem_init(start, end)
        vm_offset_t start, end;
{
        vm_map_t m;

        m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
        m->system_map = 1;
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        kernel_map = m;
        (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
            VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);
}
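
/*
 * Illustrative sketch (an addition): the sleep/wakeup pairing a consumer
 * of a pageable submap would use, in the style of the exec argument
 * buffers carved from exec_map.  The helper name and buffer size are
 * placeholders.
 */
#if 0
static void
pageable_submap_example(void)
{
        vm_offset_t buf;
        vm_size_t len = 8 * PAGE_SIZE;

        /* May sleep until another thread calls kmem_free_wakeup(). */
        buf = kmem_alloc_wait(exec_map, len);
        if (buf == 0)
                return;         /* request larger than the whole submap */
        /* ... use the pageable buffer ... */
        kmem_free_wakeup(exec_map, buf, len);
}
#endif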