/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 * Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;

/*
 * kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 * kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
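/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * caller that reserves a pageable scratch region and later returns it.
 * The names scratch_init, scratch_fini, scratch_va, and scratch_size are
 * illustrative only; kmem_alloc_pageable() and kmem_free() are the
 * routines defined in this file.
 *
 *	static vm_offset_t scratch_va;
 *	static vm_size_t scratch_size = 4 * PAGE_SIZE;
 *
 *	static int
 *	scratch_init(void)
 *	{
 *		scratch_va = kmem_alloc_pageable(kernel_map, scratch_size);
 *		if (scratch_va == 0)
 *			return (ENOMEM);
 *		return (0);
 *	}
 *
 *	static void
 *	scratch_fini(void)
 *	{
 *		kmem_free(kernel_map, scratch_va, scratch_size);
 *	}
 *
 * Note that the region is pageable: it is backed on first touch and may be
 * paged out, so it must not be handed to code that runs where faults are
 * forbidden (for that, see kmem_alloc() and kmem_malloc() below).
 */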
/*
 * Allocate wired-down memory in the kernel's address map
 * or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	GIANT_REQUIRED;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size, FALSE);

	return (addr);
}

/*
 * kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	GIANT_REQUIRED;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
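/*
 * Example (editor's sketch): how startup code typically carves a submap
 * out of kernel_map with kmem_suballoc().  The size here is a placeholder;
 * real sizes are machine- and configuration-dependent.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 *
 * Afterwards [minaddr, maxaddr) is managed by exec_map, so allocations such
 * as kmem_alloc_wait(exec_map, size) are confined to that range and do not
 * contend for the rest of kernel_map.
 */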
/*
 * kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE: This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 *	from which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			goto bad;
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		goto bad;
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from the
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also allocate from the
	 * cache.  Neither of the latter two flags may be specified from an
	 * interrupt, since interrupts are not allowed to mess with the
	 * cache queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT;
	else
		pflags = VM_ALLOC_SYSTEM;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock the page queues here, as we know that the
		 * pages we got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			goto bad;
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			vm_page_zero_fill(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry, so there will be a
	 * new entry exactly corresponding to this address range and it
	 * will have wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap.  (We cannot
	 * add them to the wired count without wrapping the
	 * vm_page_queue_lock in splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);

bad:
	return (0);
}
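/*
 * Example (editor's sketch, not a quote of kern_malloc.c): how the
 * higher-level allocator might call kmem_malloc() from a context that
 * cannot sleep.  "len" is a placeholder.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(len), M_NOWAIT);
 *	if (va == 0)
 *		return (NULL);
 *
 * With M_NOWAIT alone, pages come from VM_ALLOC_INTERRUPT (the free list
 * only); M_NOWAIT | M_USE_RESERVE maps to VM_ALLOC_SYSTEM, which may also
 * draw from the cache queue and is therefore not safe from interrupt
 * context, as the comment inside kmem_malloc() explains.
 */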
/*
 * kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory in
 *	the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	GIANT_REQUIRED;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 * kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}

/*
 * kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
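/*
 * Example (editor's sketch, modeled on machine-dependent startup code):
 * the bootstrap ordering kmem_init() expects.  virtual_avail and
 * virtual_end stand in for whatever the MD layer reports as the first and
 * last usable kernel virtual addresses once early allocations are done.
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * After this returns, kernel_map covers [VM_MIN_KERNEL_ADDRESS,
 * virtual_end), with everything below virtual_avail recorded as already
 * allocated; submaps such as kmem_map and exec_map can then be carved out
 * of the free portion with kmem_suballoc() above.
 */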