/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated with
 *	kva_alloc.  Since kva_alloc provides no backing pages, there
 *	are no physical pages to release here; the caller must have
 *	already destroyed any mappings it created within the range.
 *
 *	This routine may not block.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
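/*
 * Usage sketch (illustrative only; not part of the original file — the
 * helper names and the KMEM_USAGE_EXAMPLES guard are hypothetical):
 * kva_alloc() hands out bare KVA with no backing pages, so a caller
 * pairs it with explicit page allocation and pmap_qenter()/pmap_qremove().
 */
#ifdef KMEM_USAGE_EXAMPLES
static vm_offset_t
example_map_one_page(void)
{
	vm_offset_t va;
	vm_page_t m;

	va = kva_alloc(PAGE_SIZE);
	if (va == 0)
		return (0);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED);
	if (m == NULL) {
		kva_free(va, PAGE_SIZE);
		return (0);
	}
	/* Create the mapping explicitly; vm_fault() will never do it. */
	pmap_qenter(va, &m, 1);
	return (va);
}

static void
example_unmap_one_page(vm_offset_t va)
{
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(pmap_extract(kernel_pmap, va));
	pmap_qremove(va, 1);		/* tear down the mapping first */
	vm_page_unwire(m, PQ_INACTIVE);
	vm_page_free(m);
	kva_free(va, PAGE_SIZE);
}
#endif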
/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates
 *	a wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through
 *	the given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, i;
	vm_ooffset_t offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_pageout_grow_cache(tries, low, high);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages,
 *	and returns the region's starting virtual address.  If M_ZERO is
 *	specified through the given flags, then the pages are zeroed
 *	before they are mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, tmp;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_pageout_grow_cache(tries, low, high);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + atop(size);
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
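/*
 * Usage sketch (illustrative only; not part of the original file — the
 * helper name and the KMEM_USAGE_EXAMPLES guard are hypothetical):
 * a driver-style request via kmem_alloc_contig() for 64KB of wired,
 * zeroed, physically contiguous memory below 4GB, page-aligned and not
 * crossing a 1MB boundary, as a DMA engine might require.  The region
 * is released with kmem_free(kernel_arena, va, 64 * 1024).
 */
#ifdef KMEM_USAGE_EXAMPLES
static vm_offset_t
example_contig_dma_buffer(void)
{

	return (kmem_alloc_contig(kernel_arena, 64 * 1024, M_WAITOK | M_ZERO,
	    0, 0xffffffffUL, PAGE_SIZE, 1024 * 1024, VM_MEMATTR_DEFAULT));
}
#endif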
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if ((flags & M_NOWAIT) == 0) {
				VM_WAIT;
				VM_OBJECT_WLOCK(object);
				goto retry;
			}
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m;
	vm_offset_t i, offset;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}
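/*
 * Usage sketch (illustrative only; not part of the original file — the
 * helper name and the KMEM_USAGE_EXAMPLES guard are hypothetical):
 * kmem_malloc() and kmem_free() must be called with the same arena and
 * the same size, since both round the size with round_page() before
 * touching the vmem arena and the backing object.
 */
#ifdef KMEM_USAGE_EXAMPLES
static void
example_kmem_malloc_free(void)
{
	vm_offset_t va;
	vm_size_t size;

	size = 3 * PAGE_SIZE;
	va = kmem_malloc(kmem_arena, size, M_WAITOK | M_ZERO);
	if (va == 0)
		return;
	/* ... use the wired, zero-filled region at va ... */
	kmem_free(kmem_arena, va, size);
}
#endif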
/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory in
 *	the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
#endif
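/*
 * Usage sketch (illustrative only; not part of the original file — the
 * helper name and the KMEM_USAGE_EXAMPLES guard are hypothetical):
 * zero_region provides ZERO_REGION_SIZE bytes of read-only zeros backed
 * by a single physical page, so a caller can test whether a buffer is
 * all zeros by memcmp()ing against it in chunks instead of maintaining
 * a private zero buffer.
 */
#ifdef KMEM_USAGE_EXAMPLES
static boolean_t
example_buf_is_zero(const void *buf, size_t len)
{
	const char *p;
	size_t n;

	p = buf;
	while (len > 0) {
		n = len < ZERO_REGION_SIZE ? len : ZERO_REGION_SIZE;
		if (memcmp(p, zero_region, n) != 0)
			return (FALSE);
		p += n;
		len -= n;
	}
	return (TRUE);
}
#endif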