/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

/* NB: Used by kernel debuggers. */
const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;

u_int exec_map_entry_size;
u_int exec_map_entries;

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated with
 *	kva_alloc.  Only the virtual address range is freed; any
 *	physical pages mapped into the region must be unmapped and
 *	freed by the caller beforehand.
 *
 *	This routine may not block on kernel maps.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
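
/*
 * Usage sketch (illustrative only, not part of this file's interface):
 * a typical kva_alloc() consumer supplies its own physical pages and
 * maps them with pmap_qenter(), then tears the mapping down before
 * releasing the range.  The page array `pages' and the count `npages'
 * here are hypothetical.
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(ptoa(npages));
 *	if (va == 0)
 *		return (ENOMEM);
 *	pmap_qenter(va, pages, npages);
 *	... use the mapping at va ...
 *	pmap_qremove(va, npages);
 *	kva_free(va, ptoa(npages));
 */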

/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates a
 *	wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through the
 *	given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, atop(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				if (!vm_page_reclaim_contig(pflags, 1,
				    low, high, PAGE_SIZE, 0) &&
				    (flags & M_WAITOK) != 0)
					VM_WAIT;
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, atop(offset), pflags,
	    npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			if (!vm_page_reclaim_contig(pflags, npages, low, high,
			    alignment, boundary) && (flags & M_WAITOK) != 0)
				VM_WAIT;
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
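
/*
 * Usage sketch (illustrative only): allocating a zeroed, physically
 * contiguous buffer for a hypothetical DMA engine that is limited to
 * 32-bit physical addresses, with page alignment and no boundary
 * constraint.  A matching kmem_free() with the same arena and size
 * releases both the pages and the virtual range.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_contig(kernel_arena, size, M_WAITOK | M_ZERO,
 *	    0, 0xffffffffUL, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 *	if (va == 0)
 *		return (ENOMEM);
 *	... program the device with the physical address of va ...
 *	kmem_free(kernel_arena, va, size);
 */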

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m, mpred;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	if (flags & M_WAITOK)
		pflags |= VM_ALLOC_WAITFAIL;

	i = 0;
	VM_OBJECT_WLOCK(object);
retry:
	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
	for (; i < size; i += PAGE_SIZE, mpred = m) {
		m = vm_page_alloc_after(object, atop(offset + i), pflags,
		    mpred);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0)
				goto retry;
			VM_OBJECT_WUNLOCK(object);
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}
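
/*
 * Usage sketch (illustrative only): kmem_malloc() pairs a vmem_alloc()
 * of kernel virtual address space with a kmem_back() of that range by
 * wired physical pages; the common pattern releases both with
 * kmem_free(), passing the same arena and size.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_arena, size, M_WAITOK | M_ZERO);
 *	if (va == 0)
 *		return (ENOMEM);
 *	... use the wired buffer at va ...
 *	kmem_free(kmem_arena, va, size);
 */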

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m, next;
	vm_offset_t end, offset;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	for (m = vm_page_lookup(object, atop(offset)); offset < end;
	    offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}

/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}
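
/*
 * Usage sketch (illustrative only): consumers that need a stream of
 * zeroed bytes can copy out of zero_region in ZERO_REGION_SIZE chunks
 * instead of zeroing a private buffer.  Here `uio' stands for a
 * hypothetical caller-supplied I/O request.
 *
 *	while (uio->uio_resid > 0 && error == 0) {
 *		len = MIN(uio->uio_resid, ZERO_REGION_SIZE);
 *		error = uiomove(__DECONST(void *, zero_region), len, uio);
 *	}
 */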

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
		return (EINVAL);
	if (i != 0)
		EVENTHANDLER_INVOKE(vm_lowmem, i);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event with given flags");
#endif
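
/*
 * Usage sketch (illustrative only): on a kernel built with DIAGNOSTIC,
 * the handler above can be driven from userland with sysctl(8) by
 * writing a mask of the VM_LOW_* flag bits from <sys/eventhandler.h>,
 * for example:
 *
 *	# sysctl debug.vm_lowmem=1
 *
 * which invokes every registered vm_lowmem eventhandler as if the
 * system were under the corresponding memory pressure.
 */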