/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_init.c	8.1 (Berkeley) 6/11/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Initialize the Virtual Memory subsystem.
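 *
 *	vm_mem_init() is run once at SI_SUB_VM via SYSINIT and brings up the
 *	page, object, map, pager and pmap layers together with the kernel
 *	vmem arenas; vm_ksubmap_init() later carves the buffer, swap,
 *	transient I/O, exec and pipe submaps out of the kernel map.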
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/selinfo.h>
#include <sys/smp.h>
#include <sys/pipe.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#if VM_NRESERVLEVEL > 0
#define	KVA_QUANTUM	(1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT))
#else
/* On non-superpage architectures we want large import sizes. */
#define	KVA_QUANTUM	(PAGE_SIZE * 1024)
#endif

long physmem;

/*
 * System initialization
 */
static void vm_mem_init(void *);
SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL);

/*
 * Import kva into the kernel arena.
 */
static int
kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	vm_offset_t addr;
	int result;

	KASSERT((size % KVA_QUANTUM) == 0,
	    ("kva_import: Size %jd is not a multiple of %d",
	    (intmax_t)size, (int)KVA_QUANTUM));
	addr = vm_map_min(kernel_map);
	result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
	    VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (ENOMEM);

	*addrp = addr;

	return (0);
}

/*
 *	vm_mem_init initializes the virtual memory system.
 *	This is done only by the first cpu up.
 *
 *	The start and end address of physical memory is passed in.
 */
/* ARGSUSED*/
static void
vm_mem_init(void *dummy)
{
	int domain;

	/*
	 * Initializes resident memory structures.  From here on, all physical
	 * memory is accounted for, and we use only virtual addresses.
	 */
	vm_set_page_size();
	virtual_avail = vm_page_startup(virtual_avail);

	/*
	 * Initialize other VM packages
	 */
	vmem_startup();
	vm_object_init();
	vm_map_startup();
	kmem_init(virtual_avail, virtual_end);

	/*
	 * Initialize the kernel_arena.  This can grow on demand.
	 */
	vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
	vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);

	for (domain = 0; domain < vm_ndomains; domain++) {
		vm_dom[domain].vmd_kernel_arena = vmem_create(
		    "kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
		vmem_set_import(vm_dom[domain].vmd_kernel_arena,
		    (vmem_import_t *)vmem_alloc, NULL, kernel_arena,
		    KVA_QUANTUM);
	}

	kmem_init_zero_region();
	pmap_init();
	vm_pager_init();
}

void
vm_ksubmap_init(struct kva_md_info *kmi)
{
	vm_offset_t firstaddr;
	caddr_t v;
	vm_size_t size = 0;
	long physmem_est;
	vm_offset_t minaddr;
	vm_offset_t maxaddr;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.
	 * The second pass assigns virtual addresses to the various data
	 * structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

	/*
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
	physmem_est = lmin(physmem, btoc(kernel_map->max_offset -
	    kernel_map->min_offset));

	v = kern_vfs_bio_buffer_alloc(v, physmem_est);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)v;
#ifdef VM_FREELIST_DMA32
		/*
		 * Try to protect 32-bit DMAable memory from the largest
		 * early alloc of wired mem.
		 */
		firstaddr = kmem_alloc_attr(kernel_arena, size,
		    M_ZERO | M_NOWAIT, (vm_paddr_t)1 << 32,
		    ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
		if (firstaddr == 0)
#endif
			firstaddr = kmem_malloc(kernel_arena, size,
			    M_ZERO | M_WAITOK);
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)((char *)v - firstaddr) != size)
		panic("startup: table size inconsistency");

	/*
	 * Allocate the clean map to hold all of the paging and I/O virtual
	 * memory.
	 */
	size = (long)nbuf * BKVASIZE + (long)nswbuf * MAXPHYS +
	    (long)bio_transient_maxcnt * MAXPHYS;
	kmi->clean_sva = firstaddr = kva_alloc(size);
	kmi->clean_eva = firstaddr + size;

	/*
	 * Allocate the buffer arena.
	 *
	 * Enable the quantum cache if we have more than 4 cpus.  This
	 * avoids lock contention at the expense of some fragmentation.
	 */
	size = (long)nbuf * BKVASIZE;
	kmi->buffer_sva = firstaddr;
	kmi->buffer_eva = kmi->buffer_sva + size;
	vmem_init(buffer_arena, "buffer arena", kmi->buffer_sva, size,
	    PAGE_SIZE, (mp_ncpus > 4) ? BKVASIZE * 8 : 0, 0);
	firstaddr += size;

	/*
	 * Now swap kva.
	 */
	swapbkva = firstaddr;
	size = (long)nswbuf * MAXPHYS;
	firstaddr += size;

	/*
	 * And optionally transient bio space.
	 */
	if (bio_transient_maxcnt != 0) {
		size = (long)bio_transient_maxcnt * MAXPHYS;
		vmem_init(transient_arena, "transient arena",
		    firstaddr, size, PAGE_SIZE, 0, 0);
		firstaddr += size;
	}
	if (firstaddr != kmi->clean_eva)
		panic("Clean map calculation incorrect");

	/*
	 * Allocate the pageable submaps.  We may cache an exec map entry per
	 * CPU, so we need to reserve space for at least ncpu+1 entries to
	 * avoid deadlock.  The exec map is also used by some image
	 * activators, so we leave a fixed number of pages for their use.
	 */
#ifdef __LP64__
	exec_map_entries = 8 * mp_ncpus;
#else
	exec_map_entries = 2 * mp_ncpus + 4;
#endif
	exec_map_entry_size = round_page(PATH_MAX + ARG_MAX);
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE);
	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
	    FALSE);
}