// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In such a case, ZONE_DMA32 covers the rest of the 32-bit addressable
 * memory; otherwise it is empty.
 */
phys_addr_t __ro_after_init arm64_dma_phys_limit;

/* Current arm64 boot protocol requires 2MB alignment */
#define CRASH_ALIGN			SZ_2M

#define CRASH_ADDR_LOW_MAX		arm64_dma_phys_limit
#define CRASH_ADDR_HIGH_MAX		(PHYS_MASK + 1)
#define CRASH_HIGH_SEARCH_BASE		SZ_4G

#define DEFAULT_CRASH_KERNEL_LOW_SIZE	(128UL << 20)

static int __init reserve_crashkernel_low(unsigned long long low_size)
{
	unsigned long long low_base;

	low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
	if (!low_base) {
		pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
		return -ENOMEM;
	}

	pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
		low_base, low_base + low_size, low_size >> 20);

	crashk_low_res.start = low_base;
	crashk_low_res.end = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);

	return 0;
}

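/*
 * Illustrative (non-exhaustive) sketch of how the "crashkernel=" command-line
 * forms handled by reserve_crashkernel() below map to reservations; the sizes
 * and addresses are hypothetical examples, not requirements:
 *
 *   crashkernel=512M             512 MB anywhere below CRASH_ADDR_LOW_MAX,
 *                                falling back to high memory (plus a default
 *                                128 MB low region) if that fails
 *   crashkernel=512M@0x90000000  512 MB at the fixed base, with no fallback
 *   crashkernel=1G,high          1 GB searched from 4 GB upwards, plus a
 *                                default 128 MB low region
 *   crashkernel=1G,high crashkernel=256M,low
 *                                as above, with an explicit low-region size
 */
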
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_low_size = 0, search_base = 0;
	unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool fixed_base = false;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	/* crashkernel=X[@offset] */
	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	if (ret == -ENOENT) {
		ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
		if (ret || !crash_size)
			return;

		/*
		 * crashkernel=Y,low is optional, but an invalid value is
		 * not allowed.
		 */
		ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
		if (ret == -ENOENT)
			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
		else if (ret)
			return;

		search_base = CRASH_HIGH_SEARCH_BASE;
		crash_max = CRASH_ADDR_HIGH_MAX;
		high = true;
	} else if (ret || !crash_size) {
		/* The specified value is invalid */
		return;
	}

	crash_size = PAGE_ALIGN(crash_size);

	/* User specifies base address explicitly. */
	if (crash_base) {
		fixed_base = true;
		search_base = crash_base;
		crash_max = crash_base + crash_size;
	}

retry:
	crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
					       search_base, crash_max);
	if (!crash_base) {
		/*
		 * For crashkernel=size[KMG]@offset[KMG], print a failure
		 * message if the specified region cannot be reserved.
		 */
		if (fixed_base) {
			pr_warn("crashkernel reservation failed - memory is in use.\n");
			return;
		}

		/*
		 * For crashkernel=size[KMG], if the first attempt was for
		 * low memory, fall back to high memory; the minimum required
		 * low memory will be reserved later.
		 */
		if (!high && crash_max == CRASH_ADDR_LOW_MAX) {
			crash_max = CRASH_ADDR_HIGH_MAX;
			search_base = CRASH_ADDR_LOW_MAX;
			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
			goto retry;
		}

		/*
		 * For crashkernel=size[KMG],high, if the first attempt was
		 * for high memory, fall back to low memory.
		 */
		if (high && crash_max == CRASH_ADDR_HIGH_MAX) {
			crash_max = CRASH_ADDR_LOW_MAX;
			search_base = 0;
			goto retry;
		}
		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
			crash_size);
		return;
	}

	if ((crash_base >= CRASH_ADDR_LOW_MAX) && crash_low_size &&
	     reserve_crashkernel_low(crash_low_size)) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	/*
	 * The crashkernel memory will be removed from the kernel linear
	 * map. Inform kmemleak so that it won't try to access it.
	 */
	kmemleak_ignore_phys(crash_base);
	if (crashk_low_res.end)
		kmemleak_ignore_phys(crashk_low_res.start);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}

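/*
 * Worked example for max_zone_phys() below, using hypothetical addresses:
 * with zone_bits = 32 and DRAM spanning 0x4000_0000 - 0x2_0000_0000, the
 * returned limit is min(0xffff_ffff, 0x1_ffff_ffff) + 1 = 4 GB. If DRAM
 * instead started above 4 GB, the limit would be expanded to the end of
 * memory so that the zone is not left empty.
 */
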
/*
 * Return the maximum physical address for a zone that is addressable with
 * the given number of bits. If DRAM starts above the 32-bit limit, expand
 * the zone to the maximum available memory; otherwise cap it at 32-bit.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	free_area_init(max_zone_pfns);
}

int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);

static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

void __init arm64_memblock_init(void)
{
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's simply
	 * limit the kernel's linear map to 51 bits as well if we detect this
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if removing it would
		 * make the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_add(base, size);
			memblock_clear_nomap(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * must be done after arch_numa_init() which calls numa_init() to
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init();

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	reserve_crashkernel();

	memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		swiotlb = true;

	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}