// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
 * otherwise it is empty.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump-capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_base, crash_size;
        unsigned long long crash_max = arm64_dma_phys_limit;
        int ret;

        ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                                &crash_size, &crash_base);
        /* no crashkernel= or invalid value specified */
        if (ret || !crash_size)
                return;

        crash_size = PAGE_ALIGN(crash_size);

        /* User specifies base address explicitly. */
        if (crash_base)
                crash_max = crash_base + crash_size;

        /* Current arm64 boot protocol requires 2MB alignment */
        crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
                                               crash_base, crash_max);
        if (!crash_base) {
                pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
                        crash_size);
                return;
        }

        pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
                crash_base, crash_base + crash_size, crash_size >> 20);

        /*
         * The crashkernel memory will be removed from the kernel linear
         * map. Inform kmemleak so that it won't try to access it.
         */
        kmemleak_ignore_phys(crash_base);
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */
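
/*
 * Example: with the documented crashkernel=size[KMG][@offset[KMG]] syntax,
 * "crashkernel=512M" lets reserve_crashkernel() place 512 MiB anywhere
 * below arm64_dma_phys_limit, while "crashkernel=512M@0x90000000" pins the
 * reservation to a fixed base. Either way the size is rounded up to
 * PAGE_SIZE and the base is 2 MiB aligned, per the arm64 boot protocol.
 * (Illustrative values only.)
 */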

/*
 * Return the maximum physical address for a zone accessible by the given bits
 * limit. If DRAM starts above 32-bit, expand the zone to the maximum
 * available memory, otherwise cap it at 32-bit.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
        phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
        phys_addr_t phys_start = memblock_start_of_DRAM();

        if (phys_start > U32_MAX)
                zone_mask = PHYS_ADDR_MAX;
        else if (phys_start > zone_mask)
                zone_mask = U32_MAX;

        return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
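
/*
 * Worked example: with zone_bits == 32 and DRAM starting at 0x8000_0000,
 * neither branch fires, so the zone ends at
 * min(0xffff_ffff, memblock_end_of_DRAM() - 1) + 1, i.e. it is capped at
 * 4 GiB. If DRAM instead started at 0x1_0000_0000, no 32-bit addressable
 * RAM would exist at all, so the zone is expanded to cover all of memory.
 * (Illustrative addresses only.)
 */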

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
        unsigned int __maybe_unused acpi_zone_dma_bits;
        unsigned int __maybe_unused dt_zone_dma_bits;
        phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
        acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
        dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
        zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
        arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
        if (!arm64_dma_phys_limit)
                arm64_dma_phys_limit = dma32_phys_limit;
#endif
        if (!arm64_dma_phys_limit)
                arm64_dma_phys_limit = PHYS_MASK + 1;
        max_zone_pfns[ZONE_NORMAL] = max;

        free_area_init(max_zone_pfns);
}

int pfn_valid(unsigned long pfn)
{
        phys_addr_t addr = PFN_PHYS(pfn);
        struct mem_section *ms;

        /*
         * Ensure the upper PAGE_SHIFT bits are clear in the
         * pfn. Else it might lead to false positives when
         * some of the upper bits are set, but the lower bits
         * match a valid pfn.
         */
        if (PHYS_PFN(addr) != pfn)
                return 0;

        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;

        ms = __pfn_to_section(pfn);
        if (!valid_section(ms))
                return 0;

        /*
         * ZONE_DEVICE memory does not have memblock entries, so the
         * memblock_is_memory() check below will always fail for
         * ZONE_DEVICE based addresses. Normal hotplugged memory, in
         * turn, never has the MEMBLOCK_NOMAP flag set in its memblock
         * entries. Skip the memblock search for all non-early memory
         * sections, which covers all of hotplug memory, both normal
         * and ZONE_DEVICE based.
         */
        if (!early_section(ms))
                return pfn_section_valid(ms, pfn);

        return memblock_is_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

int pfn_is_map_memory(unsigned long pfn)
{
        phys_addr_t addr = PFN_PHYS(pfn);

        /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
        if (PHYS_PFN(addr) != pfn)
                return 0;

        return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
        if (!p)
                return 1;

        memory_limit = memparse(p, &p) & PAGE_MASK;
        pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

        return 0;
}
early_param("mem", early_mem);
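
/*
 * Example: "mem=2G" on the command line makes memparse() return
 * 0x80000000, so memory_limit becomes 2 GiB and the boot log reads
 * "Memory limited to 2048MB". memparse() accepts the usual K/M/G
 * suffixes; the result is rounded down to a page boundary by the
 * PAGE_MASK above.
 */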

void __init arm64_memblock_init(void)
{
        s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

        /*
         * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
         * be limited in their ability to support a linear map that exceeds 51
         * bits of VA space, depending on the placement of the ID map. Given
         * that the placement of the ID map may be randomized, let's simply
         * limit the kernel's linear map to 51 bits as well if we detect this
         * configuration.
         */
        if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
            is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
                pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
                linear_region_size = min_t(u64, linear_region_size, BIT(51));
        }

        /* Remove memory above our supported physical address size */
        memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

        /*
         * Select a suitable value for the base of physical memory.
         */
        memstart_addr = round_down(memblock_start_of_DRAM(),
                                   ARM64_MEMSTART_ALIGN);

        if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
                pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

        /*
         * Remove the memory that we will not be able to cover with the
         * linear mapping. Take care not to clip the kernel which may be
         * high in memory.
         */
        memblock_remove(max_t(u64, memstart_addr + linear_region_size,
                        __pa_symbol(_end)), ULLONG_MAX);
        if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
                /* ensure that memstart_addr remains sufficiently aligned */
                memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
                                         ARM64_MEMSTART_ALIGN);
                memblock_remove(0, memstart_addr);
        }

        /*
         * If we are running with a 52-bit kernel VA config on a system that
         * does not support it, we have to place the available physical
         * memory in the 48-bit addressable part of the linear region, i.e.,
         * we have to move it upward. Since memstart_addr represents the
         * physical address of PAGE_OFFSET, we have to *subtract* from it.
         */
        if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
                memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

        /*
         * Apply the memory limit if it was set. Since the kernel may be
         * loaded high up in memory, add back the kernel region that must be
         * accessible via the linear mapping.
         */
        if (memory_limit != PHYS_ADDR_MAX) {
                memblock_mem_limit_remove_map(memory_limit);
                memblock_add(__pa_symbol(_text), (u64)(_end - _text));
        }

        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
                /*
                 * Add back the memory we just removed if it results in the
                 * initrd becoming inaccessible via the linear mapping.
                 * Otherwise, this is a no-op.
                 */
                u64 base = phys_initrd_start & PAGE_MASK;
                u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

                /*
                 * We can only add back the initrd memory if we don't end up
                 * with more memory than we can address via the linear mapping.
                 * It is up to the bootloader to position the kernel and the
                 * initrd reasonably close to each other (i.e., within 32 GB of
                 * each other) so that all granule/#levels combinations can
                 * always access both.
                 */
                if (WARN(base < memblock_start_of_DRAM() ||
                         base + size > memblock_start_of_DRAM() +
                                       linear_region_size,
                        "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
                        phys_initrd_size = 0;
                } else {
                        memblock_remove(base, size); /* clear MEMBLOCK_ flags */
                        memblock_add(base, size);
                        memblock_reserve(base, size);
                }
        }

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern u16 memstart_offset_seed;
                u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
                int parange = cpuid_feature_extract_unsigned_field(
                                        mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
                s64 range = linear_region_size -
                            BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

                /*
                 * If the size of the linear region exceeds, by a sufficient
                 * margin, the size of the region that the physical memory can
                 * span, randomize the linear region as well.
                 */
                if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
                        range /= ARM64_MEMSTART_ALIGN;
                        memstart_addr -= ARM64_MEMSTART_ALIGN *
                                         ((range * memstart_offset_seed) >> 16);
                }
        }
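
        /*
         * Worked example for the randomization above: memstart_offset_seed
         * is a 16-bit value, so (range * memstart_offset_seed) >> 16 picks
         * a fraction seed/65536 of the leftover linear-map space, in units
         * of ARM64_MEMSTART_ALIGN. A seed of 0x8000, for instance, moves
         * memstart_addr down by roughly half of the available range.
         */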

        /*
         * Register the kernel text, kernel data, initrd, and initial
         * pagetables with memblock.
         */
        memblock_reserve(__pa_symbol(_stext), _end - _stext);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
                /* the generic initrd code expects virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }

        early_init_fdt_scan_reserved_mem();

        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
        unsigned long min, max;

        min = PFN_UP(memblock_start_of_DRAM());
        max = PFN_DOWN(memblock_end_of_DRAM());

        early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

        max_pfn = max_low_pfn = max;
        min_low_pfn = min;

        arch_numa_init();

        /*
         * Must be done after arch_numa_init() which calls numa_init() to
         * initialize node_online_map, which gets used in hugetlb_cma_reserve()
         * while allocating the required CMA size across online nodes.
         */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
        arm64_hugetlb_cma_reserve();
#endif

        dma_pernuma_cma_reserve();

        kvm_hyp_reserve();

        /*
         * sparse_init() tries to allocate memory from memblock, so it must be
         * done after the fixed reservations.
         */
        sparse_init();
        zone_sizes_init(min, max);

        /*
         * Reserve the CMA area after arm64_dma_phys_limit was initialised.
         */
        dma_contiguous_reserve(arm64_dma_phys_limit);

        /*
         * request_standard_resources() depends on the crashkernel memory
         * being reserved, so do it here.
         */
        reserve_crashkernel();

        memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > PFN_DOWN(arm64_dma_phys_limit))
                swiotlb_init(1);
        else if (!xen_swiotlb_detect())
                swiotlb_force = SWIOTLB_NO_FORCE;

        set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

        /* this will put all unused low memory onto the freelists */
        memblock_free_all();

        /*
         * Check boundaries twice: some fundamental inconsistencies can be
         * detected at build time already.
         */
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

        /*
         * Selected page table levels should match when derived from
         * scratch using the virtual address range and page size.
         */
        BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
                     CONFIG_PGTABLE_LEVELS);

        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get anywhere without
                 * overcommit, so turn it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

void free_initmem(void)
{
        free_reserved_area(lm_alias(__init_begin),
                           lm_alias(__init_end),
                           POISON_FREE_INITMEM, "unused kernel");
        /*
         * Unmap the __init region but leave the VM area in place. This
         * prevents the region from being reused for kernel modules, which
         * is not supported by kallsyms.
         */
        vunmap_range((u64)__init_begin, (u64)__init_end);
}

void dump_mem_limit(void)
{
        if (memory_limit != PHYS_ADDR_MAX) {
                pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
        } else {
                pr_emerg("Memory Limit: none\n");
        }
}
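
/*
 * Example output: after booting with "mem=2G", dump_mem_limit() reports
 * "Memory Limit: 2048 MB" at emergency log level; on an unconstrained
 * boot it prints "Memory Limit: none".
 */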