/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
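		/*
		 * An explicit base, e.g. "crashkernel=256M@0x60000000" (the
		 * value is illustrative, not a recommendation), is only
		 * honoured if the checks below pass: the range must lie
		 * entirely within memory, must not overlap an existing
		 * reservation, and must respect the 2MB alignment required
		 * by the arm64 boot protocol.
		 */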
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image, and a dump
 * capture kernel uses it to access the primary kernel's system memory.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
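/*
 * Worked example (derived from the code below, not from any particular
 * platform): if DRAM starts at 0x8000_0000, the offset is 0 and the limit is
 * min(4GB, end of DRAM); if DRAM starts at 0x80_8000_0000, the offset is
 * 0x80_0000_0000 and the limit becomes min(0x81_0000_0000, end of DRAM).
 */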
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA32] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
		return 0;
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
				memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
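/*
 * Example (illustrative use of the standard memparse suffixes, not a
 * recommended value): booting with "mem=768M" caps the usable RAM at
 * 768MB, with the parsed value rounded down to a page boundary.
 */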
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel, which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it would result in
		 * the initrd becoming inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_size);

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

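	/*
	 * Note on the arithmetic below (a reading aid, not a behavioural
	 * change): memstart_offset_seed is a 16-bit KASLR seed, so the code
	 * moves memstart_addr down by roughly seed/65536 of the spare
	 * linear-map range, quantized to ARM64_MEMSTART_ALIGN-sized steps.
	 */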
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
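/*
 * How this works (a summary of the code below): walk the memblock memory
 * regions in address order and hand the struct page entries covering the
 * holes between adjacent banks back to memblock, keeping the entries the VM
 * subsystem still expects around bank boundaries (MAX_ORDER_NR_PAGES, and
 * whole SPARSEMEM sections where relevant).
 */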
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	/*
	 * Bring up the software IO TLB when it is forced on the command
	 * line, or when there is memory above arm64_dma_phys_limit that a
	 * 32-bit DMA mask cannot reach directly.
	 */
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
		memblock_free(__virt_to_phys(start), end - start);
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);