// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
#define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif

phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

#ifdef CONFIG_XIP_KERNEL
extern char _xiprom[], _exiprom[], __data_loc;
#endif

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA	PGDIR_SIZE
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;

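/*
 * Page table allocation helpers. Three sets of implementations are used as
 * boot progresses (see setup_vm() and setup_vm_final() below):
 *  - *_early:  the MMU is off, so physical addresses are used directly and
 *		only the statically allocated early tables are available.
 *  - *_fixmap: the MMU is on with the early page table; newly allocated
 *		tables come from memblock and are accessed through the
 *		FIX_PTE/FIX_PMD fixmap slots.
 *  - *_late:   the linear mapping is up, so tables come from the page
 *		allocator and are accessed via __va().
 */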
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static phys_addr_t dma32_phys_limit __initdata;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
#ifdef CONFIG_64BIT
	print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
		  (unsigned long)ADDRESS_SPACE_END);
#endif
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

#ifdef CONFIG_SWIOTLB
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif
	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	print_vm_layout();
}

/*
 * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernels,
 * whereas for 64-bit kernels, the end of the virtual address space is occupied
 * by the modules/BPF/kernel mappings, which reduces the available size of the
 * linear mapping.
 * The limit can be lowered further via the "mem=" kernel parameter.
 */
#ifdef CONFIG_64BIT
static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
#endif

static int __init early_mem(char *p)
{
	u64 size;

	if (!p)
		return 1;

	size = memparse(p, &p) & PAGE_MASK;
	memory_limit = min_t(u64, size, memory_limit);

	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

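/*
 * setup_bootmem() finalizes the early memblock state: it applies the "mem="
 * limit, reserves the kernel image, the initrd and a non-builtin DTB, derives
 * min_low_pfn/max_low_pfn and the 32-bit DMA limit, scans the FDT for
 * reserved-memory regions and carves out the CMA areas.
 */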
static void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t __maybe_unused max_mapped_addr;
	phys_addr_t phys_ram_end;

#ifdef CONFIG_XIP_KERNEL
	vmlinux_start = __pa_symbol(&_sdata);
#endif

	memblock_enforce_memory_limit(memory_limit);

	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 */
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
	/*
	 * Make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next pmd aligned page.
	 */
	vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
#endif
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	phys_ram_end = memblock_end_of_DRAM();
#ifndef CONFIG_64BIT
#ifndef CONFIG_XIP_KERNEL
	phys_ram_base = memblock_start_of_DRAM();
#endif
	/*
	 * The memblock allocator is not aware that the last 4K bytes of the
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by memblock
	 * if the end of DRAM coincides with the maximum addressable memory.
	 * For 64-bit kernels, this problem cannot happen here as the end of
	 * the virtual address space is occupied by the kernel mapping, so the
	 * equivalent check is done as soon as the kernel mapping base address
	 * is determined (see setup_vm()).
	 */
	max_mapped_addr = __pa(~(ulong)0);
	if (max_mapped_addr == (phys_ram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);
#endif

	min_low_pfn = PFN_UP(phys_ram_base);
	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();
	/*
	 * If the DTB is built in, there is no need to reserve its memblock.
	 * Otherwise, reserve it, but avoid using early_init_fdt_reserve_self()
	 * since __pa() does not work for DTB pointers that are fixmap
	 * addresses.
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	if (IS_ENABLED(CONFIG_64BIT))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops _pt_ops __initdata;

#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
#else
#define pt_ops _pt_ops
#endif

unsigned long riscv_pfn_base __ro_after_init;
EXPORT_SYMBOL(riscv_pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pg_dir	((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte		((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir		((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings, so we
	 * should never reach here with the MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

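/*
 * Leaf level of the create_p*d_mapping() helpers below: install a single 4K
 * PTE for @va -> @pa. An already-present entry is left untouched.
 */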
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd	((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define fixmap_pmd	((pmd_t *)XIP_FIXUP(fixmap_pmd))
#define early_pmd	((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
#endif

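/*
 * Install a mapping of size @sz (PGDIR_SIZE, PMD_SIZE or PAGE_SIZE) for
 * @va -> @pa in @pgdp. When @sz is smaller than PGDIR_SIZE, the next-level
 * table is allocated (and zeroed) through the current pt_ops hooks and the
 * walk continues via create_pgd_next_mapping(). With the PMD level folded,
 * the "next" level is the PTE level directly.
 */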
void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

#ifdef CONFIG_XIP_KERNEL
/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
	void *from = (void *)(&__data_loc);
	void *to = (void *)CONFIG_PHYS_RAM_BASE;
	size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));

	memcpy(to, from, sz);
}
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (is_va_kernel_text(va))
		return PAGE_KERNEL_READ_EXEC;

	/*
	 * In a 64-bit kernel, the kernel mapping is outside the linear mapping,
	 * so we must protect its linear mapping alias from being executed and
	 * written.
	 * The rodata section is marked read-only in mark_rodata_ro().
	 */
	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
		return PAGE_KERNEL_READ;

	return PAGE_KERNEL;
}

void mark_rodata_ro(void)
{
	set_kernel_memory(__start_rodata, _data, set_memory_ro);
	if (IS_ENABLED(CONFIG_64BIT))
		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
				  set_memory_ro);

	debug_checkwx();
}
#else
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
		return PAGE_KERNEL;

	return PAGE_KERNEL_EXEC;
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It must use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE does not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

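/*
 * Map the kernel image into @pgdir with PMD_SIZE pages. For XIP kernels the
 * flash-resident text (xiprom) and the RAM-resident data are mapped
 * separately; otherwise the whole image is mapped, executable while @early
 * and with section-accurate permissions (pgprot_from_va()) afterwards.
 */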
#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
					    __always_unused bool early)
{
	uintptr_t va, end_va;

	/* Map the flash resident part */
	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.xiprom + (va - kernel_map.virt_addr),
				   PMD_SIZE, PAGE_KERNEL_EXEC);

	/* Map the data in RAM */
	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
				   PMD_SIZE, PAGE_KERNEL);
}
#else
static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
	uintptr_t va, end_va;

	end_va = kernel_map.virt_addr + kernel_map.size;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
				   PMD_SIZE,
				   early ?
					PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif

/*
 * Set up a 4MB mapping that encompasses the device tree: for 64-bit kernels
 * this means two PMD entries, whereas for 32-bit kernels it is a single PGDIR
 * entry.
 */
static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);

	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa,
			   PGDIR_SIZE,
			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);

	if (IS_ENABLED(CONFIG_64BIT)) {
		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
				   pa, PMD_SIZE, PAGE_KERNEL);
		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	}

	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else
	/*
	 * For 64-bit kernels, __va can't be used since it would return a linear
	 * mapping address whereas dtb_early_va will be used before
	 * setup_vm_final() installs the linear mapping. For 32-bit kernels, as
	 * the kernel is mapped in the linear mapping, this makes no difference.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#endif

	dtb_early_pa = dtb_pa;
}

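/*
 * setup_vm() builds the page tables needed to turn the MMU on:
 * trampoline_pg_dir covers just the first PMD (or PGDIR entry) of the kernel,
 * while early_pg_dir maps the fixmap region, the whole kernel image and a
 * PMD-aligned window for the FDT, which is enough to reach paging_init().
 */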
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;

	kernel_map.virt_addr = KERNEL_LINK_ADDR;

#ifdef CONFIG_XIP_KERNEL
	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

	phys_ram_base = CONFIG_PHYS_RAM_BASE;
	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);

	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
	kernel_map.phys_addr = (uintptr_t)(&_start);
	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
	kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;

	riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);

#ifdef CONFIG_64BIT
	/*
	 * The last 4K bytes of the addressable memory cannot be mapped because
	 * of the IS_ERR_VALUE macro.
	 */
	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, true);

	/* Setup early mapping for FDT early scan */
	create_fdt_early_page_table(early_pg_dir, dtb_pa);

	/*
	 * The boot-time fixmap can only handle a PMD_SIZE mapping. Thus, the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

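/*
 * setup_vm_final() rebuilds the page tables in swapper_pg_dir now that the
 * MMU is on: it recreates the fixmap entry, installs the linear mapping of
 * every memblock bank at the largest granularity best_map_size() allows,
 * remaps the kernel with its final permissions (64-bit only), switches satp
 * to swapper_pg_dir and finally flips pt_ops to the page-allocator variants.
 */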
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but the page table setup is not
	 * complete yet, so the fixmap-based page table allocation functions
	 * must be used.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);
		if (end >= __pa(PAGE_OFFSET) + memory_limit)
			end = __pa(PAGE_OFFSET) + memory_limit;

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);

			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
					   pgprot_from_va(va));
		}
	}

#ifdef CONFIG_64BIT
	/* Map the kernel */
	create_kernel_page_table(swapper_pg_dir, false);
#endif

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* Generic page allocation functions must be used to set up page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserve memory for the crash kernel
 *
 * This function reserves the memory area specified by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by the dump
 * capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base = 0;
	unsigned long long crash_size = 0;
	unsigned long search_start = memblock_start_of_DRAM();
	unsigned long search_end = memblock_end_of_DRAM();

	int ret = 0;

	/*
	 * Don't reserve a region for a crash kernel on a crash kernel since it
	 * doesn't make much sense and we have limited memory resources.
	 */
#ifdef CONFIG_CRASH_DUMP
	if (is_kdump_kernel()) {
		pr_info("crashkernel: ignoring reservation request\n");
		return;
	}
#endif

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base) {
		search_start = crash_base;
		search_end = crash_base + crash_size;
	}

	/*
	 * The current riscv boot protocol requires 2MB alignment for
	 * RV64 and 4MB alignment for RV32 (hugepage size).
	 */
	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
					       search_start, search_end);
	if (crash_base == 0) {
		pr_warn("crashkernel: couldn't allocate %lldKB\n",
			crash_size >> 10);
		return;
	}

	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#endif /* CONFIG_KEXEC_CORE */

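/*
 * paging_init() runs setup_bootmem() followed by setup_vm_final(), i.e. the
 * memblock state must be final before the linear mapping is created.
 * misc_mem_init() then performs the remaining memory init: early memtest,
 * NUMA, sparsemem, zone sizes and the optional crashkernel reservation.
 */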
void __init paging_init(void)
{
	setup_bootmem();
	setup_vm_final();
}

void __init misc_mem_init(void)
{
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
#ifdef CONFIG_KEXEC_CORE
	reserve_crashkernel();
#endif
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif