// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#ifdef CONFIG_RELOCATABLE
#include <linux/elf.h>
#endif
#include <linux/kfence.h>

#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/ptdump.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif

#ifdef CONFIG_64BIT
u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
#else
u64 satp_mode __ro_after_init = SATP_MODE_32;
#endif
EXPORT_SYMBOL(satp_mode);

#ifdef CONFIG_64BIT
bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);
#endif

phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;

phys_addr_t dma32_phys_limit __initdata;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)

#define LOG2_SZ_1K ilog2(SZ_1K)
#define LOG2_SZ_1M ilog2(SZ_1M)
#define LOG2_SZ_1G ilog2(SZ_1G)
#define LOG2_SZ_1T ilog2(SZ_1T)

static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1K));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1M));
}

static inline void print_mlg(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1G));
}

#ifdef CONFIG_64BIT
static inline void print_mlt(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1T));
}
#else
#define print_mlt(n, b, t) do {} while (0)
#endif

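/*
 * Print one line of the layout, using the largest unit (kB/MB/GB/TB) for
 * which the region spans at least 10 units.
 */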
static inline void print_ml(char *name, unsigned long b, unsigned long t)
{
	unsigned long diff = t - b;

	if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
		print_mlt(name, b, t);
	else if ((diff >> LOG2_SZ_1G) >= 10)
		print_mlg(name, b, t);
	else if ((diff >> LOG2_SZ_1M) >= 10)
		print_mlm(name, b, t);
	else
		print_mlk(name, b, t);
}

static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_ml("fixmap", (unsigned long)FIXADDR_START,
		 (unsigned long)FIXADDR_TOP);
	print_ml("pci io", (unsigned long)PCI_IO_START,
		 (unsigned long)PCI_IO_END);
	print_ml("vmemmap", (unsigned long)VMEMMAP_START,
		 (unsigned long)VMEMMAP_END);
	print_ml("vmalloc", (unsigned long)VMALLOC_START,
		 (unsigned long)VMALLOC_END);
#ifdef CONFIG_64BIT
	print_ml("modules", (unsigned long)MODULES_VADDR,
		 (unsigned long)MODULES_END);
#endif
	print_ml("lowmem", (unsigned long)PAGE_OFFSET,
		 (unsigned long)high_memory);
	if (IS_ENABLED(CONFIG_64BIT)) {
#ifdef CONFIG_KASAN
		print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif

		print_ml("kernel", (unsigned long)kernel_map.virt_addr,
			 (unsigned long)ADDRESS_SPACE_END);
	}
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
	memblock_free_all();

	print_vm_layout();
}

/* Limit the memory size via the "mem=" cmdline parameter. */
static phys_addr_t memory_limit;

static int __init early_mem(char *p)
{
	u64 size;

	if (!p)
		return 1;

	size = memparse(p, &p) & PAGE_MASK;
	memory_limit = min_t(u64, size, memory_limit);

	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

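/*
 * setup_bootmem() finalizes the early memblock view of memory: it reserves
 * the kernel image and initrd, applies the "mem=" limit, honours regions
 * reserved in the device tree and sets up the DMA32/CMA limits.
 */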
static void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t max_mapped_addr;
	phys_addr_t phys_ram_end, vmlinux_start;

	if (IS_ENABLED(CONFIG_XIP_KERNEL))
		vmlinux_start = __pa_symbol(&_sdata);
	else
		vmlinux_start = __pa_symbol(&_start);

	memblock_enforce_memory_limit(memory_limit);

	/*
	 * Make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next pmd aligned page.
	 */
	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	phys_ram_end = memblock_end_of_DRAM();

	/*
	 * Make sure we align the start of the memory on a PMD boundary so that
	 * at worst, we map the linear mapping with PMD mappings.
	 */
	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
		phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;

	/*
	 * In 64-bit, any use of __va/__pa before this point is wrong as we
	 * did not know the start of DRAM before.
	 */
	if (IS_ENABLED(CONFIG_64BIT))
		kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;

	/*
	 * The memblock allocator is not aware of the fact that the last 4K
	 * bytes of the addressable memory can not be mapped because of the
	 * IS_ERR_VALUE macro. Make sure that the last 4K bytes are not usable
	 * by memblock if the end of DRAM is equal to the maximum addressable
	 * memory. For the 64-bit kernel, this problem can't happen here as the
	 * end of the virtual address space is occupied by the kernel mapping,
	 * so this check must be done as soon as the kernel mapping base
	 * address is determined.
	 */
	if (!IS_ENABLED(CONFIG_64BIT)) {
		max_mapped_addr = __pa(~(ulong)0);
		if (max_mapped_addr == (phys_ram_end - 1))
			memblock_set_current_limit(max_mapped_addr - 4096);
	}

	min_low_pfn = PFN_UP(phys_ram_base);
	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();

	/*
	 * No allocation should be done before reserving the memory as defined
	 * in the device tree, otherwise the allocation could end up in a
	 * reserved region.
	 */
	early_init_fdt_scan_reserved_mem();

	/*
	 * If DTB is built in, no need to reserve its memblock.
	 * Otherwise, do reserve it but avoid using
	 * early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	dma_contiguous_reserve(dma32_phys_limit);
	if (IS_ENABLED(CONFIG_64BIT))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_MMU
struct pt_alloc_ops pt_ops __initdata;

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */

static const pgprot_t protection_map[16] = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READ,
	[VM_WRITE] = PAGE_COPY,
	[VM_WRITE | VM_READ] = PAGE_COPY,
	[VM_EXEC] = PAGE_EXEC,
	[VM_EXEC | VM_READ] = PAGE_READ_EXEC,
	[VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READ,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC] = PAGE_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

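/*
 * Page table allocation helpers come in three flavors, selected through
 * pt_ops: _early (MMU off, physical addresses are used directly), _fixmap
 * (MMU on but no linear mapping yet, so new tables are temporarily mapped
 * through the fixmap) and _late (regular allocations once the linear
 * mapping is available).
 */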
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);

	BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc));
	return __pa((pte_t *)ptdesc_address(ptdesc));
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define fixmap_pmd ((pmd_t *)XIP_FIXUP(fixmap_pmd))
#define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */

static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d))
#define fixmap_p4d ((p4d_t *)XIP_FIXUP(fixmap_p4d))
#define early_p4d ((p4d_t *)XIP_FIXUP(early_p4d))
#endif /* CONFIG_XIP_KERNEL */

static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pud ((pud_t *)XIP_FIXUP(trampoline_pud))
#define fixmap_pud ((pud_t *)XIP_FIXUP(fixmap_pud))
#define early_pud ((pud_t *)XIP_FIXUP(early_pud))
#endif /* CONFIG_XIP_KERNEL */

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);

	BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc));
	return __pa((pmd_t *)ptdesc_address(ptdesc));
}

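/*
 * Install a mapping at the PMD level: a leaf PMD when sz == PMD_SIZE,
 * otherwise walk (and allocate on demand) the next-level PTE table and
 * install a PAGE_SIZE mapping there.
 */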
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

static pud_t *__init get_pud_virt_early(phys_addr_t pa)
{
	return (pud_t *)((uintptr_t)pa);
}

static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PUD);
	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
}

static pud_t *__init get_pud_virt_late(phys_addr_t pa)
{
	return (pud_t *)__va(pa);
}

static phys_addr_t __init alloc_pud_early(uintptr_t va)
{
	/* Only one PUD is available for early mapping */
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pud;
}

static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pud_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
{
	return (p4d_t *)((uintptr_t)pa);
}

static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_P4D);
	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
}

static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
{
	return (p4d_t *)__va(pa);
}

static phys_addr_t __init alloc_p4d_early(uintptr_t va)
{
	/* Only one P4D is available for early mapping */
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_p4d;
}

static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_p4d_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pud_mapping(pud_t *pudp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pmd_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pud_index = pud_index(va);

	if (sz == PUD_SIZE) {
		if (pud_val(pudp[pud_index]) == 0)
			pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
		return;
	}

	if (pud_val(pudp[pud_index]) == 0) {
		next_phys = pt_ops.alloc_pmd(va);
		pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = pt_ops.get_pmd_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index]));
		nextp = pt_ops.get_pmd_virt(next_phys);
	}

	create_pmd_mapping(nextp, va, pa, sz, prot);
}

static void __init create_p4d_mapping(p4d_t *p4dp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pud_t *nextp;
	phys_addr_t next_phys;
	uintptr_t p4d_index = p4d_index(va);

	if (sz == P4D_SIZE) {
		if (p4d_val(p4dp[p4d_index]) == 0)
			p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
		return;
	}

	if (p4d_val(p4dp[p4d_index]) == 0) {
		next_phys = pt_ops.alloc_pud(va);
		p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = pt_ops.get_pud_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
		nextp = pt_ops.get_pud_virt(next_phys);
	}

	create_pud_mapping(nextp, va, pa, sz, prot);
}

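/*
 * The level right below the PGD depends on the paging mode chosen at
 * runtime: these helpers dispatch to the P4D, PUD or PMD variants so that
 * disabled levels are transparently skipped.
 */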
#define pgd_next_t p4d_t
#define alloc_pgd_next(__va) (pgtable_l5_enabled ?			\
		pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ?		\
		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
#define get_pgd_next_virt(__pa) (pgtable_l5_enabled ?			\
		pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ?	\
		pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
		(pgtable_l5_enabled ?					\
		create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) :	\
		(pgtable_l4_enabled ?					\
		create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) :	\
		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
#define fixmap_pgd_next (pgtable_l5_enabled ?				\
		(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ?		\
		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
#define trampoline_pgd_next (pgtable_l5_enabled ?			\
		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
#else
#define pgd_next_t pte_t
#define alloc_pgd_next(__va) pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next ((uintptr_t)fixmap_pte)
#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#endif /* __PAGETABLE_PMD_FOLDED */

void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

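/*
 * Return the largest mapping size that can be used for this chunk: both the
 * physical and virtual addresses must be naturally aligned to it and the
 * remaining size must cover it.
 */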
static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
				      phys_addr_t size)
{
	if (pgtable_l5_enabled &&
	    !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
		return P4D_SIZE;

	if (pgtable_l4_enabled &&
	    !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
		return PUD_SIZE;

	if (IS_ENABLED(CONFIG_64BIT) &&
	    !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
		return PMD_SIZE;

	return PAGE_SIZE;
}

#ifdef CONFIG_XIP_KERNEL
#define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
extern char _xiprom[], _exiprom[], __data_loc;

/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
	void *from = (void *)(&__data_loc);
	void *to = (void *)CONFIG_PHYS_RAM_BASE;
	size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));

	memcpy(to, from, sz);
}
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (is_va_kernel_text(va))
		return PAGE_KERNEL_READ_EXEC;

	/*
	 * In the 64-bit kernel, the kernel mapping is outside the linear
	 * mapping, so we must protect its linear mapping alias from being
	 * executed and written. The rodata section is marked read-only in
	 * mark_rodata_ro().
	 */
	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
		return PAGE_KERNEL_READ;

	return PAGE_KERNEL;
}

void mark_rodata_ro(void)
{
	set_kernel_memory(__start_rodata, _data, set_memory_ro);
	if (IS_ENABLED(CONFIG_64BIT))
		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
				  set_memory_ro);

	debug_checkwx();
}
#else
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
		return PAGE_KERNEL;

	return PAGE_KERNEL_EXEC;
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
u64 __pi_set_satp_mode_from_cmdline(uintptr_t dtb_pa);

static void __init disable_pgtable_l5(void)
{
	pgtable_l5_enabled = false;
	kernel_map.page_offset = PAGE_OFFSET_L4;
	satp_mode = SATP_MODE_48;
}

static void __init disable_pgtable_l4(void)
{
	pgtable_l4_enabled = false;
	kernel_map.page_offset = PAGE_OFFSET_L3;
	satp_mode = SATP_MODE_39;
}

static int __init print_no4lvl(char *p)
{
	pr_info("Disabled 4-level and 5-level paging");
	return 0;
}
early_param("no4lvl", print_no4lvl);

static int __init print_no5lvl(char *p)
{
	pr_info("Disabled 5-level paging");
	return 0;
}
early_param("no5lvl", print_no5lvl);

/*
 * There is a simple way to determine if 4-level is supported by the
 * underlying hardware: establish 1:1 mapping in 4-level page table mode
 * then read SATP to see if the configuration was taken into account
 * meaning sv48 is supported.
 */
static __init void set_satp_mode(uintptr_t dtb_pa)
{
	u64 identity_satp, hw_satp;
	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
	u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);

	if (satp_mode_cmdline == SATP_MODE_57) {
		disable_pgtable_l5();
	} else if (satp_mode_cmdline == SATP_MODE_48) {
		disable_pgtable_l5();
		disable_pgtable_l4();
		return;
	}

	create_p4d_mapping(early_p4d,
			   set_satp_mode_pmd, (uintptr_t)early_pud,
			   P4D_SIZE, PAGE_TABLE);
	create_pud_mapping(early_pud,
			   set_satp_mode_pmd, (uintptr_t)early_pmd,
			   PUD_SIZE, PAGE_TABLE);
	/* Handle the case where set_satp_mode straddles 2 PMDs */
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd, set_satp_mode_pmd,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd + PMD_SIZE,
			   set_satp_mode_pmd + PMD_SIZE,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
retry:
	create_pgd_mapping(early_pg_dir,
			   set_satp_mode_pmd,
			   pgtable_l5_enabled ?
				(uintptr_t)early_p4d : (uintptr_t)early_pud,
			   PGDIR_SIZE, PAGE_TABLE);

	identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;

	local_flush_tlb_all();
	csr_write(CSR_SATP, identity_satp);
	hw_satp = csr_swap(CSR_SATP, 0ULL);
	local_flush_tlb_all();

	if (hw_satp != identity_satp) {
		if (pgtable_l5_enabled) {
			disable_pgtable_l5();
			memset(early_pg_dir, 0, PAGE_SIZE);
			goto retry;
		}
		disable_pgtable_l4();
	}

	memset(early_pg_dir, 0, PAGE_SIZE);
	memset(early_p4d, 0, PAGE_SIZE);
	memset(early_pud, 0, PAGE_SIZE);
	memset(early_pmd, 0, PAGE_SIZE);
}
#endif

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * Following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

#ifdef CONFIG_RELOCATABLE
extern unsigned long __rela_dyn_start, __rela_dyn_end;

static void __init relocate_kernel(void)
{
	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
	/*
	 * This holds the offset between the linked virtual address and the
	 * relocated virtual address.
	 */
	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
	/*
	 * This holds the offset between kernel linked virtual address and
	 * physical address.
	 */
	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;

	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
		Elf64_Addr relocated_addr = rela->r_addend;

		if (rela->r_info != R_RISCV_RELATIVE)
			continue;

		/*
		 * Make sure to not relocate vdso symbols like rt_sigreturn
		 * which are linked from the address 0 in vmlinux since
		 * vdso symbol addresses are actually used as an offset from
		 * mm->context.vdso in VDSO_OFFSET macro.
		 */
		if (relocated_addr >= KERNEL_LINK_ADDR)
			relocated_addr += reloc_offset;

		*(Elf64_Addr *)addr = relocated_addr;
	}
}
#endif /* CONFIG_RELOCATABLE */

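/*
 * Map the kernel image with PMD-sized mappings. For XIP kernels the text
 * stays in flash while the data is mapped from RAM; otherwise the whole
 * image is mapped from its load address, executable during early boot and
 * with the final protections (pgprot_from_va()) afterwards.
 */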
#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
					    __always_unused bool early)
{
	uintptr_t va, end_va;

	/* Map the flash resident part */
	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.xiprom + (va - kernel_map.virt_addr),
				   PMD_SIZE, PAGE_KERNEL_EXEC);

	/* Map the data in RAM */
	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
				   PMD_SIZE, PAGE_KERNEL);
}
#else
static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
	uintptr_t va, end_va;

	end_va = kernel_map.virt_addr + kernel_map.size;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
				   PMD_SIZE,
				   early ?
					PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif

/*
 * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
 * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
 * entry.
 */
static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
					       uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);

	/* Make sure the fdt fixmap address is always aligned on PMD size */
	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));

	/* In 32-bit only, the fdt lies in its own PGD */
	if (!IS_ENABLED(CONFIG_64BIT)) {
		create_pgd_mapping(early_pg_dir, fix_fdt_va,
				   pa, MAX_FDT_SIZE, PAGE_KERNEL);
	} else {
		create_pmd_mapping(fixmap_pmd, fix_fdt_va,
				   pa, PMD_SIZE, PAGE_KERNEL);
		create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	}

	dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
#else
	/*
	 * For 64-bit kernel, __va can't be used since it would return a linear
	 * mapping address whereas dtb_early_va will be used before
	 * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
	 * kernel is mapped in the linear mapping, that makes no difference.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#endif

	dtb_early_pa = dtb_pa;
}

/*
 * MMU is not enabled, the page tables are allocated directly using
 * early_pmd/pud/p4d and the address returned is the physical one.
 */
static void __init pt_ops_set_early(void)
{
	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
	pt_ops.alloc_pud = alloc_pud_early;
	pt_ops.get_pud_virt = get_pud_virt_early;
	pt_ops.alloc_p4d = alloc_p4d_early;
	pt_ops.get_p4d_virt = get_p4d_virt_early;
#endif
}

/*
 * MMU is enabled but page table setup is not complete yet.
 * fixmap page table alloc functions must be used as a means to temporarily
 * map the allocated physical pages since the linear mapping does not exist
 * yet.
 *
 * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
 * but it will be used as described above.
 */
static void __init pt_ops_set_fixmap(void)
{
	pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
	pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap);
	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap);
	pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap);
	pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap);
	pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap);
	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap);
#endif
}

/*
 * MMU is enabled and page table setup is complete, so from now, we can use
 * generic page allocation functions to setup page table.
 */
static void __init pt_ops_set_late(void)
{
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
	pt_ops.alloc_pud = alloc_pud_late;
	pt_ops.get_pud_virt = get_pud_virt_late;
	pt_ops.alloc_p4d = alloc_p4d_late;
	pt_ops.get_p4d_virt = get_p4d_virt_late;
#endif
}

#ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);

static int __init print_nokaslr(char *p)
{
	pr_info("Disabled KASLR");
	return 0;
}
early_param("nokaslr", print_nokaslr);

unsigned long kaslr_offset(void)
{
	return kernel_map.virt_offset;
}
#endif

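/*
 * setup_vm() builds the early page tables in early_pg_dir: the trampoline
 * and fixmap entries, a PMD-mapped kernel image and the early FDT mapping,
 * so that the kernel can run at its virtual address and reach paging_init().
 */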
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;

#ifdef CONFIG_RANDOMIZE_BASE
	if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
		u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
		u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
		u32 nr_pos;

		/*
		 * Compute the number of positions available: we are limited
		 * by the early page table that only has one PUD and we must
		 * be aligned on PMD_SIZE.
		 */
		nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;

		kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
	}
#endif

	kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);

#ifdef CONFIG_XIP_KERNEL
	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

	phys_ram_base = CONFIG_PHYS_RAM_BASE;
	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);

	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
	kernel_map.phys_addr = (uintptr_t)(&_start);
	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif

#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
	set_satp_mode(dtb_pa);
#endif

	/*
	 * In 64-bit, we defer the setup of va_pa_offset to setup_bootmem,
	 * where we have the system memory layout: this allows us to align
	 * the physical and virtual mappings and then make use of PUD/P4D/PGD
	 * for the linear mapping. This is only possible because the kernel
	 * mapping lies outside the linear mapping.
	 * In 32-bit however, as the kernel resides in the linear mapping,
	 * setup_vm_final can not change the mapping established here,
	 * otherwise the same kernel addresses would get mapped to different
	 * physical addresses (if the start of dram is different from the
	 * kernel physical address start).
	 */
	kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
				0UL : PAGE_OFFSET - kernel_map.phys_addr;
	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;

	/*
	 * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
	 * kernel, whereas for 64-bit kernel, the end of the virtual address
	 * space is occupied by the modules/BPF/kernel mappings which reduces
	 * the available size of the linear mapping.
	 */
	memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);

#ifdef CONFIG_64BIT
	/*
	 * The last 4K bytes of the addressable memory can not be mapped because
	 * of IS_ERR_VALUE macro.
	 */
	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif

#ifdef CONFIG_RELOCATABLE
	/*
	 * Early page table uses only one PUD, which makes it possible
	 * to map PUD_SIZE aligned on PUD_SIZE: if the relocation offset
	 * makes the kernel cross over a PUD_SIZE boundary, raise a bug
	 * since a part of the kernel would not get mapped.
	 */
	BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
	relocate_kernel();
#endif

	apply_early_boot_alternatives();
	pt_ops_set_early();

	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap P4D and PUD */
	if (pgtable_l5_enabled)
		create_p4d_mapping(fixmap_p4d, FIXADDR_START,
				   (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
	/* Setup fixmap PUD and PMD */
	if (pgtable_l4_enabled)
		create_pud_mapping(fixmap_pud, FIXADDR_START,
				   (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
	if (pgtable_l5_enabled)
		create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
				   (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
	if (pgtable_l4_enabled)
		create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
				   (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, true);

	/* Setup early mapping for FDT early scan */
	create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings, so the
	 * boot-time ioremap range cannot span multiple PMDs.
	 */
	BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * Early ioremap fixmap is already created as it lies within first 2MB
	 * of fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END
	 * and FIX_BTMAP_BEGIN should lie in the same PMD. Verify that and warn
	 * the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
#endif

	pt_ops_set_fixmap();
}

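/*
 * Map the physical range [start, end) in the linear mapping, using the
 * largest suitable page size for each chunk unless fixed_map_size forces a
 * specific one (e.g. PAGE_SIZE for the KFENCE pool).
 */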
static void __init create_linear_mapping_range(phys_addr_t start,
					       phys_addr_t end,
					       uintptr_t fixed_map_size)
{
	phys_addr_t pa;
	uintptr_t va, map_size;

	for (pa = start; pa < end; pa += map_size) {
		va = (uintptr_t)__va(pa);
		map_size = fixed_map_size ? fixed_map_size :
					    best_map_size(pa, va, end - pa);

		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
				   pgprot_from_va(va));
	}
}

static void __init create_linear_mapping_page_table(void)
{
	phys_addr_t start, end;
	phys_addr_t kfence_pool __maybe_unused;
	u64 i;

#ifdef CONFIG_STRICT_KERNEL_RWX
	phys_addr_t ktext_start = __pa_symbol(_start);
	phys_addr_t ktext_size = __init_data_begin - _start;
	phys_addr_t krodata_start = __pa_symbol(__start_rodata);
	phys_addr_t krodata_size = _data - __start_rodata;

	/* Isolate kernel text and rodata so they don't get mapped with a PUD */
	memblock_mark_nomap(ktext_start, ktext_size);
	memblock_mark_nomap(krodata_start, krodata_size);
#endif

#ifdef CONFIG_KFENCE
	/*
	 * kfence pool must be backed by PAGE_SIZE mappings, so allocate it
	 * before we setup the linear mapping so that we avoid using hugepages
	 * for this region.
	 */
	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	BUG_ON(!kfence_pool);

	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
	__kfence_pool = __va(kfence_pool);
#endif

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);
		if (end >= __pa(PAGE_OFFSET) + memory_limit)
			end = __pa(PAGE_OFFSET) + memory_limit;

		create_linear_mapping_range(start, end, 0);
	}

#ifdef CONFIG_STRICT_KERNEL_RWX
	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
	create_linear_mapping_range(krodata_start,
				    krodata_start + krodata_size, 0);

	memblock_clear_nomap(ktext_start, ktext_size);
	memblock_clear_nomap(krodata_start, krodata_size);
#endif

#ifdef CONFIG_KFENCE
	create_linear_mapping_range(kfence_pool,
				    kfence_pool + KFENCE_POOL_SIZE,
				    PAGE_SIZE);

	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
#endif
}

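/*
 * setup_vm_final() switches from the early page tables to swapper_pg_dir:
 * it recreates the fixmap, installs the full linear mapping and remaps the
 * kernel with its final protections.
 */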
static void __init setup_vm_final(void)
{
	/* Setup swapper PGD for fixmap */
#if !defined(CONFIG_64BIT)
	/*
	 * In 32-bit, the device tree lies in a pgd entry, so it must be copied
	 * directly in swapper_pg_dir in addition to the pgd entry that points
	 * to fixmap_pte.
	 */
	unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT));

	set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]);
#endif
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map the linear mapping */
	create_linear_mapping_page_table();

	/* Map the kernel */
	if (IS_ENABLED(CONFIG_64BIT))
		create_kernel_page_table(swapper_pg_dir, false);

#ifdef CONFIG_KASAN
	kasan_swapper_init();
#endif

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);
	clear_fixmap(FIX_PUD);
	clear_fixmap(FIX_P4D);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();

	pt_ops_set_late();
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

/*
 * arch_reserve_crashkernel() - reserve memory for the crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by the dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init arch_reserve_crashkernel(void)
{
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}

void __init paging_init(void)
{
	setup_bootmem();
	setup_vm_final();

	/* Depends on the linear mapping being ready */
	memblock_allow_resize();
}

void __init misc_mem_init(void)
{
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
	arch_reserve_crashkernel();
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif

#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
/*
 * Pre-allocates page-table pages for a specific area in the kernel
 * page-table. Only the level which needs to be synchronized between
 * all page-tables is allocated because the synchronization can be
 * expensive.
 */
static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
					       const char *area)
{
	unsigned long addr;
	const char *lvl;

	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		lvl = "p4d";
		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			goto failed;

		if (pgtable_l5_enabled)
			continue;

		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			goto failed;

		if (pgtable_l4_enabled)
			continue;

		lvl = "pmd";
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			goto failed;
	}
	return;

failed:
	/*
	 * The pages have to be there now or they will be missing in
	 * process page-tables later.
	 */
	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
}

void __init pgtable_cache_init(void)
{
	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
	if (IS_ENABLED(CONFIG_MODULES))
		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
}
#endif