// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#ifdef CONFIG_RELOCATABLE
#include <linux/elf.h>
#endif
#include <linux/kfence.h>
#include <linux/execmem.h>

#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
#define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif

#ifdef CONFIG_64BIT
u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
#else
u64 satp_mode __ro_after_init = SATP_MODE_32;
#endif
EXPORT_SYMBOL(satp_mode);

#ifdef CONFIG_64BIT
bool pgtable_l4_enabled __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL);
bool pgtable_l5_enabled __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);
#endif

phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;

phys_addr_t dma32_phys_limit __initdata;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)

#define LOG2_SZ_1K ilog2(SZ_1K)
#define LOG2_SZ_1M ilog2(SZ_1M)
#define LOG2_SZ_1G ilog2(SZ_1G)
#define LOG2_SZ_1T ilog2(SZ_1T)

static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1K));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1M));
}

static inline void print_mlg(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1G));
}

#ifdef CONFIG_64BIT
static inline void print_mlt(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t,
		  (((t) - (b)) >> LOG2_SZ_1T));
}
#else
#define print_mlt(n, b, t) do {} while (0)
#endif

static inline void print_ml(char *name, unsigned long b, unsigned long t)
{
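	/* Print the range in the largest unit that still yields a value of at least 10. */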
	unsigned long diff = t - b;

	if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
		print_mlt(name, b, t);
	else if ((diff >> LOG2_SZ_1G) >= 10)
		print_mlg(name, b, t);
	else if ((diff >> LOG2_SZ_1M) >= 10)
		print_mlm(name, b, t);
	else
		print_mlk(name, b, t);
}

static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_ml("fixmap", (unsigned long)FIXADDR_START,
		 (unsigned long)FIXADDR_TOP);
	print_ml("pci io", (unsigned long)PCI_IO_START,
		 (unsigned long)PCI_IO_END);
	print_ml("vmemmap", (unsigned long)VMEMMAP_START,
		 (unsigned long)VMEMMAP_END);
	print_ml("vmalloc", (unsigned long)VMALLOC_START,
		 (unsigned long)VMALLOC_END);
#ifdef CONFIG_64BIT
	print_ml("modules", (unsigned long)MODULES_VADDR,
		 (unsigned long)MODULES_END);
#endif
	print_ml("lowmem", (unsigned long)PAGE_OFFSET,
		 (unsigned long)high_memory);
	if (IS_ENABLED(CONFIG_64BIT)) {
#ifdef CONFIG_KASAN
		print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif

		print_ml("kernel", (unsigned long)kernel_map.virt_addr,
			 (unsigned long)ADDRESS_SPACE_END);
	}
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

void __init mem_init(void)
{
	bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit);
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb &&
	    dma_cache_alignment != 1) {
		/*
		 * If no bouncing needed for ZONE_DMA, allocate 1MB swiotlb
		 * buffer per 1GB of RAM for kmalloc() bouncing on
		 * non-coherent platforms.
		 */
		unsigned long size =
			DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
		swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
		swiotlb = true;
	}

	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
	memblock_free_all();

	print_vm_layout();
}

/* Limit the memory size via the "mem=" command line parameter. */
static phys_addr_t memory_limit;
#ifdef CONFIG_XIP_KERNEL
#define memory_limit	(*(phys_addr_t *)XIP_FIXUP(&memory_limit))
#endif /* CONFIG_XIP_KERNEL */

static int __init early_mem(char *p)
{
	u64 size;

	if (!p)
		return 1;

	size = memparse(p, &p) & PAGE_MASK;
	memory_limit = min_t(u64, size, memory_limit);

	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

static void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t max_mapped_addr;
	phys_addr_t phys_ram_end, vmlinux_start;

	if (IS_ENABLED(CONFIG_XIP_KERNEL))
		vmlinux_start = __pa_symbol(&_sdata);
	else
		vmlinux_start = __pa_symbol(&_start);

	memblock_enforce_memory_limit(memory_limit);

	/*
	 * Make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next pmd aligned page.
	 */
	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	phys_ram_end = memblock_end_of_DRAM();

	/*
	 * Make sure we align the start of the memory on a PMD boundary so that
	 * at worst, we map the linear mapping with PMD mappings.
	 */
	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
		phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;

	/*
	 * In 64-bit, any use of __va/__pa before this point is wrong as we
	 * did not know the start of DRAM before.
	 */
	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
		kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;

	/*
	 * Reserve physical address space that would be mapped to virtual
	 * addresses greater than (void *)(-PAGE_SIZE) because:
	 *  - This memory would overlap with ERR_PTR
	 *  - This memory belongs to high memory, which is not supported
	 *
	 * This is not applicable to 64-bit kernel, because virtual addresses
	 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
	 * occupied by kernel mapping. Also it is unrealistic for high memory
	 * to exist on 64-bit platforms.
	 */
	if (!IS_ENABLED(CONFIG_64BIT)) {
		max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
	}

	min_low_pfn = PFN_UP(phys_ram_base);
	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();

	/*
	 * No allocation should be done before reserving the memory as defined
	 * in the device tree, otherwise the allocation could end up in a
	 * reserved region.
	 */
	early_init_fdt_scan_reserved_mem();

	/*
	 * If DTB is built in, no need to reserve its memblock.
	 * Otherwise, do reserve it but avoid using
	 * early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	dma_contiguous_reserve(dma32_phys_limit);
	if (IS_ENABLED(CONFIG_64BIT))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_MMU
struct pt_alloc_ops pt_ops __meminitdata;

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define pt_ops			(*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
#define trampoline_pg_dir	((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte		((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir		((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READ,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READ_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READ,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *__meminit get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __meminit alloc_pte_late(uintptr_t va)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);

	BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc));
	return __pa((pte_t *)ptdesc_address(ptdesc));
}

static void __meminit create_pte_mapping(pte_t *ptep, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
					 pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd	((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define fixmap_pmd	((pmd_t *)XIP_FIXUP(fixmap_pmd))
#define early_pmd	((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */

static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_p4d	((p4d_t *)XIP_FIXUP(trampoline_p4d))
#define fixmap_p4d	((p4d_t *)XIP_FIXUP(fixmap_p4d))
#define early_p4d	((p4d_t *)XIP_FIXUP(early_p4d))
#endif /* CONFIG_XIP_KERNEL */

static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pud	((pud_t *)XIP_FIXUP(trampoline_pud))
#define fixmap_pud	((pud_t *)XIP_FIXUP(fixmap_pud))
#define early_pud	((pud_t *)XIP_FIXUP(early_pud))
#endif /* CONFIG_XIP_KERNEL */

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *__meminit get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);

	BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc));
	return __pa((pmd_t *)ptdesc_address(ptdesc));
}

static void __meminit create_pmd_mapping(pmd_t *pmdp,
					 uintptr_t va, phys_addr_t pa,
					 phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

static pud_t *__init get_pud_virt_early(phys_addr_t pa)
{
	return (pud_t *)((uintptr_t)pa);
}

static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PUD);
	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
}

static pud_t *__meminit get_pud_virt_late(phys_addr_t pa)
{
	return (pud_t *)__va(pa);
}

static phys_addr_t __init alloc_pud_early(uintptr_t va)
{
	/* Only one PUD is available for early mapping */
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pud;
}

static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __meminit alloc_pud_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
{
	return (p4d_t *)((uintptr_t)pa);
}

static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_P4D);
	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
}

static p4d_t *__meminit get_p4d_virt_late(phys_addr_t pa)
{
	return (p4d_t *)__va(pa);
}

static phys_addr_t __init alloc_p4d_early(uintptr_t va)
{
	/* Only one P4D is available for early mapping */
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_p4d;
}

static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __meminit alloc_p4d_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __meminit create_pud_mapping(pud_t *pudp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
					 pgprot_t prot)
{
	pmd_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pud_index = pud_index(va);

	if (sz == PUD_SIZE) {
		if (pud_val(pudp[pud_index]) == 0)
			pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
		return;
	}

	if (pud_val(pudp[pud_index]) == 0) {
		next_phys = pt_ops.alloc_pmd(va);
		pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = pt_ops.get_pmd_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index]));
		nextp = pt_ops.get_pmd_virt(next_phys);
	}

	create_pmd_mapping(nextp, va, pa, sz, prot);
}

static void __meminit create_p4d_mapping(p4d_t *p4dp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
					 pgprot_t prot)
{
	pud_t *nextp;
	phys_addr_t next_phys;
	uintptr_t p4d_index = p4d_index(va);

	if (sz == P4D_SIZE) {
		if (p4d_val(p4dp[p4d_index]) == 0)
			p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
		return;
	}

	if (p4d_val(p4dp[p4d_index]) == 0) {
		next_phys = pt_ops.alloc_pud(va);
		p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = pt_ops.get_pud_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
		nextp = pt_ops.get_pud_virt(next_phys);
	}

	create_pud_mapping(nextp, va, pa, sz, prot);
}

#define pgd_next_t		p4d_t
#define alloc_pgd_next(__va)	(pgtable_l5_enabled ?			\
		pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ?		\
		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
#define get_pgd_next_virt(__pa)	(pgtable_l5_enabled ?			\
		pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ?	\
		pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
				(pgtable_l5_enabled ?			\
		create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) :	\
				(pgtable_l4_enabled ?			\
		create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) :	\
		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
#define fixmap_pgd_next		(pgtable_l5_enabled ?			\
		(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ?		\
		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
#define trampoline_pgd_next	(pgtable_l5_enabled ?			\
		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		((uintptr_t)fixmap_pte)
#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#endif /* __PAGETABLE_PMD_FOLDED */

void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
				  pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __meminit best_map_size(phys_addr_t pa, uintptr_t va, phys_addr_t size)
{
	if (debug_pagealloc_enabled())
		return PAGE_SIZE;

	if (pgtable_l5_enabled &&
	    !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
		return P4D_SIZE;

	if (pgtable_l4_enabled &&
	    !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
		return PUD_SIZE;

	if (IS_ENABLED(CONFIG_64BIT) &&
	    !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
		return PMD_SIZE;

	return PAGE_SIZE;
}

#ifdef CONFIG_XIP_KERNEL
#define phys_ram_base	(*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
extern char _xiprom[], _exiprom[], __data_loc;

/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
	void *from = (void *)(&__data_loc);
	void *to = (void *)CONFIG_PHYS_RAM_BASE;
	size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));

	memcpy(to, from, sz);
}
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
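/*
 * Pick the protections for a kernel virtual address: keep the kernel text
 * executable, keep the 64-bit linear-mapping alias of the text read-only and
 * non-executable, and map everything else as normal kernel data.
 */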
static __meminit pgprot_t pgprot_from_va(uintptr_t va)
{
	if (is_va_kernel_text(va))
		return PAGE_KERNEL_READ_EXEC;

	/*
	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
	 * we must protect its linear mapping alias from being executed and
	 * written.
	 * And rodata section is marked readonly in mark_rodata_ro.
	 */
	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
		return PAGE_KERNEL_READ;

	return PAGE_KERNEL;
}

void mark_rodata_ro(void)
{
	set_kernel_memory(__start_rodata, _data, set_memory_ro);
	if (IS_ENABLED(CONFIG_64BIT))
		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
				  set_memory_ro);
}
#else
static __meminit pgprot_t pgprot_from_va(uintptr_t va)
{
	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
		return PAGE_KERNEL;

	return PAGE_KERNEL_EXEC;
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
u64 __pi_set_satp_mode_from_cmdline(uintptr_t dtb_pa);

static void __init disable_pgtable_l5(void)
{
	pgtable_l5_enabled = false;
	kernel_map.page_offset = PAGE_OFFSET_L4;
	satp_mode = SATP_MODE_48;
}

static void __init disable_pgtable_l4(void)
{
	pgtable_l4_enabled = false;
	kernel_map.page_offset = PAGE_OFFSET_L3;
	satp_mode = SATP_MODE_39;
}

static int __init print_no4lvl(char *p)
{
	pr_info("Disabled 4-level and 5-level paging");
	return 0;
}
early_param("no4lvl", print_no4lvl);

static int __init print_no5lvl(char *p)
{
	pr_info("Disabled 5-level paging");
	return 0;
}
early_param("no5lvl", print_no5lvl);

static void __init set_mmap_rnd_bits_max(void)
{
	mmap_rnd_bits_max = MMAP_VA_BITS - PAGE_SHIFT - 3;
}

/*
 * There is a simple way to determine if 4-level is supported by the
 * underlying hardware: establish 1:1 mapping in 4-level page table mode
 * then read SATP to see if the configuration was taken into account
 * meaning sv48 is supported.
 */
static __init void set_satp_mode(uintptr_t dtb_pa)
{
	u64 identity_satp, hw_satp;
	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
	u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);

	if (satp_mode_cmdline == SATP_MODE_57) {
		disable_pgtable_l5();
	} else if (satp_mode_cmdline == SATP_MODE_48) {
		disable_pgtable_l5();
		disable_pgtable_l4();
		return;
	}

	create_p4d_mapping(early_p4d,
			   set_satp_mode_pmd, (uintptr_t)early_pud,
			   P4D_SIZE, PAGE_TABLE);
	create_pud_mapping(early_pud,
			   set_satp_mode_pmd, (uintptr_t)early_pmd,
			   PUD_SIZE, PAGE_TABLE);
	/* Handle the case where set_satp_mode straddles 2 PMDs */
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd, set_satp_mode_pmd,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd + PMD_SIZE,
			   set_satp_mode_pmd + PMD_SIZE,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
retry:
	create_pgd_mapping(early_pg_dir,
			   set_satp_mode_pmd,
			   pgtable_l5_enabled ?
				(uintptr_t)early_p4d : (uintptr_t)early_pud,
			   PGDIR_SIZE, PAGE_TABLE);

	identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;

	local_flush_tlb_all();
	csr_write(CSR_SATP, identity_satp);
	hw_satp = csr_swap(CSR_SATP, 0ULL);
	local_flush_tlb_all();

	if (hw_satp != identity_satp) {
		if (pgtable_l5_enabled) {
			disable_pgtable_l5();
			memset(early_pg_dir, 0, PAGE_SIZE);
			goto retry;
		}
		disable_pgtable_l4();
	}

	memset(early_pg_dir, 0, PAGE_SIZE);
	memset(early_p4d, 0, PAGE_SIZE);
	memset(early_pud, 0, PAGE_SIZE);
	memset(early_pmd, 0, PAGE_SIZE);
}
#endif

/*
 * setup_vm() is called from head.S with MMU-off.
 *
 * Following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

#ifdef CONFIG_RELOCATABLE
extern unsigned long __rela_dyn_start, __rela_dyn_end;

static void __init relocate_kernel(void)
{
	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
	/*
	 * This holds the offset between the linked virtual address and the
	 * relocated virtual address.
	 */
	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
	/*
	 * This holds the offset between kernel linked virtual address and
	 * physical address.
	 */
	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;

	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
		Elf64_Addr relocated_addr = rela->r_addend;

		if (rela->r_info != R_RISCV_RELATIVE)
			continue;

		/*
		 * Make sure to not relocate vdso symbols like rt_sigreturn
		 * which are linked from the address 0 in vmlinux since
		 * vdso symbol addresses are actually used as an offset from
		 * mm->context.vdso in VDSO_OFFSET macro.
		 */
		if (relocated_addr >= KERNEL_LINK_ADDR)
			relocated_addr += reloc_offset;

		*(Elf64_Addr *)addr = relocated_addr;
	}
}
#endif /* CONFIG_RELOCATABLE */

#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
					    __always_unused bool early)
{
	uintptr_t va, end_va;

	/* Map the flash resident part */
	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.xiprom + (va - kernel_map.virt_addr),
				   PMD_SIZE, PAGE_KERNEL_EXEC);

	/* Map the data in RAM */
	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
				   PMD_SIZE, PAGE_KERNEL);
}
#else
static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
	uintptr_t va, end_va;

	end_va = kernel_map.virt_addr + kernel_map.size;
	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
				   PMD_SIZE,
				   early ?
					PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif

/*
 * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
 * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
 * entry.
 */
static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
					       uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);

	/* Make sure the fdt fixmap address is always aligned on PMD size */
	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));

	/* In 32-bit only, the fdt lies in its own PGD */
	if (!IS_ENABLED(CONFIG_64BIT)) {
		create_pgd_mapping(early_pg_dir, fix_fdt_va,
				   pa, MAX_FDT_SIZE, PAGE_KERNEL);
	} else {
		create_pmd_mapping(fixmap_pmd, fix_fdt_va,
				   pa, PMD_SIZE, PAGE_KERNEL);
		create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	}

	dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
#else
	/*
	 * For 64-bit kernel, __va can't be used since it would return a linear
	 * mapping address whereas dtb_early_va will be used before
	 * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
	 * kernel is mapped in the linear mapping, that makes no difference.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#endif

	dtb_early_pa = dtb_pa;
}

/*
 * MMU is not enabled, the page tables are allocated directly using
 * early_pmd/pud/p4d and the address returned is the physical one.
 */
static void __init pt_ops_set_early(void)
{
	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
	pt_ops.alloc_pud = alloc_pud_early;
	pt_ops.get_pud_virt = get_pud_virt_early;
	pt_ops.alloc_p4d = alloc_p4d_early;
	pt_ops.get_p4d_virt = get_p4d_virt_early;
#endif
}

/*
 * MMU is enabled but page table setup is not complete yet.
 * fixmap page table alloc functions must be used as a means to temporarily
 * map the allocated physical pages since the linear mapping does not exist yet.
 *
 * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
 * but it will be used as described above.
 */
static void __init pt_ops_set_fixmap(void)
{
	pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
	pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap);
	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap);
	pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap);
	pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap);
	pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap);
	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap);
#endif
}

/*
 * MMU is enabled and page table setup is complete, so from now, we can use
 * generic page allocation functions to setup page table.
 */
static void __init pt_ops_set_late(void)
{
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
	pt_ops.alloc_pud = alloc_pud_late;
	pt_ops.get_pud_virt = get_pud_virt_late;
	pt_ops.alloc_p4d = alloc_p4d_late;
	pt_ops.get_p4d_virt = get_p4d_virt_late;
#endif
}

#ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);

static int __init print_nokaslr(char *p)
{
	pr_info("Disabled KASLR");
	return 0;
}
early_param("nokaslr", print_nokaslr);

unsigned long kaslr_offset(void)
{
	return kernel_map.virt_offset;
}
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;

#ifdef CONFIG_RANDOMIZE_BASE
	if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
		u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
		u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
		u32 nr_pos;

		/*
		 * Compute the number of positions available: we are limited
		 * by the early page table that only has one PUD and we must
		 * be aligned on PMD_SIZE.
		 */
		nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;

		kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
	}
#endif

	kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;

#ifdef CONFIG_XIP_KERNEL
#ifdef CONFIG_64BIT
	kernel_map.page_offset = PAGE_OFFSET_L3;
#else
	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#endif
	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

	phys_ram_base = CONFIG_PHYS_RAM_BASE;
	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);

	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
	kernel_map.phys_addr = (uintptr_t)(&_start);
	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif

#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
	set_satp_mode(dtb_pa);
	set_mmap_rnd_bits_max();
#endif

	/*
	 * In 64-bit, we defer the setup of va_pa_offset to setup_bootmem,
	 * where we have the system memory layout: this allows us to align
	 * the physical and virtual mappings and then make use of PUD/P4D/PGD
	 * for the linear mapping. This is only possible because the kernel
	 * mapping lies outside the linear mapping.
	 * In 32-bit however, as the kernel resides in the linear mapping,
	 * setup_vm_final can not change the mapping established here,
	 * otherwise the same kernel addresses would get mapped to different
	 * physical addresses (if the start of dram is different from the
	 * kernel physical address start).
	 */
	kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
				  0UL : PAGE_OFFSET - kernel_map.phys_addr;
	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;

	/*
	 * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
	 * kernel, whereas for 64-bit kernel, the end of the virtual address
	 * space is occupied by the modules/BPF/kernel mappings which reduces
	 * the available size of the linear mapping.
	 */
	memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);

#ifdef CONFIG_64BIT
	/*
	 * The last 4K bytes of the addressable memory can not be mapped because
	 * of IS_ERR_VALUE macro.
	 */
	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif

#ifdef CONFIG_RELOCATABLE
	/*
	 * Early page table uses only one PUD, which makes it possible
	 * to map PUD_SIZE aligned on PUD_SIZE: if the relocation offset
	 * makes the kernel cross over a PUD_SIZE boundary, raise a bug
	 * since a part of the kernel would not get mapped.
	 */
	BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
	relocate_kernel();
#endif

	apply_early_boot_alternatives();
	pt_ops_set_early();

	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap P4D and PUD */
	if (pgtable_l5_enabled)
		create_p4d_mapping(fixmap_p4d, FIXADDR_START,
				   (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
	/* Setup fixmap PUD and PMD */
	if (pgtable_l4_enabled)
		create_pud_mapping(fixmap_pud, FIXADDR_START,
				   (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
	if (pgtable_l5_enabled)
		create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
				   (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
	if (pgtable_l4_enabled)
		create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
				   (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, true);

	/* Setup early mapping for FDT early scan */
	create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range can not span multiple PMDs.
	 */
	BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
	       != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * Early ioremap fixmap is already created as it lies within first 2MB
	 * of fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END
	 * and FIX_BTMAP_BEGIN should lie in the same pmd. Verify that and warn
	 * the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
#endif

	pt_ops_set_fixmap();
}

static void __meminit create_linear_mapping_range(phys_addr_t start, phys_addr_t end,
						  uintptr_t fixed_map_size, const pgprot_t *pgprot)
{
	phys_addr_t pa;
	uintptr_t va, map_size;

	for (pa = start; pa < end; pa += map_size) {
		va = (uintptr_t)__va(pa);
		map_size = fixed_map_size ? fixed_map_size :
					    best_map_size(pa, va, end - pa);

		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
				   pgprot ? *pgprot : pgprot_from_va(va));
	}
}

static void __init create_linear_mapping_page_table(void)
{
	phys_addr_t start, end;
	phys_addr_t kfence_pool __maybe_unused;
	u64 i;

#ifdef CONFIG_STRICT_KERNEL_RWX
	phys_addr_t ktext_start = __pa_symbol(_start);
	phys_addr_t ktext_size = __init_data_begin - _start;
	phys_addr_t krodata_start = __pa_symbol(__start_rodata);
	phys_addr_t krodata_size = _data - __start_rodata;

	/* Isolate kernel text and rodata so they don't get mapped with a PUD */
	memblock_mark_nomap(ktext_start, ktext_size);
	memblock_mark_nomap(krodata_start, krodata_size);
#endif

#ifdef CONFIG_KFENCE
	/*
	 * kfence pool must be backed by PAGE_SIZE mappings, so allocate it
	 * before we setup the linear mapping so that we avoid using hugepages
	 * for this region.
	 */
	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	BUG_ON(!kfence_pool);

	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
	__kfence_pool = __va(kfence_pool);
#endif

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);
		if (end >= __pa(PAGE_OFFSET) + memory_limit)
			end = __pa(PAGE_OFFSET) + memory_limit;

		create_linear_mapping_range(start, end, 0, NULL);
	}

#ifdef CONFIG_STRICT_KERNEL_RWX
	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0, NULL);
	create_linear_mapping_range(krodata_start, krodata_start + krodata_size, 0, NULL);

	memblock_clear_nomap(ktext_start, ktext_size);
	memblock_clear_nomap(krodata_start, krodata_size);
#endif

#ifdef CONFIG_KFENCE
	create_linear_mapping_range(kfence_pool, kfence_pool + KFENCE_POOL_SIZE, PAGE_SIZE, NULL);

	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
#endif
}

static void __init setup_vm_final(void)
{
	/* Setup swapper PGD for fixmap */
#if !defined(CONFIG_64BIT)
	/*
	 * In 32-bit, the device tree lies in a pgd entry, so it must be copied
	 * directly in swapper_pg_dir in addition to the pgd entry that points
	 * to fixmap_pte.
	 */
	unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT));

	set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]);
#endif
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map the linear mapping */
	create_linear_mapping_page_table();

	/* Map the kernel */
	if (IS_ENABLED(CONFIG_64BIT))
		create_kernel_page_table(swapper_pg_dir, false);

#ifdef CONFIG_KASAN
	kasan_swapper_init();
#endif

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);
	clear_fixmap(FIX_PUD);
	clear_fixmap(FIX_P4D);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();

	pt_ops_set_late();
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

/*
 * arch_reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init arch_reserve_crashkernel(void)
{
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}

void __init paging_init(void)
{
	setup_bootmem();
	setup_vm_final();

	/* Depends on the linear mapping being ready */
	memblock_allow_resize();
}

void __init misc_mem_init(void)
{
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
	arch_numa_init();
	sparse_init();
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* The entire VMEMMAP region has been populated. Flush TLB for this region */
	local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
#endif
	zone_sizes_init();
	arch_reserve_crashkernel();
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
}

int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
				unsigned long addr, unsigned long next)
{
	vmemmap_verify((pte_t *)pmdp, node, addr, next);
	return 1;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	/*
	 * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we
	 * can't use hugepage mappings for 2-level page table because in case of
	 * memory hotplug, we are not able to update all the page tables with
	 * the new PMDs.
	 */
	return vmemmap_populate_hugepages(start, end, node, altmap);
}
#endif

#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
/*
 * Pre-allocates page-table pages for a specific area in the kernel
 * page-table. Only the level which needs to be synchronized between
 * all page-tables is allocated because the synchronization can be
 * expensive.
 */
static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
					       const char *area)
{
	unsigned long addr;
	const char *lvl;

	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		lvl = "p4d";
		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			goto failed;

		if (pgtable_l5_enabled)
			continue;

		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			goto failed;

		if (pgtable_l4_enabled)
			continue;

		lvl = "pmd";
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			goto failed;
	}
	return;

failed:
	/*
	 * The pages have to be there now or they will be missing in
	 * process page-tables later.
	 */
	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
}

#define PAGE_END KASAN_SHADOW_START

void __init pgtable_cache_init(void)
{
	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
	if (IS_ENABLED(CONFIG_MODULES))
		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		preallocate_pgd_pages_range(VMEMMAP_START, VMEMMAP_END, "vmemmap");
		preallocate_pgd_pages_range(PAGE_OFFSET, PAGE_END, "direct map");
		if (IS_ENABLED(CONFIG_KASAN))
			preallocate_pgd_pages_range(KASAN_SHADOW_START, KASAN_SHADOW_END, "kasan");
	}
}
#endif

#ifdef CONFIG_EXECMEM
#ifdef CONFIG_MMU
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start = MODULES_VADDR,
				.end = MODULES_END,
				.pgprot = PAGE_KERNEL,
				.alignment = 1,
			},
			[EXECMEM_KPROBES] = {
				.start = VMALLOC_START,
				.end = VMALLOC_END,
				.pgprot = PAGE_KERNEL_READ_EXEC,
				.alignment = 1,
			},
			[EXECMEM_BPF] = {
				.start = BPF_JIT_REGION_START,
				.end = BPF_JIT_REGION_END,
				.pgprot = PAGE_KERNEL,
				.alignment = PAGE_SIZE,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_EXECMEM */

#ifdef CONFIG_MEMORY_HOTPLUG
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	struct ptdesc *ptdesc = page_ptdesc(page);
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pagetable_pte_dtor(ptdesc);
	if (PageReserved(page))
		free_reserved_page(page);
	else
		pagetable_free(ptdesc);
	pmd_clear(pmd);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	struct page *page = pud_page(*pud);
	struct ptdesc *ptdesc = page_ptdesc(page);
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pagetable_pmd_dtor(ptdesc);
	if (PageReserved(page))
		free_reserved_page(page);
	else
		pagetable_free(ptdesc);
	pud_clear(pud);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	struct page *page = p4d_page(*p4d);
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	if (PageReserved(page))
		free_reserved_page(page);
	else
		free_pages((unsigned long)page_address(page), 0);
	p4d_clear(p4d);
}

static void __meminit free_vmemmap_storage(struct page *page, size_t size,
					   struct vmem_altmap *altmap)
{
	int order = get_order(size);

	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
		return;
	}

	if (PageReserved(page)) {
		unsigned int nr_pages = 1 << order;

		while (nr_pages--)
			free_reserved_page(page++);
		return;
	}

	free_pages((unsigned long)page_address(page), order);
}

static void __meminit remove_pte_mapping(pte_t *pte_base, unsigned long addr, unsigned long end,
					 bool is_vmemmap, struct vmem_altmap *altmap)
{
	unsigned long next;
	pte_t *ptep, pte;

	for (; addr < end; addr = next) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		ptep = pte_base + pte_index(addr);
		pte = ptep_get(ptep);
		if (!pte_present(*ptep))
			continue;

		pte_clear(&init_mm, addr, ptep);
		if (is_vmemmap)
			free_vmemmap_storage(pte_page(pte), PAGE_SIZE, altmap);
	}
}

static void __meminit remove_pmd_mapping(pmd_t *pmd_base, unsigned long addr, unsigned long end,
					 bool is_vmemmap, struct vmem_altmap *altmap)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmdp, pmd;

	for (; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_base + pmd_index(addr);
		pmd = pmdp_get(pmdp);
		if (!pmd_present(pmd))
			continue;

		if (pmd_leaf(pmd)) {
			pmd_clear(pmdp);
			if (is_vmemmap)
				free_vmemmap_storage(pmd_page(pmd), PMD_SIZE, altmap);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmdp);
		remove_pte_mapping(pte_base, addr, next, is_vmemmap, altmap);
		free_pte_table(pte_base, pmdp);
	}
}

static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, unsigned long end,
					 bool is_vmemmap, struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;
	pmd_t *pmd_base;

	for (; addr < end; addr = next) {
		next = pud_addr_end(addr, end);
		pudp = pud_base + pud_index(addr);
		pud = pudp_get(pudp);
		if (!pud_present(pud))
			continue;

		if (pud_leaf(pud)) {
			if (pgtable_l4_enabled) {
				pud_clear(pudp);
				if (is_vmemmap)
					free_vmemmap_storage(pud_page(pud), PUD_SIZE, altmap);
			}
			continue;
		}

		pmd_base = pmd_offset(pudp, 0);
		remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);

		if (pgtable_l4_enabled)
			free_pmd_table(pmd_base, pudp);
	}
}

static void __meminit remove_p4d_mapping(p4d_t *p4d_base, unsigned long addr, unsigned long end,
					 bool is_vmemmap, struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;
	pud_t *pud_base;

	for (; addr < end; addr = next) {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_base + p4d_index(addr);
		p4d = p4dp_get(p4dp);
		if (!p4d_present(p4d))
			continue;

		if (p4d_leaf(p4d)) {
			if (pgtable_l5_enabled) {
				p4d_clear(p4dp);
				if (is_vmemmap)
					free_vmemmap_storage(p4d_page(p4d), P4D_SIZE, altmap);
			}
			continue;
		}

		pud_base = pud_offset(p4dp, 0);
		remove_pud_mapping(pud_base, addr, next, is_vmemmap, altmap);

		if (pgtable_l5_enabled)
			free_pud_table(pud_base, p4dp);
	}
}

static void __meminit remove_pgd_mapping(unsigned long va, unsigned long end, bool is_vmemmap,
					 struct vmem_altmap *altmap)
{
	unsigned long addr, next;
	p4d_t *p4d_base;
	pgd_t *pgd;

	for (addr = va; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!pgd_present(*pgd))
			continue;

		if (pgd_leaf(*pgd))
			continue;

		p4d_base = p4d_offset(pgd, 0);
		remove_p4d_mapping(p4d_base, addr, next, is_vmemmap, altmap);
	}

	flush_tlb_all();
}

static void __meminit remove_linear_mapping(phys_addr_t start, u64 size)
{
	unsigned long va = (unsigned long)__va(start);
	unsigned long end = (unsigned long)__va(start + size);

	remove_pgd_mapping(va, end, false, NULL);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = __pa(PAGE_OFFSET);
	mhp_range.end = __pa(PAGE_END - 1);
	return mhp_range;
}

int __ref arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	int ret = 0;

	create_linear_mapping_range(start, start + size, 0, &params->pgprot);
	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, params);
	if (ret) {
		remove_linear_mapping(start, size);
		goto out;
	}

	max_pfn = PFN_UP(start + size);
	max_low_pfn = max_pfn;

out:
	flush_tlb_all();
	return ret;
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	__remove_pages(start >> PAGE_SHIFT, size >> PAGE_SHIFT, altmap);
	remove_linear_mapping(start, size);
	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
	remove_pgd_mapping(start, end, true, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */