// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}
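
/*
 * Changing most attributes of a live kernel mapping requires a
 * break-before-make sequence (invalidate the old entry and its TLB entries
 * before installing the new one). pgattr_change_is_safe() whitelists the
 * few attribute bits that may be updated in place; callers check it before
 * touching an entry that is already valid.
 */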
static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	return ((old ^ new) & ~mask) == 0;
}

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}
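
/*
 * The contiguous hint (PTE_CONT) marks a naturally aligned group of entries
 * that map a physically contiguous range with identical attributes, so the
 * TLB may cache the whole group in a single entry. The alloc_init_cont_pte()
 * and alloc_init_cont_pmd() helpers only set the hint when virtual address,
 * physical address and size are all aligned to the contiguous range, and
 * never when NO_CONT_MAPPINGS is requested.
 */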
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}
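
/*
 * __create_pgd_mapping() is the common entry point for building kernel
 * mappings: it walks pgd -> (p4d) -> pud -> pmd -> pte, installing the
 * largest mapping each step allows (1GB PUD block, 2MB PMD section,
 * contiguous hint, or individual pages) unless NO_BLOCK_MAPPINGS or
 * NO_CONT_MAPPINGS forces a finer granularity. For example, __map_memblock()
 * below maps a memblock region into the linear map with:
 *
 *	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
 *			     prot, early_pgtable_alloc, flags);
 */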
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}
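
/*
 * map_mem() creates the linear ("direct") mapping: every usable memblock
 * region is mapped at __phys_to_virt() of its physical address with
 * PAGE_KERNEL permissions. The kernel image alias and (when present) the
 * crashkernel region are handled specially, as described below.
 */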
static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, __pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open coded check for BTI, only for use to determine configuration
 * for early mappings before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}
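
/*
 * paging_init() installs the final page tables: map_kernel() and map_mem()
 * populate swapper_pg_dir (written through its fixmap slot), TTBR1 is then
 * switched from the early init_pg_dir over to swapper_pg_dir, and the
 * init_pg_dir pages are given back to memblock.
 */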
void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return 0;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size)
{
	WARN_ON(PageReserved(page));
	free_pages((unsigned long)page_address(page), get_order(size));
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte), PAGE_SIZE);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}
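
/*
 * The free_empty_*_table() helpers below walk a just-unmapped range and
 * release page-table pages that no longer contain any valid entries. The
 * floor/ceiling arguments bound the region that may be freed, so a table
 * that is partially shared with a neighbouring mapping is left in place
 * (see pgtable_range_aligned()).
 */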
static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif
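
/*
 * The vmemmap_populate() variants below back the virtually contiguous
 * struct page array: with base pages when ARM64_SWAPPER_USES_SECTION_MAPS
 * is not set, otherwise with PMD-sized section mappings built from blocks
 * returned by vmemmap_alloc_block_buf().
 */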
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		p4dp = vmemmap_p4d_populate(pgdp, addr, node);
		if (!p4dp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(p4dp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
#ifdef CONFIG_MEMORY_HOTPLUG
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
#endif
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

	return pud_offset_kimg(p4dp, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp;
	p4d_t *p4dp, p4d;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	p4dp = p4d_offset(pgdp, addr);
	p4d = READ_ONCE(*p4dp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(p4dp, addr);
	} else {
		if (p4d_none(p4d))
			__p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
	/*
	 * Only 4k granule supports level 1 block mappings.
	 * SW table walks can't handle removal of intermediate entries.
	 */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
	/* See arch_ioremap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			  params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed as it blocks pfn range offlining process in
 * __offline_pages(). Hence this prevents both offlining as well as
 * removal process for boot memory which is initially always online.
 * In future if and when boot memory could be removed, this notifier
 * should be dropped and free_hotplug_page_range() should handle any
 * reserved pages allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);
		if (early_section(ms))
			return NOTIFY_BAD;
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

static int __init prevent_bootmem_remove_init(void)
{
	return register_memory_notifier(&prevent_bootmem_remove_nb);
}
device_initcall(prevent_bootmem_remove_init);
#endif