// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	return ((old ^ new) & ~mask) == 0;
}

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	pgd_t pgd = READ_ONCE(*pgdp);

	if (pgd_none(pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
		pgd = READ_ONCE(*pgdp);
	}
	BUG_ON(pgd_bad(pgd));

	pudp = pud_set_fixmap_offset(pgdp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, __pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open coded check for BTI, only for use to determine configuration
 * for early mappings for before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size)
{
	WARN_ON(PageReserved(page));
	free_pages((unsigned long)page_address(page), get_order(size));
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte), PAGE_SIZE);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped);
	} while (addr = next, addr < end);
}

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(pgdp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
#ifdef CONFIG_MEMORY_HOTPLUG
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
#endif
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pgd_t pgd = READ_ONCE(*pgdp);

	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));

	return pud_offset_kimg(pgdp, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp, pgd;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	pgd = READ_ONCE(*pgdp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(pgdp, addr);
	} else {
		if (pgd_none(pgd))
			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	    || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
	/*
	 * Only 4k granule supports level 1 block mappings.
	 * SW table walks can't handle removal of intermediate entries.
	 */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
	/* See arch_ioremap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			  params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed as it blocks pfn range offlining process in
 * __offline_pages(). Hence this prevents both offlining as well as
 * removal process for boot memory which is initially always online.
 * In future if and when boot memory could be removed, this notifier
 * should be dropped and free_hotplug_page_range() should handle any
 * reserved pages allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);
		if (early_section(ms))
			return NOTIFY_BAD;
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

static int __init prevent_bootmem_remove_init(void)
{
	return register_memory_notifier(&prevent_bootmem_remove_nb);
}
device_initcall(prevent_bootmem_remove_init);
#endif