// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/kfence.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
#define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };

static bool rodata_is_rw __ro_after_init = true;

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
long __section(".mmuoff.data.write") __early_cpu_boot_status;

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);

void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	/*
	 * Don't bother with the fixmap if swapper_pg_dir is still mapped
	 * writable in the kernel mapping.
	 */
	if (rodata_is_rw) {
		WRITE_ONCE(*pgdp, pgd);
		dsb(ishst);
		isb();
		return;
	}

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_is_map_memory(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;

	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_NOLEAKTRACE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	return phys;
}

bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG |
			PTE_SWBITS_MASK;

	/* creating or taking down mappings is always safe */
	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
		return true;

	/* A live entry's pfn should not change */
	if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
		return false;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}

static void init_clear_pgtable(void *table)
{
	clear_page(table);

	/* Ensure the zeroing is observed by page table walks. */
	dsb(ishst);
}

static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	do {
		pte_t old_pte = __ptep_get(ptep);

		/*
		 * Required barriers to make this visible to the table walker
		 * are deferred to the end of alloc_init_cont_pte().
		 */
		__set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      pte_val(__ptep_get(ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);
	pte_t *ptep;

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
		phys_addr_t pte_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pmdval |= PMD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		ptep = pte_set_fixmap(pte_phys);
		init_clear_pgtable(ptep);
		ptep += pte_index(addr);
		__pmd_populate(pmdp, pte_phys, pmdval);
	} else {
		BUG_ON(pmd_bad(pmd));
		ptep = pte_set_fixmap_offset(pmdp, addr);
	}

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(ptep, addr, next, phys, __prot);

		ptep += pte_index(next) - pte_index(addr);
		phys += next - addr;
	} while (addr = next, addr != end);

	/*
	 * Note: barriers and maintenance necessary to clear the fixmap slot
	 * ensure that all previous pgtable writes are visible to the table
	 * walker.
	 */
	pte_clear_fixmap();
}

static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;

	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
		phys_addr_t pmd_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pudval |= PUD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		pmdp = pmd_set_fixmap(pmd_phys);
		init_clear_pgtable(pmdp);
		pmdp += pmd_index(addr);
		__pud_populate(pudp, pmd_phys, pudval);
	} else {
		BUG_ON(pud_bad(pud));
		pmdp = pmd_set_fixmap_offset(pudp, addr);
	}

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags);

		pmdp += pmd_index(next) - pmd_index(addr);
		phys += next - addr;
	} while (addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	if (p4d_none(p4d)) {
		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
		phys_addr_t pud_phys;

		if (flags & NO_EXEC_MAPPINGS)
			p4dval |= P4D_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		pudp = pud_set_fixmap(pud_phys);
		init_clear_pgtable(pudp);
		pudp += pud_index(addr);
		__p4d_populate(p4dp, pud_phys, p4dval);
	} else {
		BUG_ON(p4d_bad(p4d));
		pudp = pud_set_fixmap_offset(p4dp, addr);
	}

	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (pud_sect_supported() &&
		    ((addr | next | phys) & ~PUD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pgd_t pgd = READ_ONCE(*pgdp);
	p4d_t *p4dp;

	if (pgd_none(pgd)) {
		pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN;
		phys_addr_t p4d_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pgdval |= PGD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		p4d_phys = pgtable_alloc(P4D_SHIFT);
		p4dp = p4d_set_fixmap(p4d_phys);
		init_clear_pgtable(p4dp);
		p4dp += p4d_index(addr);
		__pgd_populate(pgdp, p4d_phys, pgdval);
	} else {
		BUG_ON(pgd_bad(pgd));
		p4dp = p4d_set_fixmap_offset(pgdp, addr);
	}

	do {
		p4d_t old_p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);

		alloc_init_pud(p4dp, addr, next, phys, prot,
			       pgtable_alloc, flags);

		BUG_ON(p4d_val(old_p4d) != 0 &&
		       p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp)));

		phys += next - addr;
	} while (p4dp++, addr = next, addr != end);

	p4d_clear_fixmap();
}

static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
					unsigned long virt, phys_addr_t size,
					pgprot_t prot,
					phys_addr_t (*pgtable_alloc)(int),
					int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	mutex_lock(&fixmap_lock);
	__create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
				    pgtable_alloc, flags);
	mutex_unlock(&fixmap_lock);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
extern __alias(__create_pgd_mapping_locked)
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     phys_addr_t (*pgtable_alloc)(int), int flags);
#endif

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL & ~__GFP_ZERO);

	BUG_ON(!ptr);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);
	struct ptdesc *ptdesc = page_ptdesc(phys_to_page(pa));

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pagetable_pte_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pagetable_pte_ctor(ptdesc));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pagetable_pmd_ctor(ptdesc));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				   phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
			    (unsigned long)__init_begin - (unsigned long)_stext,
			    PAGE_KERNEL_RO);
}

#ifdef CONFIG_KFENCE

bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;

/* early_param() will be parsed before map_mem() below. */
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);

static phys_addr_t __init arm64_kfence_alloc_pool(void)
{
	phys_addr_t kfence_pool;

	if (!kfence_early_init)
		return 0;

	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	if (!kfence_pool) {
		pr_err("failed to allocate kfence pool\n");
		kfence_early_init = false;
		return 0;
	}

	/* Temporarily mark as NOMAP. */
	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);

	return kfence_pool;
}

static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
{
	if (!kfence_pool)
		return;

	/* KFENCE pool needs page-level mapping. */
	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
		       pgprot_tagged(PAGE_KERNEL),
		       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
	__kfence_pool = phys_to_virt(kfence_pool);
}
#else /* CONFIG_KFENCE */

static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }

#endif /* CONFIG_KFENCE */

static void __init map_mem(pgd_t *pgdp)
{
	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
	phys_addr_t kernel_start = __pa_symbol(_stext);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	phys_addr_t early_kfence_pool;
	int flags = NO_EXEC_MAPPINGS;
	u64 i;

	/*
	 * Setting hierarchical PXNTable attributes on table entries covering
	 * the linear region is only possible if it is guaranteed that no table
	 * entries at any level are being shared between the linear region and
	 * the vmalloc region. Check whether this is true for the PGD level, in
	 * which case it is guaranteed to be true for all other levels as well.
	 * (Unless we are running with support for LPA2, in which case the
	 * entire reduced VA space is covered by a single pgd_t which will have
	 * been populated without the PXNTable attribute by the time we get here.)
	 */
	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) &&
		     pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1);

	early_kfence_pool = arm64_kfence_alloc_pool();

	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_stext, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
	arm64_kfence_map_pool(early_kfence_pool, pgdp);
}

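/*
 * Make the [__start_rodata, __init_begin) region read-only in the kernel
 * mapping. Clearing rodata_is_rw first means any further swapper_pg_dir
 * updates in set_swapper_pgd() go via the fixmap, since the swapper page
 * tables are no longer writable through the kernel mapping after this.
 */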
void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	WRITE_ONCE(rodata_is_rw, false);
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);
}

static void __init declare_vma(struct vm_struct *vma,
			       void *va_start, void *va_end,
			       unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static pgprot_t kernel_exec_prot(void)
{
	return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}

static int __init map_entry_trampoline(void)
{
	int i;

	if (!arm64_kernel_unmapped_at_el0())
		return 0;

	pgprot_t prot = kernel_exec_prot();
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
			     entry_tramp_text_size(), prot,
			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);

	/* Map both the text and data into the kernel page table */
	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, prot);

	if (IS_ENABLED(CONFIG_RELOCATABLE))
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Declare the VMA areas for the kernel
 */
static void __init declare_kernel_vmas(void)
{
	static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];

	declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[4], _data, _end, 0);
}

void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
		    int level, pte_t *tbl, bool may_use_cont, u64 va_offset);

static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;

static void __init create_idmap(void)
{
	u64 start = __pa_symbol(__idmap_text_start);
	u64 end = __pa_symbol(__idmap_text_end);
	u64 ptep = __pa_symbol(idmap_ptes);

	__pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX,
		       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
		       __phys_to_virt(ptep) - ptep);

	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings) {
		extern u32 __idmap_kpti_flag;
		u64 pa = __pa_symbol(&__idmap_kpti_flag);

		/*
		 * The KPTI G-to-nG conversion code needs a read-write mapping
		 * of its synchronization flag in the ID map.
		 */
		ptep = __pa_symbol(kpti_ptes);
		__pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL,
			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
			       __phys_to_virt(ptep) - ptep);
	}
}

void __init paging_init(void)
{
	map_mem(swapper_pg_dir);

	memblock_allow_resize();

	create_idmap();
	declare_kernel_vmas();
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = __ptep_get(ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		__pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * altmap can only be used as vmemmap mapping backing memory.
	 * In case the backing memory itself is not being freed, then
	 * altmap is irrelevant. Warn about this inconsistency when
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = __ptep_get(ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(__ptep_get(&ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (!pgtable_l4_enabled())
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	p4d_t *p4dp, p4d;
	unsigned long i, next, start = addr;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (!pgtable_l5_enabled())
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the p4d page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	p4dp = p4d_offset(pgdp, 0UL);
	for (i = 0; i < PTRS_PER_P4D; i++) {
		if (!p4d_none(READ_ONCE(p4dp[i])))
			return;
	}

	pgd_clear(pgdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(p4dp));
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
}

int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
				unsigned long addr, unsigned long next)
{
	vmemmap_verify((pte_t *)pmdp, node, addr, next);
	return 1;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
		return vmemmap_populate_basepages(start, end, node, altmap);
	else
		return vmemmap_populate_hugepages(start, end, node, altmap);
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true, altmap);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_huge(p4d_t *p4dp)
{
}
#endif

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

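/*
 * Tear down the PMD table that @pudp points at: first release any PTE
 * tables linked below it via pmd_free_pte_page(), then clear the PUD
 * entry, flush the walk cache for it and free the PMD page itself.
 */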
int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false, NULL);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;
	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
	u64 end_linear_pa = __pa(PAGE_END - 1);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		/*
		 * Check for a wrap, it is possible because of randomized linear
		 * mapping the start physical address is actually bigger than
		 * the end physical address. In this case set start to zero
		 * because [0, end_linear_pa] range must still be able to cover
		 * all addressable physical addresses.
		 */
		if (start_linear_pa > end_linear_pa)
			start_linear_pa = 0;
	}

	WARN_ON(start_linear_pa > end_linear_pa);

	/*
	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
	 * accommodating both its ends but excluding PAGE_END. Max physical
	 * range which can be mapped inside this linear mapping range, must
	 * also be derived from its end points.
	 */
	mhp_range.start = start_linear_pa;
	mhp_range.end = end_linear_pa;

	return mhp_range;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = NO_EXEC_MAPPINGS;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));

	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			  params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	else {
		max_pfn = PFN_UP(start + size);
		max_low_pfn = max_pfn;
	}

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed as it blocks pfn range offlining process in
 * __offline_pages(). Hence this prevents both offlining as well as
 * removal process for boot memory which is initially always online.
 * In future if and when boot memory could be removed, this notifier
 * should be dropped and free_hotplug_page_range() should handle any
 * reserved pages allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long start = PFN_PHYS(pfn);
		unsigned long end = start + (1UL << PA_SECTION_SHIFT);

		ms = __pfn_to_section(pfn);
		if (!early_section(ms))
			continue;

		if (action == MEM_GOING_OFFLINE) {
			/*
			 * Boot memory removal is not supported. Prevent
			 * it via blocking any attempted offline request
			 * for the boot memory and just report it.
			 */
			pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
			return NOTIFY_BAD;
		} else if (action == MEM_OFFLINE) {
			/*
			 * This should have never happened. Boot memory
			 * offlining should have been prevented by this
			 * very notifier. Probably some memory removal
			 * procedure might have changed which would then
			 * require further debug.
			 */
			pr_err("Boot memory [%lx %lx] offlined\n", start, end);

			/*
			 * Core memory hotplug does not process a return
			 * code from the notifier for MEM_OFFLINE events.
			 * The error condition has been reported. Return
			 * from here as if ignored.
			 */
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

/*
 * This ensures that boot memory sections on the platform are online
 * from early boot. Memory sections could not be prevented from being
 * offlined, unless for some reason they are not online to begin with.
 * This helps validate the basic assumption on which the above memory
 * event notifier works to prevent boot memory section offlining and
 * its possible removal.
 */
static void validate_bootmem_online(void)
{
	phys_addr_t start, end, addr;
	struct mem_section *ms;
	u64 i;

	/*
	 * Scanning across all memblock might be expensive
	 * on some big memory systems. Hence enable this
	 * validation only with DEBUG_VM.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	for_each_mem_range(i, &start, &end) {
		for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
			ms = __pfn_to_section(PHYS_PFN(addr));

			/*
			 * All memory ranges in the system at this point
			 * should have been marked as early sections.
			 */
			WARN_ON(!early_section(ms));

			/*
			 * Memory notifier mechanism here to prevent boot
			 * memory offlining depends on the fact that each
			 * early section memory on the system is initially
			 * online. Otherwise a given memory section which
			 * is already offline will be overlooked and can
			 * be removed completely. Call out such sections.
			 */
			if (!online_section(ms))
				pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
					addr, addr + (1UL << PA_SECTION_SHIFT));
		}
	}
}

static int __init prevent_bootmem_remove_init(void)
{
	int ret = 0;

	if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
		return ret;

	validate_bootmem_online();
	ret = register_memory_notifier(&prevent_bootmem_remove_nb);
	if (ret)
		pr_err("%s: Notifier registration failed %d\n", __func__, ret);

	return ret;
}
early_initcall(prevent_bootmem_remove_init);
#endif

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space mappings
		 * when the permission changes from executable to non-executable
		 * in cases where cpu is affected with errata #2645198.
		 */
		if (pte_user_exec(ptep_get(ptep)))
			return ptep_clear_flush(vma, addr, ptep);
	}
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
			     pte_t old_pte, pte_t pte)
{
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;
	unsigned long daif;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (cnp)
		ttbr1 |= TTBR_CNP_BIT;

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();

	/*
	 * We really don't want to take *any* exceptions while TTBR1 is
	 * in the process of being replaced so mask everything.
	 */
	daif = local_daif_save();
	replace_phys(ttbr1);
	local_daif_restore(daif);

	cpu_uninstall_idmap();
}