1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Based on arch/arm/mm/mmu.c 4 * 5 * Copyright (C) 1995-2005 Russell King 6 * Copyright (C) 2012 ARM Ltd. 7 */ 8 9 #include <linux/cache.h> 10 #include <linux/export.h> 11 #include <linux/kernel.h> 12 #include <linux/errno.h> 13 #include <linux/init.h> 14 #include <linux/ioport.h> 15 #include <linux/kexec.h> 16 #include <linux/libfdt.h> 17 #include <linux/mman.h> 18 #include <linux/nodemask.h> 19 #include <linux/memblock.h> 20 #include <linux/memremap.h> 21 #include <linux/memory.h> 22 #include <linux/fs.h> 23 #include <linux/io.h> 24 #include <linux/mm.h> 25 #include <linux/vmalloc.h> 26 #include <linux/set_memory.h> 27 #include <linux/kfence.h> 28 #include <linux/pkeys.h> 29 #include <linux/mm_inline.h> 30 #include <linux/pagewalk.h> 31 #include <linux/stop_machine.h> 32 33 #include <asm/barrier.h> 34 #include <asm/cputype.h> 35 #include <asm/fixmap.h> 36 #include <asm/kasan.h> 37 #include <asm/kernel-pgtable.h> 38 #include <asm/sections.h> 39 #include <asm/setup.h> 40 #include <linux/sizes.h> 41 #include <asm/tlb.h> 42 #include <asm/mmu_context.h> 43 #include <asm/ptdump.h> 44 #include <asm/tlbflush.h> 45 #include <asm/pgalloc.h> 46 #include <asm/kfence.h> 47 48 #define NO_BLOCK_MAPPINGS BIT(0) 49 #define NO_CONT_MAPPINGS BIT(1) 50 #define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ 51 52 DEFINE_STATIC_KEY_FALSE(arm64_ptdump_lock_key); 53 54 u64 kimage_voffset __ro_after_init; 55 EXPORT_SYMBOL(kimage_voffset); 56 57 u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 }; 58 59 static bool rodata_is_rw __ro_after_init = true; 60 61 /* 62 * The booting CPU updates the failed status @__early_cpu_boot_status, 63 * with MMU turned off. 64 */ 65 long __section(".mmuoff.data.write") __early_cpu_boot_status; 66 67 /* 68 * Empty_zero_page is a special page that is used for zero-initialized data 69 * and COW. 70 */ 71 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; 72 EXPORT_SYMBOL(empty_zero_page); 73 74 static DEFINE_SPINLOCK(swapper_pgdir_lock); 75 static DEFINE_MUTEX(fixmap_lock); 76 77 void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) 78 { 79 pgd_t *fixmap_pgdp; 80 81 /* 82 * Don't bother with the fixmap if swapper_pg_dir is still mapped 83 * writable in the kernel mapping. 84 */ 85 if (rodata_is_rw) { 86 WRITE_ONCE(*pgdp, pgd); 87 dsb(ishst); 88 isb(); 89 return; 90 } 91 92 spin_lock(&swapper_pgdir_lock); 93 fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp)); 94 WRITE_ONCE(*fixmap_pgdp, pgd); 95 /* 96 * We need dsb(ishst) here to ensure the page-table-walker sees 97 * our new entry before set_p?d() returns. The fixmap's 98 * flush_tlb_kernel_range() via clear_fixmap() does this for us. 99 */ 100 pgd_clear_fixmap(); 101 spin_unlock(&swapper_pgdir_lock); 102 } 103 104 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 105 unsigned long size, pgprot_t vma_prot) 106 { 107 if (!pfn_is_map_memory(pfn)) 108 return pgprot_noncached(vma_prot); 109 else if (file->f_flags & O_SYNC) 110 return pgprot_writecombine(vma_prot); 111 return vma_prot; 112 } 113 EXPORT_SYMBOL(phys_mem_access_prot); 114 115 static phys_addr_t __init early_pgtable_alloc(enum pgtable_type pgtable_type) 116 { 117 phys_addr_t phys; 118 119 phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 120 MEMBLOCK_ALLOC_NOLEAKTRACE); 121 if (!phys) 122 panic("Failed to allocate page table page\n"); 123 124 return phys; 125 } 126 127 bool pgattr_change_is_safe(pteval_t old, pteval_t new) 128 { 129 /* 130 * The following mapping attributes may be updated in live 131 * kernel mappings without the need for break-before-make. 132 */ 133 pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG | 134 PTE_SWBITS_MASK; 135 136 /* creating or taking down mappings is always safe */ 137 if (!pte_valid(__pte(old)) || !pte_valid(__pte(new))) 138 return true; 139 140 /* A live entry's pfn should not change */ 141 if (pte_pfn(__pte(old)) != pte_pfn(__pte(new))) 142 return false; 143 144 /* live contiguous mappings may not be manipulated at all */ 145 if ((old | new) & PTE_CONT) 146 return false; 147 148 /* Transitioning from Non-Global to Global is unsafe */ 149 if (old & ~new & PTE_NG) 150 return false; 151 152 /* 153 * Changing the memory type between Normal and Normal-Tagged is safe 154 * since Tagged is considered a permission attribute from the 155 * mismatched attribute aliases perspective. 156 */ 157 if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || 158 (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) && 159 ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || 160 (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED))) 161 mask |= PTE_ATTRINDX_MASK; 162 163 return ((old ^ new) & ~mask) == 0; 164 } 165 166 static void init_clear_pgtable(void *table) 167 { 168 clear_page(table); 169 170 /* Ensure the zeroing is observed by page table walks. */ 171 dsb(ishst); 172 } 173 174 static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end, 175 phys_addr_t phys, pgprot_t prot) 176 { 177 do { 178 pte_t old_pte = __ptep_get(ptep); 179 180 /* 181 * Required barriers to make this visible to the table walker 182 * are deferred to the end of alloc_init_cont_pte(). 183 */ 184 __set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot)); 185 186 /* 187 * After the PTE entry has been populated once, we 188 * only allow updates to the permission attributes. 189 */ 190 BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), 191 pte_val(__ptep_get(ptep)))); 192 193 phys += PAGE_SIZE; 194 } while (ptep++, addr += PAGE_SIZE, addr != end); 195 } 196 197 static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, 198 unsigned long end, phys_addr_t phys, 199 pgprot_t prot, 200 phys_addr_t (*pgtable_alloc)(enum pgtable_type), 201 int flags) 202 { 203 unsigned long next; 204 pmd_t pmd = READ_ONCE(*pmdp); 205 pte_t *ptep; 206 207 BUG_ON(pmd_sect(pmd)); 208 if (pmd_none(pmd)) { 209 pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF; 210 phys_addr_t pte_phys; 211 212 if (flags & NO_EXEC_MAPPINGS) 213 pmdval |= PMD_TABLE_PXN; 214 BUG_ON(!pgtable_alloc); 215 pte_phys = pgtable_alloc(TABLE_PTE); 216 if (pte_phys == INVALID_PHYS_ADDR) 217 return -ENOMEM; 218 ptep = pte_set_fixmap(pte_phys); 219 init_clear_pgtable(ptep); 220 ptep += pte_index(addr); 221 __pmd_populate(pmdp, pte_phys, pmdval); 222 } else { 223 BUG_ON(pmd_bad(pmd)); 224 ptep = pte_set_fixmap_offset(pmdp, addr); 225 } 226 227 do { 228 pgprot_t __prot = prot; 229 230 next = pte_cont_addr_end(addr, end); 231 232 /* use a contiguous mapping if the range is suitably aligned */ 233 if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) && 234 (flags & NO_CONT_MAPPINGS) == 0) 235 __prot = __pgprot(pgprot_val(prot) | PTE_CONT); 236 237 init_pte(ptep, addr, next, phys, __prot); 238 239 ptep += pte_index(next) - pte_index(addr); 240 phys += next - addr; 241 } while (addr = next, addr != end); 242 243 /* 244 * Note: barriers and maintenance necessary to clear the fixmap slot 245 * ensure that all previous pgtable writes are visible to the table 246 * walker. 247 */ 248 pte_clear_fixmap(); 249 250 return 0; 251 } 252 253 static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end, 254 phys_addr_t phys, pgprot_t prot, 255 phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags) 256 { 257 unsigned long next; 258 259 do { 260 pmd_t old_pmd = READ_ONCE(*pmdp); 261 262 next = pmd_addr_end(addr, end); 263 264 /* try section mapping first */ 265 if (((addr | next | phys) & ~PMD_MASK) == 0 && 266 (flags & NO_BLOCK_MAPPINGS) == 0) { 267 pmd_set_huge(pmdp, phys, prot); 268 269 /* 270 * After the PMD entry has been populated once, we 271 * only allow updates to the permission attributes. 272 */ 273 BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), 274 READ_ONCE(pmd_val(*pmdp)))); 275 } else { 276 int ret; 277 278 ret = alloc_init_cont_pte(pmdp, addr, next, phys, prot, 279 pgtable_alloc, flags); 280 if (ret) 281 return ret; 282 283 BUG_ON(pmd_val(old_pmd) != 0 && 284 pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp))); 285 } 286 phys += next - addr; 287 } while (pmdp++, addr = next, addr != end); 288 289 return 0; 290 } 291 292 static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, 293 unsigned long end, phys_addr_t phys, 294 pgprot_t prot, 295 phys_addr_t (*pgtable_alloc)(enum pgtable_type), 296 int flags) 297 { 298 int ret; 299 unsigned long next; 300 pud_t pud = READ_ONCE(*pudp); 301 pmd_t *pmdp; 302 303 /* 304 * Check for initial section mappings in the pgd/pud. 305 */ 306 BUG_ON(pud_sect(pud)); 307 if (pud_none(pud)) { 308 pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF; 309 phys_addr_t pmd_phys; 310 311 if (flags & NO_EXEC_MAPPINGS) 312 pudval |= PUD_TABLE_PXN; 313 BUG_ON(!pgtable_alloc); 314 pmd_phys = pgtable_alloc(TABLE_PMD); 315 if (pmd_phys == INVALID_PHYS_ADDR) 316 return -ENOMEM; 317 pmdp = pmd_set_fixmap(pmd_phys); 318 init_clear_pgtable(pmdp); 319 pmdp += pmd_index(addr); 320 __pud_populate(pudp, pmd_phys, pudval); 321 } else { 322 BUG_ON(pud_bad(pud)); 323 pmdp = pmd_set_fixmap_offset(pudp, addr); 324 } 325 326 do { 327 pgprot_t __prot = prot; 328 329 next = pmd_cont_addr_end(addr, end); 330 331 /* use a contiguous mapping if the range is suitably aligned */ 332 if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) && 333 (flags & NO_CONT_MAPPINGS) == 0) 334 __prot = __pgprot(pgprot_val(prot) | PTE_CONT); 335 336 ret = init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags); 337 if (ret) 338 goto out; 339 340 pmdp += pmd_index(next) - pmd_index(addr); 341 phys += next - addr; 342 } while (addr = next, addr != end); 343 344 out: 345 pmd_clear_fixmap(); 346 347 return ret; 348 } 349 350 static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, 351 phys_addr_t phys, pgprot_t prot, 352 phys_addr_t (*pgtable_alloc)(enum pgtable_type), 353 int flags) 354 { 355 int ret = 0; 356 unsigned long next; 357 p4d_t p4d = READ_ONCE(*p4dp); 358 pud_t *pudp; 359 360 if (p4d_none(p4d)) { 361 p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN | P4D_TABLE_AF; 362 phys_addr_t pud_phys; 363 364 if (flags & NO_EXEC_MAPPINGS) 365 p4dval |= P4D_TABLE_PXN; 366 BUG_ON(!pgtable_alloc); 367 pud_phys = pgtable_alloc(TABLE_PUD); 368 if (pud_phys == INVALID_PHYS_ADDR) 369 return -ENOMEM; 370 pudp = pud_set_fixmap(pud_phys); 371 init_clear_pgtable(pudp); 372 pudp += pud_index(addr); 373 __p4d_populate(p4dp, pud_phys, p4dval); 374 } else { 375 BUG_ON(p4d_bad(p4d)); 376 pudp = pud_set_fixmap_offset(p4dp, addr); 377 } 378 379 do { 380 pud_t old_pud = READ_ONCE(*pudp); 381 382 next = pud_addr_end(addr, end); 383 384 /* 385 * For 4K granule only, attempt to put down a 1GB block 386 */ 387 if (pud_sect_supported() && 388 ((addr | next | phys) & ~PUD_MASK) == 0 && 389 (flags & NO_BLOCK_MAPPINGS) == 0) { 390 pud_set_huge(pudp, phys, prot); 391 392 /* 393 * After the PUD entry has been populated once, we 394 * only allow updates to the permission attributes. 395 */ 396 BUG_ON(!pgattr_change_is_safe(pud_val(old_pud), 397 READ_ONCE(pud_val(*pudp)))); 398 } else { 399 ret = alloc_init_cont_pmd(pudp, addr, next, phys, prot, 400 pgtable_alloc, flags); 401 if (ret) 402 goto out; 403 404 BUG_ON(pud_val(old_pud) != 0 && 405 pud_val(old_pud) != READ_ONCE(pud_val(*pudp))); 406 } 407 phys += next - addr; 408 } while (pudp++, addr = next, addr != end); 409 410 out: 411 pud_clear_fixmap(); 412 413 return ret; 414 } 415 416 static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, 417 phys_addr_t phys, pgprot_t prot, 418 phys_addr_t (*pgtable_alloc)(enum pgtable_type), 419 int flags) 420 { 421 int ret; 422 unsigned long next; 423 pgd_t pgd = READ_ONCE(*pgdp); 424 p4d_t *p4dp; 425 426 if (pgd_none(pgd)) { 427 pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN | PGD_TABLE_AF; 428 phys_addr_t p4d_phys; 429 430 if (flags & NO_EXEC_MAPPINGS) 431 pgdval |= PGD_TABLE_PXN; 432 BUG_ON(!pgtable_alloc); 433 p4d_phys = pgtable_alloc(TABLE_P4D); 434 if (p4d_phys == INVALID_PHYS_ADDR) 435 return -ENOMEM; 436 p4dp = p4d_set_fixmap(p4d_phys); 437 init_clear_pgtable(p4dp); 438 p4dp += p4d_index(addr); 439 __pgd_populate(pgdp, p4d_phys, pgdval); 440 } else { 441 BUG_ON(pgd_bad(pgd)); 442 p4dp = p4d_set_fixmap_offset(pgdp, addr); 443 } 444 445 do { 446 p4d_t old_p4d = READ_ONCE(*p4dp); 447 448 next = p4d_addr_end(addr, end); 449 450 ret = alloc_init_pud(p4dp, addr, next, phys, prot, 451 pgtable_alloc, flags); 452 if (ret) 453 goto out; 454 455 BUG_ON(p4d_val(old_p4d) != 0 && 456 p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp))); 457 458 phys += next - addr; 459 } while (p4dp++, addr = next, addr != end); 460 461 out: 462 p4d_clear_fixmap(); 463 464 return ret; 465 } 466 467 static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys, 468 unsigned long virt, phys_addr_t size, 469 pgprot_t prot, 470 phys_addr_t (*pgtable_alloc)(enum pgtable_type), 471 int flags) 472 { 473 int ret; 474 unsigned long addr, end, next; 475 pgd_t *pgdp = pgd_offset_pgd(pgdir, virt); 476 477 /* 478 * If the virtual and physical address don't have the same offset 479 * within a page, we cannot map the region as the caller expects. 480 */ 481 if (WARN_ON((phys ^ virt) & ~PAGE_MASK)) 482 return -EINVAL; 483 484 phys &= PAGE_MASK; 485 addr = virt & PAGE_MASK; 486 end = PAGE_ALIGN(virt + size); 487 488 do { 489 next = pgd_addr_end(addr, end); 490 ret = alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc, 491 flags); 492 if (ret) 493 return ret; 494 phys += next - addr; 495 } while (pgdp++, addr = next, addr != end); 496 497 return 0; 498 } 499 500 static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, 501 unsigned long virt, phys_addr_t size, 502 pgprot_t prot, 503 phys_addr_t (*pgtable_alloc)(enum pgtable_type), 504 int flags) 505 { 506 int ret; 507 508 mutex_lock(&fixmap_lock); 509 ret = __create_pgd_mapping_locked(pgdir, phys, virt, size, prot, 510 pgtable_alloc, flags); 511 mutex_unlock(&fixmap_lock); 512 513 return ret; 514 } 515 516 static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, 517 unsigned long virt, phys_addr_t size, 518 pgprot_t prot, 519 phys_addr_t (*pgtable_alloc)(enum pgtable_type), 520 int flags) 521 { 522 int ret; 523 524 ret = __create_pgd_mapping(pgdir, phys, virt, size, prot, pgtable_alloc, 525 flags); 526 if (ret) 527 panic("Failed to create page tables\n"); 528 } 529 530 static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp, 531 enum pgtable_type pgtable_type) 532 { 533 /* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */ 534 struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_ZERO, 0); 535 phys_addr_t pa; 536 537 if (!ptdesc) 538 return INVALID_PHYS_ADDR; 539 540 pa = page_to_phys(ptdesc_page(ptdesc)); 541 542 switch (pgtable_type) { 543 case TABLE_PTE: 544 BUG_ON(!pagetable_pte_ctor(mm, ptdesc)); 545 break; 546 case TABLE_PMD: 547 BUG_ON(!pagetable_pmd_ctor(mm, ptdesc)); 548 break; 549 case TABLE_PUD: 550 pagetable_pud_ctor(ptdesc); 551 break; 552 case TABLE_P4D: 553 pagetable_p4d_ctor(ptdesc); 554 break; 555 } 556 557 return pa; 558 } 559 560 static phys_addr_t 561 pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp) 562 { 563 return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type); 564 } 565 566 static phys_addr_t __maybe_unused 567 pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type) 568 { 569 return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL); 570 } 571 572 static phys_addr_t 573 pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type) 574 { 575 return __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type); 576 } 577 578 static void split_contpte(pte_t *ptep) 579 { 580 int i; 581 582 ptep = PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES); 583 for (i = 0; i < CONT_PTES; i++, ptep++) 584 __set_pte(ptep, pte_mknoncont(__ptep_get(ptep))); 585 } 586 587 static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont) 588 { 589 pmdval_t tableprot = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF; 590 unsigned long pfn = pmd_pfn(pmd); 591 pgprot_t prot = pmd_pgprot(pmd); 592 phys_addr_t pte_phys; 593 pte_t *ptep; 594 int i; 595 596 pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp); 597 if (pte_phys == INVALID_PHYS_ADDR) 598 return -ENOMEM; 599 ptep = (pte_t *)phys_to_virt(pte_phys); 600 601 if (pgprot_val(prot) & PMD_SECT_PXN) 602 tableprot |= PMD_TABLE_PXN; 603 604 prot = __pgprot((pgprot_val(prot) & ~PTE_TYPE_MASK) | PTE_TYPE_PAGE); 605 prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); 606 if (to_cont) 607 prot = __pgprot(pgprot_val(prot) | PTE_CONT); 608 609 for (i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++) 610 __set_pte(ptep, pfn_pte(pfn, prot)); 611 612 /* 613 * Ensure the pte entries are visible to the table walker by the time 614 * the pmd entry that points to the ptes is visible. 615 */ 616 dsb(ishst); 617 __pmd_populate(pmdp, pte_phys, tableprot); 618 619 return 0; 620 } 621 622 static void split_contpmd(pmd_t *pmdp) 623 { 624 int i; 625 626 pmdp = PTR_ALIGN_DOWN(pmdp, sizeof(*pmdp) * CONT_PMDS); 627 for (i = 0; i < CONT_PMDS; i++, pmdp++) 628 set_pmd(pmdp, pmd_mknoncont(pmdp_get(pmdp))); 629 } 630 631 static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont) 632 { 633 pudval_t tableprot = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF; 634 unsigned int step = PMD_SIZE >> PAGE_SHIFT; 635 unsigned long pfn = pud_pfn(pud); 636 pgprot_t prot = pud_pgprot(pud); 637 phys_addr_t pmd_phys; 638 pmd_t *pmdp; 639 int i; 640 641 pmd_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PMD, gfp); 642 if (pmd_phys == INVALID_PHYS_ADDR) 643 return -ENOMEM; 644 pmdp = (pmd_t *)phys_to_virt(pmd_phys); 645 646 if (pgprot_val(prot) & PMD_SECT_PXN) 647 tableprot |= PUD_TABLE_PXN; 648 649 prot = __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT); 650 prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); 651 if (to_cont) 652 prot = __pgprot(pgprot_val(prot) | PTE_CONT); 653 654 for (i = 0; i < PTRS_PER_PMD; i++, pmdp++, pfn += step) 655 set_pmd(pmdp, pfn_pmd(pfn, prot)); 656 657 /* 658 * Ensure the pmd entries are visible to the table walker by the time 659 * the pud entry that points to the pmds is visible. 660 */ 661 dsb(ishst); 662 __pud_populate(pudp, pmd_phys, tableprot); 663 664 return 0; 665 } 666 667 static int split_kernel_leaf_mapping_locked(unsigned long addr) 668 { 669 pgd_t *pgdp, pgd; 670 p4d_t *p4dp, p4d; 671 pud_t *pudp, pud; 672 pmd_t *pmdp, pmd; 673 pte_t *ptep, pte; 674 int ret = 0; 675 676 /* 677 * PGD: If addr is PGD aligned then addr already describes a leaf 678 * boundary. If not present then there is nothing to split. 679 */ 680 if (ALIGN_DOWN(addr, PGDIR_SIZE) == addr) 681 goto out; 682 pgdp = pgd_offset_k(addr); 683 pgd = pgdp_get(pgdp); 684 if (!pgd_present(pgd)) 685 goto out; 686 687 /* 688 * P4D: If addr is P4D aligned then addr already describes a leaf 689 * boundary. If not present then there is nothing to split. 690 */ 691 if (ALIGN_DOWN(addr, P4D_SIZE) == addr) 692 goto out; 693 p4dp = p4d_offset(pgdp, addr); 694 p4d = p4dp_get(p4dp); 695 if (!p4d_present(p4d)) 696 goto out; 697 698 /* 699 * PUD: If addr is PUD aligned then addr already describes a leaf 700 * boundary. If not present then there is nothing to split. Otherwise, 701 * if we have a pud leaf, split to contpmd. 702 */ 703 if (ALIGN_DOWN(addr, PUD_SIZE) == addr) 704 goto out; 705 pudp = pud_offset(p4dp, addr); 706 pud = pudp_get(pudp); 707 if (!pud_present(pud)) 708 goto out; 709 if (pud_leaf(pud)) { 710 ret = split_pud(pudp, pud, GFP_PGTABLE_KERNEL, true); 711 if (ret) 712 goto out; 713 } 714 715 /* 716 * CONTPMD: If addr is CONTPMD aligned then addr already describes a 717 * leaf boundary. If not present then there is nothing to split. 718 * Otherwise, if we have a contpmd leaf, split to pmd. 719 */ 720 if (ALIGN_DOWN(addr, CONT_PMD_SIZE) == addr) 721 goto out; 722 pmdp = pmd_offset(pudp, addr); 723 pmd = pmdp_get(pmdp); 724 if (!pmd_present(pmd)) 725 goto out; 726 if (pmd_leaf(pmd)) { 727 if (pmd_cont(pmd)) 728 split_contpmd(pmdp); 729 /* 730 * PMD: If addr is PMD aligned then addr already describes a 731 * leaf boundary. Otherwise, split to contpte. 732 */ 733 if (ALIGN_DOWN(addr, PMD_SIZE) == addr) 734 goto out; 735 ret = split_pmd(pmdp, pmd, GFP_PGTABLE_KERNEL, true); 736 if (ret) 737 goto out; 738 } 739 740 /* 741 * CONTPTE: If addr is CONTPTE aligned then addr already describes a 742 * leaf boundary. If not present then there is nothing to split. 743 * Otherwise, if we have a contpte leaf, split to pte. 744 */ 745 if (ALIGN_DOWN(addr, CONT_PTE_SIZE) == addr) 746 goto out; 747 ptep = pte_offset_kernel(pmdp, addr); 748 pte = __ptep_get(ptep); 749 if (!pte_present(pte)) 750 goto out; 751 if (pte_cont(pte)) 752 split_contpte(ptep); 753 754 out: 755 return ret; 756 } 757 758 static inline bool force_pte_mapping(void) 759 { 760 const bool bbml2 = system_capabilities_finalized() ? 761 system_supports_bbml2_noabort() : cpu_supports_bbml2_noabort(); 762 763 if (debug_pagealloc_enabled()) 764 return true; 765 if (bbml2) 766 return false; 767 return rodata_full || arm64_kfence_can_set_direct_map() || is_realm_world(); 768 } 769 770 static inline bool split_leaf_mapping_possible(void) 771 { 772 /* 773 * !BBML2_NOABORT systems should never run into scenarios where we would 774 * have to split. So exit early and let calling code detect it and raise 775 * a warning. 776 */ 777 if (!system_supports_bbml2_noabort()) 778 return false; 779 return !force_pte_mapping(); 780 } 781 782 static DEFINE_MUTEX(pgtable_split_lock); 783 784 int split_kernel_leaf_mapping(unsigned long start, unsigned long end) 785 { 786 int ret; 787 788 /* 789 * Exit early if the region is within a pte-mapped area or if we can't 790 * split. For the latter case, the permission change code will raise a 791 * warning if not already pte-mapped. 792 */ 793 if (!split_leaf_mapping_possible() || is_kfence_address((void *)start)) 794 return 0; 795 796 /* 797 * Ensure start and end are at least page-aligned since this is the 798 * finest granularity we can split to. 799 */ 800 if (start != PAGE_ALIGN(start) || end != PAGE_ALIGN(end)) 801 return -EINVAL; 802 803 mutex_lock(&pgtable_split_lock); 804 arch_enter_lazy_mmu_mode(); 805 806 /* 807 * The split_kernel_leaf_mapping_locked() may sleep, it is not a 808 * problem for ARM64 since ARM64's lazy MMU implementation allows 809 * sleeping. 810 * 811 * Optimize for the common case of splitting out a single page from a 812 * larger mapping. Here we can just split on the "least aligned" of 813 * start and end and this will guarantee that there must also be a split 814 * on the more aligned address since the both addresses must be in the 815 * same contpte block and it must have been split to ptes. 816 */ 817 if (end - start == PAGE_SIZE) { 818 start = __ffs(start) < __ffs(end) ? start : end; 819 ret = split_kernel_leaf_mapping_locked(start); 820 } else { 821 ret = split_kernel_leaf_mapping_locked(start); 822 if (!ret) 823 ret = split_kernel_leaf_mapping_locked(end); 824 } 825 826 arch_leave_lazy_mmu_mode(); 827 mutex_unlock(&pgtable_split_lock); 828 return ret; 829 } 830 831 static int split_to_ptes_pud_entry(pud_t *pudp, unsigned long addr, 832 unsigned long next, struct mm_walk *walk) 833 { 834 gfp_t gfp = *(gfp_t *)walk->private; 835 pud_t pud = pudp_get(pudp); 836 int ret = 0; 837 838 if (pud_leaf(pud)) 839 ret = split_pud(pudp, pud, gfp, false); 840 841 return ret; 842 } 843 844 static int split_to_ptes_pmd_entry(pmd_t *pmdp, unsigned long addr, 845 unsigned long next, struct mm_walk *walk) 846 { 847 gfp_t gfp = *(gfp_t *)walk->private; 848 pmd_t pmd = pmdp_get(pmdp); 849 int ret = 0; 850 851 if (pmd_leaf(pmd)) { 852 if (pmd_cont(pmd)) 853 split_contpmd(pmdp); 854 ret = split_pmd(pmdp, pmd, gfp, false); 855 856 /* 857 * We have split the pmd directly to ptes so there is no need to 858 * visit each pte to check if they are contpte. 859 */ 860 walk->action = ACTION_CONTINUE; 861 } 862 863 return ret; 864 } 865 866 static int split_to_ptes_pte_entry(pte_t *ptep, unsigned long addr, 867 unsigned long next, struct mm_walk *walk) 868 { 869 pte_t pte = __ptep_get(ptep); 870 871 if (pte_cont(pte)) 872 split_contpte(ptep); 873 874 return 0; 875 } 876 877 static const struct mm_walk_ops split_to_ptes_ops = { 878 .pud_entry = split_to_ptes_pud_entry, 879 .pmd_entry = split_to_ptes_pmd_entry, 880 .pte_entry = split_to_ptes_pte_entry, 881 }; 882 883 static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp) 884 { 885 int ret; 886 887 arch_enter_lazy_mmu_mode(); 888 ret = walk_kernel_page_table_range_lockless(start, end, 889 &split_to_ptes_ops, NULL, &gfp); 890 arch_leave_lazy_mmu_mode(); 891 892 return ret; 893 } 894 895 static bool linear_map_requires_bbml2 __initdata; 896 897 u32 idmap_kpti_bbml2_flag; 898 899 static void __init init_idmap_kpti_bbml2_flag(void) 900 { 901 WRITE_ONCE(idmap_kpti_bbml2_flag, 1); 902 /* Must be visible to other CPUs before stop_machine() is called. */ 903 smp_mb(); 904 } 905 906 static int __init linear_map_split_to_ptes(void *__unused) 907 { 908 /* 909 * Repainting the linear map must be done by CPU0 (the boot CPU) because 910 * that's the only CPU that we know supports BBML2. The other CPUs will 911 * be held in a waiting area with the idmap active. 912 */ 913 if (!smp_processor_id()) { 914 unsigned long lstart = _PAGE_OFFSET(vabits_actual); 915 unsigned long lend = PAGE_END; 916 unsigned long kstart = (unsigned long)lm_alias(_stext); 917 unsigned long kend = (unsigned long)lm_alias(__init_begin); 918 int ret; 919 920 /* 921 * Wait for all secondary CPUs to be put into the waiting area. 922 */ 923 smp_cond_load_acquire(&idmap_kpti_bbml2_flag, VAL == num_online_cpus()); 924 925 /* 926 * Walk all of the linear map [lstart, lend), except the kernel 927 * linear map alias [kstart, kend), and split all mappings to 928 * PTE. The kernel alias remains static throughout runtime so 929 * can continue to be safely mapped with large mappings. 930 */ 931 ret = range_split_to_ptes(lstart, kstart, GFP_ATOMIC); 932 if (!ret) 933 ret = range_split_to_ptes(kend, lend, GFP_ATOMIC); 934 if (ret) 935 panic("Failed to split linear map\n"); 936 flush_tlb_kernel_range(lstart, lend); 937 938 /* 939 * Relies on dsb in flush_tlb_kernel_range() to avoid reordering 940 * before any page table split operations. 941 */ 942 WRITE_ONCE(idmap_kpti_bbml2_flag, 0); 943 } else { 944 typedef void (wait_split_fn)(void); 945 extern wait_split_fn wait_linear_map_split_to_ptes; 946 wait_split_fn *wait_fn; 947 948 wait_fn = (void *)__pa_symbol(wait_linear_map_split_to_ptes); 949 950 /* 951 * At least one secondary CPU doesn't support BBML2 so cannot 952 * tolerate the size of the live mappings changing. So have the 953 * secondary CPUs wait for the boot CPU to make the changes 954 * with the idmap active and init_mm inactive. 955 */ 956 cpu_install_idmap(); 957 wait_fn(); 958 cpu_uninstall_idmap(); 959 } 960 961 return 0; 962 } 963 964 void __init linear_map_maybe_split_to_ptes(void) 965 { 966 if (linear_map_requires_bbml2 && !system_supports_bbml2_noabort()) { 967 init_idmap_kpti_bbml2_flag(); 968 stop_machine(linear_map_split_to_ptes, NULL, cpu_online_mask); 969 } 970 } 971 972 /* 973 * This function can only be used to modify existing table entries, 974 * without allocating new levels of table. Note that this permits the 975 * creation of new section or page entries. 976 */ 977 void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, 978 phys_addr_t size, pgprot_t prot) 979 { 980 if (virt < PAGE_OFFSET) { 981 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", 982 &phys, virt); 983 return; 984 } 985 early_create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, 986 NO_CONT_MAPPINGS); 987 } 988 989 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, 990 unsigned long virt, phys_addr_t size, 991 pgprot_t prot, bool page_mappings_only) 992 { 993 int flags = 0; 994 995 BUG_ON(mm == &init_mm); 996 997 if (page_mappings_only) 998 flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 999 1000 early_create_pgd_mapping(mm->pgd, phys, virt, size, prot, 1001 pgd_pgtable_alloc_special_mm, flags); 1002 } 1003 1004 static void update_mapping_prot(phys_addr_t phys, unsigned long virt, 1005 phys_addr_t size, pgprot_t prot) 1006 { 1007 if (virt < PAGE_OFFSET) { 1008 pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", 1009 &phys, virt); 1010 return; 1011 } 1012 1013 early_create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, 1014 NO_CONT_MAPPINGS); 1015 1016 /* flush the TLBs after updating live kernel mappings */ 1017 flush_tlb_kernel_range(virt, virt + size); 1018 } 1019 1020 static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start, 1021 phys_addr_t end, pgprot_t prot, int flags) 1022 { 1023 early_create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start, 1024 prot, early_pgtable_alloc, flags); 1025 } 1026 1027 void __init mark_linear_text_alias_ro(void) 1028 { 1029 /* 1030 * Remove the write permissions from the linear alias of .text/.rodata 1031 */ 1032 update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text), 1033 (unsigned long)__init_begin - (unsigned long)_text, 1034 PAGE_KERNEL_RO); 1035 } 1036 1037 #ifdef CONFIG_KFENCE 1038 1039 bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; 1040 1041 /* early_param() will be parsed before map_mem() below. */ 1042 static int __init parse_kfence_early_init(char *arg) 1043 { 1044 int val; 1045 1046 if (get_option(&arg, &val)) 1047 kfence_early_init = !!val; 1048 return 0; 1049 } 1050 early_param("kfence.sample_interval", parse_kfence_early_init); 1051 1052 static phys_addr_t __init arm64_kfence_alloc_pool(void) 1053 { 1054 phys_addr_t kfence_pool; 1055 1056 if (!kfence_early_init) 1057 return 0; 1058 1059 kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); 1060 if (!kfence_pool) { 1061 pr_err("failed to allocate kfence pool\n"); 1062 kfence_early_init = false; 1063 return 0; 1064 } 1065 1066 /* Temporarily mark as NOMAP. */ 1067 memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); 1068 1069 return kfence_pool; 1070 } 1071 1072 static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) 1073 { 1074 if (!kfence_pool) 1075 return; 1076 1077 /* KFENCE pool needs page-level mapping. */ 1078 __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, 1079 pgprot_tagged(PAGE_KERNEL), 1080 NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); 1081 memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); 1082 __kfence_pool = phys_to_virt(kfence_pool); 1083 } 1084 1085 bool arch_kfence_init_pool(void) 1086 { 1087 unsigned long start = (unsigned long)__kfence_pool; 1088 unsigned long end = start + KFENCE_POOL_SIZE; 1089 int ret; 1090 1091 /* Exit early if we know the linear map is already pte-mapped. */ 1092 if (!split_leaf_mapping_possible()) 1093 return true; 1094 1095 /* Kfence pool is already pte-mapped for the early init case. */ 1096 if (kfence_early_init) 1097 return true; 1098 1099 mutex_lock(&pgtable_split_lock); 1100 ret = range_split_to_ptes(start, end, GFP_PGTABLE_KERNEL); 1101 mutex_unlock(&pgtable_split_lock); 1102 1103 /* 1104 * Since the system supports bbml2_noabort, tlb invalidation is not 1105 * required here; the pgtable mappings have been split to pte but larger 1106 * entries may safely linger in the TLB. 1107 */ 1108 1109 return !ret; 1110 } 1111 #else /* CONFIG_KFENCE */ 1112 1113 static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; } 1114 static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { } 1115 1116 #endif /* CONFIG_KFENCE */ 1117 1118 static void __init map_mem(pgd_t *pgdp) 1119 { 1120 static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN); 1121 phys_addr_t kernel_start = __pa_symbol(_text); 1122 phys_addr_t kernel_end = __pa_symbol(__init_begin); 1123 phys_addr_t start, end; 1124 phys_addr_t early_kfence_pool; 1125 int flags = NO_EXEC_MAPPINGS; 1126 u64 i; 1127 1128 /* 1129 * Setting hierarchical PXNTable attributes on table entries covering 1130 * the linear region is only possible if it is guaranteed that no table 1131 * entries at any level are being shared between the linear region and 1132 * the vmalloc region. Check whether this is true for the PGD level, in 1133 * which case it is guaranteed to be true for all other levels as well. 1134 * (Unless we are running with support for LPA2, in which case the 1135 * entire reduced VA space is covered by a single pgd_t which will have 1136 * been populated without the PXNTable attribute by the time we get here.) 1137 */ 1138 BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) && 1139 pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1); 1140 1141 early_kfence_pool = arm64_kfence_alloc_pool(); 1142 1143 linear_map_requires_bbml2 = !force_pte_mapping() && can_set_direct_map(); 1144 1145 if (force_pte_mapping()) 1146 flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 1147 1148 /* 1149 * Take care not to create a writable alias for the 1150 * read-only text and rodata sections of the kernel image. 1151 * So temporarily mark them as NOMAP to skip mappings in 1152 * the following for-loop 1153 */ 1154 memblock_mark_nomap(kernel_start, kernel_end - kernel_start); 1155 1156 /* map all the memory banks */ 1157 for_each_mem_range(i, &start, &end) { 1158 if (start >= end) 1159 break; 1160 /* 1161 * The linear map must allow allocation tags reading/writing 1162 * if MTE is present. Otherwise, it has the same attributes as 1163 * PAGE_KERNEL. 1164 */ 1165 __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL), 1166 flags); 1167 } 1168 1169 /* 1170 * Map the linear alias of the [_text, __init_begin) interval 1171 * as non-executable now, and remove the write permission in 1172 * mark_linear_text_alias_ro() below (which will be called after 1173 * alternative patching has completed). This makes the contents 1174 * of the region accessible to subsystems such as hibernate, 1175 * but protects it from inadvertent modification or execution. 1176 * Note that contiguous mappings cannot be remapped in this way, 1177 * so we should avoid them here. 1178 */ 1179 __map_memblock(pgdp, kernel_start, kernel_end, 1180 PAGE_KERNEL, NO_CONT_MAPPINGS); 1181 memblock_clear_nomap(kernel_start, kernel_end - kernel_start); 1182 arm64_kfence_map_pool(early_kfence_pool, pgdp); 1183 } 1184 1185 void mark_rodata_ro(void) 1186 { 1187 unsigned long section_size; 1188 1189 /* 1190 * mark .rodata as read only. Use __init_begin rather than __end_rodata 1191 * to cover NOTES and EXCEPTION_TABLE. 1192 */ 1193 section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; 1194 WRITE_ONCE(rodata_is_rw, false); 1195 update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata, 1196 section_size, PAGE_KERNEL_RO); 1197 /* mark the range between _text and _stext as read only. */ 1198 update_mapping_prot(__pa_symbol(_text), (unsigned long)_text, 1199 (unsigned long)_stext - (unsigned long)_text, 1200 PAGE_KERNEL_RO); 1201 } 1202 1203 static void __init declare_vma(struct vm_struct *vma, 1204 void *va_start, void *va_end, 1205 unsigned long vm_flags) 1206 { 1207 phys_addr_t pa_start = __pa_symbol(va_start); 1208 unsigned long size = va_end - va_start; 1209 1210 BUG_ON(!PAGE_ALIGNED(pa_start)); 1211 BUG_ON(!PAGE_ALIGNED(size)); 1212 1213 if (!(vm_flags & VM_NO_GUARD)) 1214 size += PAGE_SIZE; 1215 1216 vma->addr = va_start; 1217 vma->phys_addr = pa_start; 1218 vma->size = size; 1219 vma->flags = VM_MAP | vm_flags; 1220 vma->caller = __builtin_return_address(0); 1221 1222 vm_area_add_early(vma); 1223 } 1224 1225 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 1226 #define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT)) 1227 1228 static phys_addr_t kpti_ng_temp_alloc __initdata; 1229 1230 static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type) 1231 { 1232 kpti_ng_temp_alloc -= PAGE_SIZE; 1233 return kpti_ng_temp_alloc; 1234 } 1235 1236 static int __init __kpti_install_ng_mappings(void *__unused) 1237 { 1238 typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long); 1239 extern kpti_remap_fn idmap_kpti_install_ng_mappings; 1240 kpti_remap_fn *remap_fn; 1241 1242 int cpu = smp_processor_id(); 1243 int levels = CONFIG_PGTABLE_LEVELS; 1244 int order = order_base_2(levels); 1245 u64 kpti_ng_temp_pgd_pa = 0; 1246 pgd_t *kpti_ng_temp_pgd; 1247 u64 alloc = 0; 1248 1249 if (levels == 5 && !pgtable_l5_enabled()) 1250 levels = 4; 1251 else if (levels == 4 && !pgtable_l4_enabled()) 1252 levels = 3; 1253 1254 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); 1255 1256 if (!cpu) { 1257 int ret; 1258 1259 alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); 1260 kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE); 1261 kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd); 1262 1263 // 1264 // Create a minimal page table hierarchy that permits us to map 1265 // the swapper page tables temporarily as we traverse them. 1266 // 1267 // The physical pages are laid out as follows: 1268 // 1269 // +--------+-/-------+-/------ +-/------ +-\\\--------+ 1270 // : PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[] : 1271 // +--------+-\-------+-\------ +-\------ +-///--------+ 1272 // ^ 1273 // The first page is mapped into this hierarchy at a PMD_SHIFT 1274 // aligned virtual address, so that we can manipulate the PTE 1275 // level entries while the mapping is active. The first entry 1276 // covers the PTE[] page itself, the remaining entries are free 1277 // to be used as a ad-hoc fixmap. 1278 // 1279 ret = __create_pgd_mapping_locked(kpti_ng_temp_pgd, __pa(alloc), 1280 KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL, 1281 kpti_ng_pgd_alloc, 0); 1282 if (ret) 1283 panic("Failed to create page tables\n"); 1284 } 1285 1286 cpu_install_idmap(); 1287 remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA); 1288 cpu_uninstall_idmap(); 1289 1290 if (!cpu) { 1291 free_pages(alloc, order); 1292 arm64_use_ng_mappings = true; 1293 } 1294 1295 return 0; 1296 } 1297 1298 void __init kpti_install_ng_mappings(void) 1299 { 1300 /* Check whether KPTI is going to be used */ 1301 if (!arm64_kernel_unmapped_at_el0()) 1302 return; 1303 1304 /* 1305 * We don't need to rewrite the page-tables if either we've done 1306 * it already or we have KASLR enabled and therefore have not 1307 * created any global mappings at all. 1308 */ 1309 if (arm64_use_ng_mappings) 1310 return; 1311 1312 init_idmap_kpti_bbml2_flag(); 1313 stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask); 1314 } 1315 1316 static pgprot_t __init kernel_exec_prot(void) 1317 { 1318 return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; 1319 } 1320 1321 static int __init map_entry_trampoline(void) 1322 { 1323 int i; 1324 1325 if (!arm64_kernel_unmapped_at_el0()) 1326 return 0; 1327 1328 pgprot_t prot = kernel_exec_prot(); 1329 phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); 1330 1331 /* The trampoline is always mapped and can therefore be global */ 1332 pgprot_val(prot) &= ~PTE_NG; 1333 1334 /* Map only the text into the trampoline page table */ 1335 memset(tramp_pg_dir, 0, PGD_SIZE); 1336 early_create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, 1337 entry_tramp_text_size(), prot, 1338 pgd_pgtable_alloc_init_mm, NO_BLOCK_MAPPINGS); 1339 1340 /* Map both the text and data into the kernel page table */ 1341 for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) 1342 __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, 1343 pa_start + i * PAGE_SIZE, prot); 1344 1345 if (IS_ENABLED(CONFIG_RELOCATABLE)) 1346 __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, 1347 pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO); 1348 1349 return 0; 1350 } 1351 core_initcall(map_entry_trampoline); 1352 #endif 1353 1354 /* 1355 * Declare the VMA areas for the kernel 1356 */ 1357 static void __init declare_kernel_vmas(void) 1358 { 1359 static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT]; 1360 1361 declare_vma(&vmlinux_seg[0], _text, _etext, VM_NO_GUARD); 1362 declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD); 1363 declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD); 1364 declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD); 1365 declare_vma(&vmlinux_seg[4], _data, _end, 0); 1366 } 1367 1368 void __pi_map_range(phys_addr_t *pte, u64 start, u64 end, phys_addr_t pa, 1369 pgprot_t prot, int level, pte_t *tbl, bool may_use_cont, 1370 u64 va_offset); 1371 1372 static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init, 1373 kpti_bbml2_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init; 1374 1375 static void __init create_idmap(void) 1376 { 1377 phys_addr_t start = __pa_symbol(__idmap_text_start); 1378 phys_addr_t end = __pa_symbol(__idmap_text_end); 1379 phys_addr_t ptep = __pa_symbol(idmap_ptes); 1380 1381 __pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX, 1382 IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false, 1383 __phys_to_virt(ptep) - ptep); 1384 1385 if (linear_map_requires_bbml2 || 1386 (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings)) { 1387 phys_addr_t pa = __pa_symbol(&idmap_kpti_bbml2_flag); 1388 1389 /* 1390 * The KPTI G-to-nG conversion code needs a read-write mapping 1391 * of its synchronization flag in the ID map. This is also used 1392 * when splitting the linear map to ptes if a secondary CPU 1393 * doesn't support bbml2. 1394 */ 1395 ptep = __pa_symbol(kpti_bbml2_ptes); 1396 __pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL, 1397 IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false, 1398 __phys_to_virt(ptep) - ptep); 1399 } 1400 } 1401 1402 void __init paging_init(void) 1403 { 1404 map_mem(swapper_pg_dir); 1405 1406 memblock_allow_resize(); 1407 1408 create_idmap(); 1409 declare_kernel_vmas(); 1410 } 1411 1412 #ifdef CONFIG_MEMORY_HOTPLUG 1413 static void free_hotplug_page_range(struct page *page, size_t size, 1414 struct vmem_altmap *altmap) 1415 { 1416 if (altmap) { 1417 vmem_altmap_free(altmap, size >> PAGE_SHIFT); 1418 } else { 1419 WARN_ON(PageReserved(page)); 1420 __free_pages(page, get_order(size)); 1421 } 1422 } 1423 1424 static void free_hotplug_pgtable_page(struct page *page) 1425 { 1426 free_hotplug_page_range(page, PAGE_SIZE, NULL); 1427 } 1428 1429 static bool pgtable_range_aligned(unsigned long start, unsigned long end, 1430 unsigned long floor, unsigned long ceiling, 1431 unsigned long mask) 1432 { 1433 start &= mask; 1434 if (start < floor) 1435 return false; 1436 1437 if (ceiling) { 1438 ceiling &= mask; 1439 if (!ceiling) 1440 return false; 1441 } 1442 1443 if (end - 1 > ceiling - 1) 1444 return false; 1445 return true; 1446 } 1447 1448 static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, 1449 unsigned long end, bool free_mapped, 1450 struct vmem_altmap *altmap) 1451 { 1452 pte_t *ptep, pte; 1453 1454 do { 1455 ptep = pte_offset_kernel(pmdp, addr); 1456 pte = __ptep_get(ptep); 1457 if (pte_none(pte)) 1458 continue; 1459 1460 WARN_ON(!pte_present(pte)); 1461 __pte_clear(&init_mm, addr, ptep); 1462 flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 1463 if (free_mapped) 1464 free_hotplug_page_range(pte_page(pte), 1465 PAGE_SIZE, altmap); 1466 } while (addr += PAGE_SIZE, addr < end); 1467 } 1468 1469 static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr, 1470 unsigned long end, bool free_mapped, 1471 struct vmem_altmap *altmap) 1472 { 1473 unsigned long next; 1474 pmd_t *pmdp, pmd; 1475 1476 do { 1477 next = pmd_addr_end(addr, end); 1478 pmdp = pmd_offset(pudp, addr); 1479 pmd = READ_ONCE(*pmdp); 1480 if (pmd_none(pmd)) 1481 continue; 1482 1483 WARN_ON(!pmd_present(pmd)); 1484 if (pmd_sect(pmd)) { 1485 pmd_clear(pmdp); 1486 1487 /* 1488 * One TLBI should be sufficient here as the PMD_SIZE 1489 * range is mapped with a single block entry. 1490 */ 1491 flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 1492 if (free_mapped) 1493 free_hotplug_page_range(pmd_page(pmd), 1494 PMD_SIZE, altmap); 1495 continue; 1496 } 1497 WARN_ON(!pmd_table(pmd)); 1498 unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap); 1499 } while (addr = next, addr < end); 1500 } 1501 1502 static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr, 1503 unsigned long end, bool free_mapped, 1504 struct vmem_altmap *altmap) 1505 { 1506 unsigned long next; 1507 pud_t *pudp, pud; 1508 1509 do { 1510 next = pud_addr_end(addr, end); 1511 pudp = pud_offset(p4dp, addr); 1512 pud = READ_ONCE(*pudp); 1513 if (pud_none(pud)) 1514 continue; 1515 1516 WARN_ON(!pud_present(pud)); 1517 if (pud_sect(pud)) { 1518 pud_clear(pudp); 1519 1520 /* 1521 * One TLBI should be sufficient here as the PUD_SIZE 1522 * range is mapped with a single block entry. 1523 */ 1524 flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 1525 if (free_mapped) 1526 free_hotplug_page_range(pud_page(pud), 1527 PUD_SIZE, altmap); 1528 continue; 1529 } 1530 WARN_ON(!pud_table(pud)); 1531 unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap); 1532 } while (addr = next, addr < end); 1533 } 1534 1535 static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr, 1536 unsigned long end, bool free_mapped, 1537 struct vmem_altmap *altmap) 1538 { 1539 unsigned long next; 1540 p4d_t *p4dp, p4d; 1541 1542 do { 1543 next = p4d_addr_end(addr, end); 1544 p4dp = p4d_offset(pgdp, addr); 1545 p4d = READ_ONCE(*p4dp); 1546 if (p4d_none(p4d)) 1547 continue; 1548 1549 WARN_ON(!p4d_present(p4d)); 1550 unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap); 1551 } while (addr = next, addr < end); 1552 } 1553 1554 static void unmap_hotplug_range(unsigned long addr, unsigned long end, 1555 bool free_mapped, struct vmem_altmap *altmap) 1556 { 1557 unsigned long next; 1558 pgd_t *pgdp, pgd; 1559 1560 /* 1561 * altmap can only be used as vmemmap mapping backing memory. 1562 * In case the backing memory itself is not being freed, then 1563 * altmap is irrelevant. Warn about this inconsistency when 1564 * encountered. 1565 */ 1566 WARN_ON(!free_mapped && altmap); 1567 1568 do { 1569 next = pgd_addr_end(addr, end); 1570 pgdp = pgd_offset_k(addr); 1571 pgd = READ_ONCE(*pgdp); 1572 if (pgd_none(pgd)) 1573 continue; 1574 1575 WARN_ON(!pgd_present(pgd)); 1576 unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap); 1577 } while (addr = next, addr < end); 1578 } 1579 1580 static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, 1581 unsigned long end, unsigned long floor, 1582 unsigned long ceiling) 1583 { 1584 pte_t *ptep, pte; 1585 unsigned long i, start = addr; 1586 1587 do { 1588 ptep = pte_offset_kernel(pmdp, addr); 1589 pte = __ptep_get(ptep); 1590 1591 /* 1592 * This is just a sanity check here which verifies that 1593 * pte clearing has been done by earlier unmap loops. 1594 */ 1595 WARN_ON(!pte_none(pte)); 1596 } while (addr += PAGE_SIZE, addr < end); 1597 1598 if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK)) 1599 return; 1600 1601 /* 1602 * Check whether we can free the pte page if the rest of the 1603 * entries are empty. Overlap with other regions have been 1604 * handled by the floor/ceiling check. 1605 */ 1606 ptep = pte_offset_kernel(pmdp, 0UL); 1607 for (i = 0; i < PTRS_PER_PTE; i++) { 1608 if (!pte_none(__ptep_get(&ptep[i]))) 1609 return; 1610 } 1611 1612 pmd_clear(pmdp); 1613 __flush_tlb_kernel_pgtable(start); 1614 free_hotplug_pgtable_page(virt_to_page(ptep)); 1615 } 1616 1617 static void free_empty_pmd_table(pud_t *pudp, unsigned long addr, 1618 unsigned long end, unsigned long floor, 1619 unsigned long ceiling) 1620 { 1621 pmd_t *pmdp, pmd; 1622 unsigned long i, next, start = addr; 1623 1624 do { 1625 next = pmd_addr_end(addr, end); 1626 pmdp = pmd_offset(pudp, addr); 1627 pmd = READ_ONCE(*pmdp); 1628 if (pmd_none(pmd)) 1629 continue; 1630 1631 WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd)); 1632 free_empty_pte_table(pmdp, addr, next, floor, ceiling); 1633 } while (addr = next, addr < end); 1634 1635 if (CONFIG_PGTABLE_LEVELS <= 2) 1636 return; 1637 1638 if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK)) 1639 return; 1640 1641 /* 1642 * Check whether we can free the pmd page if the rest of the 1643 * entries are empty. Overlap with other regions have been 1644 * handled by the floor/ceiling check. 1645 */ 1646 pmdp = pmd_offset(pudp, 0UL); 1647 for (i = 0; i < PTRS_PER_PMD; i++) { 1648 if (!pmd_none(READ_ONCE(pmdp[i]))) 1649 return; 1650 } 1651 1652 pud_clear(pudp); 1653 __flush_tlb_kernel_pgtable(start); 1654 free_hotplug_pgtable_page(virt_to_page(pmdp)); 1655 } 1656 1657 static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr, 1658 unsigned long end, unsigned long floor, 1659 unsigned long ceiling) 1660 { 1661 pud_t *pudp, pud; 1662 unsigned long i, next, start = addr; 1663 1664 do { 1665 next = pud_addr_end(addr, end); 1666 pudp = pud_offset(p4dp, addr); 1667 pud = READ_ONCE(*pudp); 1668 if (pud_none(pud)) 1669 continue; 1670 1671 WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud)); 1672 free_empty_pmd_table(pudp, addr, next, floor, ceiling); 1673 } while (addr = next, addr < end); 1674 1675 if (!pgtable_l4_enabled()) 1676 return; 1677 1678 if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK)) 1679 return; 1680 1681 /* 1682 * Check whether we can free the pud page if the rest of the 1683 * entries are empty. Overlap with other regions have been 1684 * handled by the floor/ceiling check. 1685 */ 1686 pudp = pud_offset(p4dp, 0UL); 1687 for (i = 0; i < PTRS_PER_PUD; i++) { 1688 if (!pud_none(READ_ONCE(pudp[i]))) 1689 return; 1690 } 1691 1692 p4d_clear(p4dp); 1693 __flush_tlb_kernel_pgtable(start); 1694 free_hotplug_pgtable_page(virt_to_page(pudp)); 1695 } 1696 1697 static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr, 1698 unsigned long end, unsigned long floor, 1699 unsigned long ceiling) 1700 { 1701 p4d_t *p4dp, p4d; 1702 unsigned long i, next, start = addr; 1703 1704 do { 1705 next = p4d_addr_end(addr, end); 1706 p4dp = p4d_offset(pgdp, addr); 1707 p4d = READ_ONCE(*p4dp); 1708 if (p4d_none(p4d)) 1709 continue; 1710 1711 WARN_ON(!p4d_present(p4d)); 1712 free_empty_pud_table(p4dp, addr, next, floor, ceiling); 1713 } while (addr = next, addr < end); 1714 1715 if (!pgtable_l5_enabled()) 1716 return; 1717 1718 if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK)) 1719 return; 1720 1721 /* 1722 * Check whether we can free the p4d page if the rest of the 1723 * entries are empty. Overlap with other regions have been 1724 * handled by the floor/ceiling check. 1725 */ 1726 p4dp = p4d_offset(pgdp, 0UL); 1727 for (i = 0; i < PTRS_PER_P4D; i++) { 1728 if (!p4d_none(READ_ONCE(p4dp[i]))) 1729 return; 1730 } 1731 1732 pgd_clear(pgdp); 1733 __flush_tlb_kernel_pgtable(start); 1734 free_hotplug_pgtable_page(virt_to_page(p4dp)); 1735 } 1736 1737 static void free_empty_tables(unsigned long addr, unsigned long end, 1738 unsigned long floor, unsigned long ceiling) 1739 { 1740 unsigned long next; 1741 pgd_t *pgdp, pgd; 1742 1743 do { 1744 next = pgd_addr_end(addr, end); 1745 pgdp = pgd_offset_k(addr); 1746 pgd = READ_ONCE(*pgdp); 1747 if (pgd_none(pgd)) 1748 continue; 1749 1750 WARN_ON(!pgd_present(pgd)); 1751 free_empty_p4d_table(pgdp, addr, next, floor, ceiling); 1752 } while (addr = next, addr < end); 1753 } 1754 #endif 1755 1756 void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node, 1757 unsigned long addr, unsigned long next) 1758 { 1759 pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); 1760 } 1761 1762 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, 1763 unsigned long addr, unsigned long next) 1764 { 1765 vmemmap_verify((pte_t *)pmdp, node, addr, next); 1766 1767 return pmd_sect(READ_ONCE(*pmdp)); 1768 } 1769 1770 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, 1771 struct vmem_altmap *altmap) 1772 { 1773 WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); 1774 /* [start, end] should be within one section */ 1775 WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page)); 1776 1777 if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) || 1778 (end - start < PAGES_PER_SECTION * sizeof(struct page))) 1779 return vmemmap_populate_basepages(start, end, node, altmap); 1780 else 1781 return vmemmap_populate_hugepages(start, end, node, altmap); 1782 } 1783 1784 #ifdef CONFIG_MEMORY_HOTPLUG 1785 void vmemmap_free(unsigned long start, unsigned long end, 1786 struct vmem_altmap *altmap) 1787 { 1788 WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); 1789 1790 unmap_hotplug_range(start, end, true, altmap); 1791 free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); 1792 } 1793 #endif /* CONFIG_MEMORY_HOTPLUG */ 1794 1795 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) 1796 { 1797 pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot)); 1798 1799 /* Only allow permission changes for now */ 1800 if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)), 1801 pud_val(new_pud))) 1802 return 0; 1803 1804 VM_BUG_ON(phys & ~PUD_MASK); 1805 set_pud(pudp, new_pud); 1806 return 1; 1807 } 1808 1809 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) 1810 { 1811 pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot)); 1812 1813 /* Only allow permission changes for now */ 1814 if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)), 1815 pmd_val(new_pmd))) 1816 return 0; 1817 1818 VM_BUG_ON(phys & ~PMD_MASK); 1819 set_pmd(pmdp, new_pmd); 1820 return 1; 1821 } 1822 1823 #ifndef __PAGETABLE_P4D_FOLDED 1824 void p4d_clear_huge(p4d_t *p4dp) 1825 { 1826 } 1827 #endif 1828 1829 int pud_clear_huge(pud_t *pudp) 1830 { 1831 if (!pud_sect(READ_ONCE(*pudp))) 1832 return 0; 1833 pud_clear(pudp); 1834 return 1; 1835 } 1836 1837 int pmd_clear_huge(pmd_t *pmdp) 1838 { 1839 if (!pmd_sect(READ_ONCE(*pmdp))) 1840 return 0; 1841 pmd_clear(pmdp); 1842 return 1; 1843 } 1844 1845 static int __pmd_free_pte_page(pmd_t *pmdp, unsigned long addr, 1846 bool acquire_mmap_lock) 1847 { 1848 pte_t *table; 1849 pmd_t pmd; 1850 1851 pmd = READ_ONCE(*pmdp); 1852 1853 if (!pmd_table(pmd)) { 1854 VM_WARN_ON(1); 1855 return 1; 1856 } 1857 1858 /* See comment in pud_free_pmd_page for static key logic */ 1859 table = pte_offset_kernel(pmdp, addr); 1860 pmd_clear(pmdp); 1861 __flush_tlb_kernel_pgtable(addr); 1862 if (static_branch_unlikely(&arm64_ptdump_lock_key) && acquire_mmap_lock) { 1863 mmap_read_lock(&init_mm); 1864 mmap_read_unlock(&init_mm); 1865 } 1866 1867 pte_free_kernel(NULL, table); 1868 return 1; 1869 } 1870 1871 int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) 1872 { 1873 /* If ptdump is walking the pagetables, acquire init_mm.mmap_lock */ 1874 return __pmd_free_pte_page(pmdp, addr, /* acquire_mmap_lock = */ true); 1875 } 1876 1877 int pud_free_pmd_page(pud_t *pudp, unsigned long addr) 1878 { 1879 pmd_t *table; 1880 pmd_t *pmdp; 1881 pud_t pud; 1882 unsigned long next, end; 1883 1884 pud = READ_ONCE(*pudp); 1885 1886 if (!pud_table(pud)) { 1887 VM_WARN_ON(1); 1888 return 1; 1889 } 1890 1891 table = pmd_offset(pudp, addr); 1892 1893 /* 1894 * Our objective is to prevent ptdump from reading a PMD table which has 1895 * been freed. In this race, if pud_free_pmd_page observes the key on 1896 * (which got flipped by ptdump) then the mmap lock sequence here will, 1897 * as a result of the mmap write lock/unlock sequence in ptdump, give 1898 * us the correct synchronization. If not, this means that ptdump has 1899 * yet not started walking the pagetables - the sequence of barriers 1900 * issued by __flush_tlb_kernel_pgtable() guarantees that ptdump will 1901 * observe an empty PUD. 1902 */ 1903 pud_clear(pudp); 1904 __flush_tlb_kernel_pgtable(addr); 1905 if (static_branch_unlikely(&arm64_ptdump_lock_key)) { 1906 mmap_read_lock(&init_mm); 1907 mmap_read_unlock(&init_mm); 1908 } 1909 1910 pmdp = table; 1911 next = addr; 1912 end = addr + PUD_SIZE; 1913 do { 1914 if (pmd_present(pmdp_get(pmdp))) 1915 /* 1916 * PMD has been isolated, so ptdump won't see it. No 1917 * need to acquire init_mm.mmap_lock. 1918 */ 1919 __pmd_free_pte_page(pmdp, next, /* acquire_mmap_lock = */ false); 1920 } while (pmdp++, next += PMD_SIZE, next != end); 1921 1922 pmd_free(NULL, table); 1923 return 1; 1924 } 1925 1926 #ifdef CONFIG_MEMORY_HOTPLUG 1927 static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) 1928 { 1929 unsigned long end = start + size; 1930 1931 WARN_ON(pgdir != init_mm.pgd); 1932 WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END)); 1933 1934 unmap_hotplug_range(start, end, false, NULL); 1935 free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); 1936 } 1937 1938 struct range arch_get_mappable_range(void) 1939 { 1940 struct range mhp_range; 1941 phys_addr_t start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual)); 1942 phys_addr_t end_linear_pa = __pa(PAGE_END - 1); 1943 1944 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { 1945 /* 1946 * Check for a wrap, it is possible because of randomized linear 1947 * mapping the start physical address is actually bigger than 1948 * the end physical address. In this case set start to zero 1949 * because [0, end_linear_pa] range must still be able to cover 1950 * all addressable physical addresses. 1951 */ 1952 if (start_linear_pa > end_linear_pa) 1953 start_linear_pa = 0; 1954 } 1955 1956 WARN_ON(start_linear_pa > end_linear_pa); 1957 1958 /* 1959 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] 1960 * accommodating both its ends but excluding PAGE_END. Max physical 1961 * range which can be mapped inside this linear mapping range, must 1962 * also be derived from its end points. 1963 */ 1964 mhp_range.start = start_linear_pa; 1965 mhp_range.end = end_linear_pa; 1966 1967 return mhp_range; 1968 } 1969 1970 int arch_add_memory(int nid, u64 start, u64 size, 1971 struct mhp_params *params) 1972 { 1973 int ret, flags = NO_EXEC_MAPPINGS; 1974 1975 VM_BUG_ON(!mhp_range_allowed(start, size, true)); 1976 1977 if (force_pte_mapping()) 1978 flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 1979 1980 ret = __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), 1981 size, params->pgprot, pgd_pgtable_alloc_init_mm, 1982 flags); 1983 if (ret) 1984 goto err; 1985 1986 memblock_clear_nomap(start, size); 1987 1988 ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, 1989 params); 1990 if (ret) 1991 goto err; 1992 1993 /* Address of hotplugged memory can be smaller */ 1994 max_pfn = max(max_pfn, PFN_UP(start + size)); 1995 max_low_pfn = max_pfn; 1996 1997 return 0; 1998 1999 err: 2000 __remove_pgd_mapping(swapper_pg_dir, 2001 __phys_to_virt(start), size); 2002 return ret; 2003 } 2004 2005 void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) 2006 { 2007 unsigned long start_pfn = start >> PAGE_SHIFT; 2008 unsigned long nr_pages = size >> PAGE_SHIFT; 2009 2010 __remove_pages(start_pfn, nr_pages, altmap); 2011 __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); 2012 } 2013 2014 /* 2015 * This memory hotplug notifier helps prevent boot memory from being 2016 * inadvertently removed as it blocks pfn range offlining process in 2017 * __offline_pages(). Hence this prevents both offlining as well as 2018 * removal process for boot memory which is initially always online. 2019 * In future if and when boot memory could be removed, this notifier 2020 * should be dropped and free_hotplug_page_range() should handle any 2021 * reserved pages allocated during boot. 2022 */ 2023 static int prevent_bootmem_remove_notifier(struct notifier_block *nb, 2024 unsigned long action, void *data) 2025 { 2026 struct mem_section *ms; 2027 struct memory_notify *arg = data; 2028 unsigned long end_pfn = arg->start_pfn + arg->nr_pages; 2029 unsigned long pfn = arg->start_pfn; 2030 2031 if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE)) 2032 return NOTIFY_OK; 2033 2034 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 2035 unsigned long start = PFN_PHYS(pfn); 2036 unsigned long end = start + (1UL << PA_SECTION_SHIFT); 2037 2038 ms = __pfn_to_section(pfn); 2039 if (!early_section(ms)) 2040 continue; 2041 2042 if (action == MEM_GOING_OFFLINE) { 2043 /* 2044 * Boot memory removal is not supported. Prevent 2045 * it via blocking any attempted offline request 2046 * for the boot memory and just report it. 2047 */ 2048 pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end); 2049 return NOTIFY_BAD; 2050 } else if (action == MEM_OFFLINE) { 2051 /* 2052 * This should have never happened. Boot memory 2053 * offlining should have been prevented by this 2054 * very notifier. Probably some memory removal 2055 * procedure might have changed which would then 2056 * require further debug. 2057 */ 2058 pr_err("Boot memory [%lx %lx] offlined\n", start, end); 2059 2060 /* 2061 * Core memory hotplug does not process a return 2062 * code from the notifier for MEM_OFFLINE events. 2063 * The error condition has been reported. Return 2064 * from here as if ignored. 2065 */ 2066 return NOTIFY_DONE; 2067 } 2068 } 2069 return NOTIFY_OK; 2070 } 2071 2072 static struct notifier_block prevent_bootmem_remove_nb = { 2073 .notifier_call = prevent_bootmem_remove_notifier, 2074 }; 2075 2076 /* 2077 * This ensures that boot memory sections on the platform are online 2078 * from early boot. Memory sections could not be prevented from being 2079 * offlined, unless for some reason they are not online to begin with. 2080 * This helps validate the basic assumption on which the above memory 2081 * event notifier works to prevent boot memory section offlining and 2082 * its possible removal. 2083 */ 2084 static void validate_bootmem_online(void) 2085 { 2086 phys_addr_t start, end, addr; 2087 struct mem_section *ms; 2088 u64 i; 2089 2090 /* 2091 * Scanning across all memblock might be expensive 2092 * on some big memory systems. Hence enable this 2093 * validation only with DEBUG_VM. 2094 */ 2095 if (!IS_ENABLED(CONFIG_DEBUG_VM)) 2096 return; 2097 2098 for_each_mem_range(i, &start, &end) { 2099 for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) { 2100 ms = __pfn_to_section(PHYS_PFN(addr)); 2101 2102 /* 2103 * All memory ranges in the system at this point 2104 * should have been marked as early sections. 2105 */ 2106 WARN_ON(!early_section(ms)); 2107 2108 /* 2109 * Memory notifier mechanism here to prevent boot 2110 * memory offlining depends on the fact that each 2111 * early section memory on the system is initially 2112 * online. Otherwise a given memory section which 2113 * is already offline will be overlooked and can 2114 * be removed completely. Call out such sections. 2115 */ 2116 if (!online_section(ms)) 2117 pr_err("Boot memory [%llx %llx] is offline, can be removed\n", 2118 addr, addr + (1UL << PA_SECTION_SHIFT)); 2119 } 2120 } 2121 } 2122 2123 static int __init prevent_bootmem_remove_init(void) 2124 { 2125 int ret = 0; 2126 2127 if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 2128 return ret; 2129 2130 validate_bootmem_online(); 2131 ret = register_memory_notifier(&prevent_bootmem_remove_nb); 2132 if (ret) 2133 pr_err("%s: Notifier registration failed %d\n", __func__, ret); 2134 2135 return ret; 2136 } 2137 early_initcall(prevent_bootmem_remove_init); 2138 #endif 2139 2140 pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr, 2141 pte_t *ptep, unsigned int nr) 2142 { 2143 pte_t pte = get_and_clear_ptes(vma->vm_mm, addr, ptep, nr); 2144 2145 if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { 2146 /* 2147 * Break-before-make (BBM) is required for all user space mappings 2148 * when the permission changes from executable to non-executable 2149 * in cases where cpu is affected with errata #2645198. 2150 */ 2151 if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte)) 2152 __flush_tlb_range(vma, addr, nr * PAGE_SIZE, 2153 PAGE_SIZE, true, 3); 2154 } 2155 2156 return pte; 2157 } 2158 2159 pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 2160 { 2161 return modify_prot_start_ptes(vma, addr, ptep, 1); 2162 } 2163 2164 void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, 2165 pte_t *ptep, pte_t old_pte, pte_t pte, 2166 unsigned int nr) 2167 { 2168 set_ptes(vma->vm_mm, addr, ptep, pte, nr); 2169 } 2170 2171 void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 2172 pte_t old_pte, pte_t pte) 2173 { 2174 modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, 1); 2175 } 2176 2177 /* 2178 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, 2179 * avoiding the possibility of conflicting TLB entries being allocated. 2180 */ 2181 void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp) 2182 { 2183 typedef void (ttbr_replace_func)(phys_addr_t); 2184 extern ttbr_replace_func idmap_cpu_replace_ttbr1; 2185 ttbr_replace_func *replace_phys; 2186 unsigned long daif; 2187 2188 /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */ 2189 phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp)); 2190 2191 if (cnp) 2192 ttbr1 |= TTBR_CNP_BIT; 2193 2194 replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); 2195 2196 cpu_install_idmap(); 2197 2198 /* 2199 * We really don't want to take *any* exceptions while TTBR1 is 2200 * in the process of being replaced so mask everything. 2201 */ 2202 daif = local_daif_save(); 2203 replace_phys(ttbr1); 2204 local_daif_restore(daif); 2205 2206 cpu_uninstall_idmap(); 2207 } 2208 2209 #ifdef CONFIG_ARCH_HAS_PKEYS 2210 int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) 2211 { 2212 u64 new_por; 2213 u64 old_por; 2214 2215 if (!system_supports_poe()) 2216 return -ENOSPC; 2217 2218 /* 2219 * This code should only be called with valid 'pkey' 2220 * values originating from in-kernel users. Complain 2221 * if a bad value is observed. 2222 */ 2223 if (WARN_ON_ONCE(pkey >= arch_max_pkey())) 2224 return -EINVAL; 2225 2226 /* Set the bits we need in POR: */ 2227 new_por = POE_RWX; 2228 if (init_val & PKEY_DISABLE_WRITE) 2229 new_por &= ~POE_W; 2230 if (init_val & PKEY_DISABLE_ACCESS) 2231 new_por &= ~POE_RW; 2232 if (init_val & PKEY_DISABLE_READ) 2233 new_por &= ~POE_R; 2234 if (init_val & PKEY_DISABLE_EXECUTE) 2235 new_por &= ~POE_X; 2236 2237 /* Shift the bits in to the correct place in POR for pkey: */ 2238 new_por = POR_ELx_PERM_PREP(pkey, new_por); 2239 2240 /* Get old POR and mask off any old bits in place: */ 2241 old_por = read_sysreg_s(SYS_POR_EL0); 2242 old_por &= ~(POE_MASK << POR_ELx_PERM_SHIFT(pkey)); 2243 2244 /* Write old part along with new part: */ 2245 write_sysreg_s(old_por | new_por, SYS_POR_EL0); 2246 2247 return 0; 2248 } 2249 #endif 2250