1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * Copyright (C) 2012 ARM Ltd. 4 */ 5 #ifndef __ASM_PGTABLE_H 6 #define __ASM_PGTABLE_H 7 8 #include <asm/bug.h> 9 #include <asm/proc-fns.h> 10 11 #include <asm/memory.h> 12 #include <asm/mte.h> 13 #include <asm/pgtable-hwdef.h> 14 #include <asm/pgtable-prot.h> 15 #include <asm/tlbflush.h> 16 17 /* 18 * VMALLOC range. 19 * 20 * VMALLOC_START: beginning of the kernel vmalloc space 21 * VMALLOC_END: extends to the available space below vmemmap 22 */ 23 #define VMALLOC_START (MODULES_END) 24 #if VA_BITS == VA_BITS_MIN 25 #define VMALLOC_END (VMEMMAP_START - SZ_8M) 26 #else 27 #define VMEMMAP_UNUSED_NPAGES ((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT) 28 #define VMALLOC_END (VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M) 29 #endif 30 31 #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) 32 33 #ifndef __ASSEMBLER__ 34 35 #include <asm/cmpxchg.h> 36 #include <asm/fixmap.h> 37 #include <asm/por.h> 38 #include <linux/mmdebug.h> 39 #include <linux/mm_types.h> 40 #include <linux/sched.h> 41 #include <linux/page_table_check.h> 42 43 static inline void emit_pte_barriers(void) 44 { 45 /* 46 * These barriers are emitted under certain conditions after a pte entry 47 * was modified (see e.g. __set_pte_complete()). The dsb makes the store 48 * visible to the table walker. The isb ensures that any previous 49 * speculative "invalid translation" marker that is in the CPU's 50 * pipeline gets cleared, so that any access to that address after 51 * setting the pte to valid won't cause a spurious fault. If the thread 52 * gets preempted after storing to the pgtable but before emitting these 53 * barriers, __switch_to() emits a dsb which ensure the walker gets to 54 * see the store. There is no guarantee of an isb being issued though. 55 * This is safe because it will still get issued (albeit on a 56 * potentially different CPU) when the thread starts running again, 57 * before any access to the address. 58 */ 59 dsb(ishst); 60 isb(); 61 } 62 63 static inline void queue_pte_barriers(void) 64 { 65 unsigned long flags; 66 67 if (in_interrupt()) { 68 emit_pte_barriers(); 69 return; 70 } 71 72 flags = read_thread_flags(); 73 74 if (flags & BIT(TIF_LAZY_MMU)) { 75 /* Avoid the atomic op if already set. */ 76 if (!(flags & BIT(TIF_LAZY_MMU_PENDING))) 77 set_thread_flag(TIF_LAZY_MMU_PENDING); 78 } else { 79 emit_pte_barriers(); 80 } 81 } 82 83 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 84 static inline void arch_enter_lazy_mmu_mode(void) 85 { 86 /* 87 * lazy_mmu_mode is not supposed to permit nesting. But in practice this 88 * does happen with CONFIG_DEBUG_PAGEALLOC, where a page allocation 89 * inside a lazy_mmu_mode section (such as zap_pte_range()) will change 90 * permissions on the linear map with apply_to_page_range(), which 91 * re-enters lazy_mmu_mode. So we tolerate nesting in our 92 * implementation. The first call to arch_leave_lazy_mmu_mode() will 93 * flush and clear the flag such that the remainder of the work in the 94 * outer nest behaves as if outside of lazy mmu mode. This is safe and 95 * keeps tracking simple. 96 */ 97 98 if (in_interrupt()) 99 return; 100 101 set_thread_flag(TIF_LAZY_MMU); 102 } 103 104 static inline void arch_flush_lazy_mmu_mode(void) 105 { 106 if (in_interrupt()) 107 return; 108 109 if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING)) 110 emit_pte_barriers(); 111 } 112 113 static inline void arch_leave_lazy_mmu_mode(void) 114 { 115 if (in_interrupt()) 116 return; 117 118 arch_flush_lazy_mmu_mode(); 119 clear_thread_flag(TIF_LAZY_MMU); 120 } 121 122 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 123 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE 124 125 /* Set stride and tlb_level in flush_*_tlb_range */ 126 #define flush_pmd_tlb_range(vma, addr, end) \ 127 __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2) 128 #define flush_pud_tlb_range(vma, addr, end) \ 129 __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1) 130 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 131 132 /* 133 * We use local TLB invalidation instruction when reusing page in 134 * write protection fault handler to avoid TLBI broadcast in the hot 135 * path. This will cause spurious page faults if stale read-only TLB 136 * entries exist. 137 */ 138 #define flush_tlb_fix_spurious_fault(vma, address, ptep) \ 139 local_flush_tlb_page_nonotify(vma, address) 140 141 #define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) \ 142 local_flush_tlb_page_nonotify(vma, address) 143 144 /* 145 * ZERO_PAGE is a global shared page that is always zero: used 146 * for zero-mapped memory areas etc.. 147 */ 148 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 149 #define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page)) 150 151 #define pte_ERROR(e) \ 152 pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e)) 153 154 #ifdef CONFIG_ARM64_PA_BITS_52 155 static inline phys_addr_t __pte_to_phys(pte_t pte) 156 { 157 pte_val(pte) &= ~PTE_MAYBE_SHARED; 158 return (pte_val(pte) & PTE_ADDR_LOW) | 159 ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT); 160 } 161 static inline pteval_t __phys_to_pte_val(phys_addr_t phys) 162 { 163 return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK; 164 } 165 #else 166 static inline phys_addr_t __pte_to_phys(pte_t pte) 167 { 168 return pte_val(pte) & PTE_ADDR_LOW; 169 } 170 171 static inline pteval_t __phys_to_pte_val(phys_addr_t phys) 172 { 173 return phys; 174 } 175 #endif 176 177 #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT) 178 #define pfn_pte(pfn,prot) \ 179 __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 180 181 #define pte_none(pte) (!pte_val(pte)) 182 #define __pte_clear(mm, addr, ptep) \ 183 __set_pte(ptep, __pte(0)) 184 #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) 185 186 /* 187 * The following only work if pte_present(). Undefined behaviour otherwise. 188 */ 189 #define pte_present(pte) (pte_valid(pte) || pte_present_invalid(pte)) 190 #define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) 191 #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) 192 #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) 193 #define pte_rdonly(pte) (!!(pte_val(pte) & PTE_RDONLY)) 194 #define pte_user(pte) (!!(pte_val(pte) & PTE_USER)) 195 #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) 196 #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) 197 #define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \ 198 PTE_ATTRINDX(MT_NORMAL_TAGGED)) 199 200 #define pte_cont_addr_end(addr, end) \ 201 ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \ 202 (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 203 }) 204 205 #define pmd_cont_addr_end(addr, end) \ 206 ({ unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \ 207 (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 208 }) 209 210 #define pte_hw_dirty(pte) (pte_write(pte) && !pte_rdonly(pte)) 211 #define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) 212 #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) 213 214 #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) 215 #define pte_present_invalid(pte) \ 216 ((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID) 217 /* 218 * Execute-only user mappings do not have the PTE_USER bit set. All valid 219 * kernel mappings have the PTE_UXN bit set. 220 */ 221 #define pte_valid_not_user(pte) \ 222 ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) 223 /* 224 * Returns true if the pte is valid and has the contiguous bit set. 225 */ 226 #define pte_valid_cont(pte) (pte_valid(pte) && pte_cont(pte)) 227 /* 228 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending 229 * so that we don't erroneously return false for pages that have been 230 * remapped as PROT_NONE but are yet to be flushed from the TLB. 231 * Note that we can't make any assumptions based on the state of the access 232 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the 233 * TLB. 234 */ 235 #define pte_accessible(mm, pte) \ 236 (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) 237 238 static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute) 239 { 240 u64 por; 241 242 if (!system_supports_poe()) 243 return true; 244 245 por = read_sysreg_s(SYS_POR_EL0); 246 247 if (write) 248 return por_elx_allows_write(por, pkey); 249 250 if (execute) 251 return por_elx_allows_exec(por, pkey); 252 253 return por_elx_allows_read(por, pkey); 254 } 255 256 /* 257 * p??_access_permitted() is true for valid user mappings (PTE_USER 258 * bit set, subject to the write permission check). For execute-only 259 * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits 260 * not set) must return false. PROT_NONE mappings do not have the 261 * PTE_VALID bit set. 262 */ 263 #define pte_access_permitted_no_overlay(pte, write) \ 264 (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte))) 265 #define pte_access_permitted(pte, write) \ 266 (pte_access_permitted_no_overlay(pte, write) && \ 267 por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false)) 268 #define pmd_access_permitted(pmd, write) \ 269 (pte_access_permitted(pmd_pte(pmd), (write))) 270 #define pud_access_permitted(pud, write) \ 271 (pte_access_permitted(pud_pte(pud), (write))) 272 273 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) 274 { 275 pte_val(pte) &= ~pgprot_val(prot); 276 return pte; 277 } 278 279 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) 280 { 281 pte_val(pte) |= pgprot_val(prot); 282 return pte; 283 } 284 285 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) 286 { 287 pmd_val(pmd) &= ~pgprot_val(prot); 288 return pmd; 289 } 290 291 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) 292 { 293 pmd_val(pmd) |= pgprot_val(prot); 294 return pmd; 295 } 296 297 static inline pte_t pte_mkwrite_novma(pte_t pte) 298 { 299 pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); 300 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 301 return pte; 302 } 303 304 static inline pte_t pte_mkclean(pte_t pte) 305 { 306 pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); 307 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 308 309 return pte; 310 } 311 312 static inline pte_t pte_mkdirty(pte_t pte) 313 { 314 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 315 316 if (pte_write(pte)) 317 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 318 319 return pte; 320 } 321 322 static inline pte_t pte_wrprotect(pte_t pte) 323 { 324 /* 325 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY 326 * clear), set the PTE_DIRTY bit. 327 */ 328 if (pte_hw_dirty(pte)) 329 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 330 331 pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); 332 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 333 return pte; 334 } 335 336 static inline pte_t pte_mkold(pte_t pte) 337 { 338 return clear_pte_bit(pte, __pgprot(PTE_AF)); 339 } 340 341 static inline pte_t pte_mkyoung(pte_t pte) 342 { 343 return set_pte_bit(pte, __pgprot(PTE_AF)); 344 } 345 346 static inline pte_t pte_mkspecial(pte_t pte) 347 { 348 return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); 349 } 350 351 static inline pte_t pte_mkcont(pte_t pte) 352 { 353 return set_pte_bit(pte, __pgprot(PTE_CONT)); 354 } 355 356 static inline pte_t pte_mknoncont(pte_t pte) 357 { 358 return clear_pte_bit(pte, __pgprot(PTE_CONT)); 359 } 360 361 static inline pte_t pte_mkvalid(pte_t pte) 362 { 363 return set_pte_bit(pte, __pgprot(PTE_VALID)); 364 } 365 366 static inline pte_t pte_mkinvalid(pte_t pte) 367 { 368 pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID)); 369 pte = clear_pte_bit(pte, __pgprot(PTE_VALID)); 370 return pte; 371 } 372 373 static inline pmd_t pmd_mkcont(pmd_t pmd) 374 { 375 return __pmd(pmd_val(pmd) | PMD_SECT_CONT); 376 } 377 378 static inline pmd_t pmd_mknoncont(pmd_t pmd) 379 { 380 return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT); 381 } 382 383 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP 384 static inline int pte_uffd_wp(pte_t pte) 385 { 386 return !!(pte_val(pte) & PTE_UFFD_WP); 387 } 388 389 static inline pte_t pte_mkuffd_wp(pte_t pte) 390 { 391 return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP))); 392 } 393 394 static inline pte_t pte_clear_uffd_wp(pte_t pte) 395 { 396 return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP)); 397 } 398 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */ 399 400 static inline void __set_pte_nosync(pte_t *ptep, pte_t pte) 401 { 402 WRITE_ONCE(*ptep, pte); 403 } 404 405 static inline void __set_pte_complete(pte_t pte) 406 { 407 /* 408 * Only if the new pte is valid and kernel, otherwise TLB maintenance 409 * has the necessary barriers. 410 */ 411 if (pte_valid_not_user(pte)) 412 queue_pte_barriers(); 413 } 414 415 static inline void __set_pte(pte_t *ptep, pte_t pte) 416 { 417 __set_pte_nosync(ptep, pte); 418 __set_pte_complete(pte); 419 } 420 421 static inline pte_t __ptep_get(pte_t *ptep) 422 { 423 return READ_ONCE(*ptep); 424 } 425 426 extern void __sync_icache_dcache(pte_t pteval); 427 bool pgattr_change_is_safe(pteval_t old, pteval_t new); 428 429 /* 430 * PTE bits configuration in the presence of hardware Dirty Bit Management 431 * (PTE_WRITE == PTE_DBM): 432 * 433 * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw) 434 * 0 0 | 1 0 0 435 * 0 1 | 1 1 0 436 * 1 0 | 1 0 1 437 * 1 1 | 0 1 x 438 * 439 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via 440 * the page fault mechanism. Checking the dirty status of a pte becomes: 441 * 442 * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) 443 */ 444 445 static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, 446 pte_t pte) 447 { 448 pte_t old_pte; 449 450 if (!IS_ENABLED(CONFIG_DEBUG_VM)) 451 return; 452 453 old_pte = __ptep_get(ptep); 454 455 if (!pte_valid(old_pte) || !pte_valid(pte)) 456 return; 457 if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1) 458 return; 459 460 /* 461 * Check for potential race with hardware updates of the pte 462 * (__ptep_set_access_flags safely changes valid ptes without going 463 * through an invalid entry). 464 */ 465 VM_WARN_ONCE(!pte_young(pte), 466 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", 467 __func__, pte_val(old_pte), pte_val(pte)); 468 VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte), 469 "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", 470 __func__, pte_val(old_pte), pte_val(pte)); 471 VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)), 472 "%s: unsafe attribute change: 0x%016llx -> 0x%016llx", 473 __func__, pte_val(old_pte), pte_val(pte)); 474 } 475 476 static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages) 477 { 478 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) 479 __sync_icache_dcache(pte); 480 481 /* 482 * If the PTE would provide user space access to the tags associated 483 * with it then ensure that the MTE tags are synchronised. Although 484 * pte_access_permitted_no_overlay() returns false for exec only 485 * mappings, they don't expose tags (instruction fetches don't check 486 * tags). 487 */ 488 if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) && 489 !pte_special(pte) && pte_tagged(pte)) 490 mte_sync_tags(pte, nr_pages); 491 } 492 493 /* 494 * Select all bits except the pfn 495 */ 496 #define pte_pgprot pte_pgprot 497 static inline pgprot_t pte_pgprot(pte_t pte) 498 { 499 unsigned long pfn = pte_pfn(pte); 500 501 return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); 502 } 503 504 #define pte_advance_pfn pte_advance_pfn 505 static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) 506 { 507 return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte)); 508 } 509 510 /* 511 * Hugetlb definitions. 512 */ 513 #define HUGE_MAX_HSTATE 4 514 #define HPAGE_SHIFT PMD_SHIFT 515 #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) 516 #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 517 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 518 519 static inline pte_t pgd_pte(pgd_t pgd) 520 { 521 return __pte(pgd_val(pgd)); 522 } 523 524 static inline pte_t p4d_pte(p4d_t p4d) 525 { 526 return __pte(p4d_val(p4d)); 527 } 528 529 static inline pte_t pud_pte(pud_t pud) 530 { 531 return __pte(pud_val(pud)); 532 } 533 534 static inline pud_t pte_pud(pte_t pte) 535 { 536 return __pud(pte_val(pte)); 537 } 538 539 static inline pmd_t pud_pmd(pud_t pud) 540 { 541 return __pmd(pud_val(pud)); 542 } 543 544 static inline pte_t pmd_pte(pmd_t pmd) 545 { 546 return __pte(pmd_val(pmd)); 547 } 548 549 static inline pmd_t pte_pmd(pte_t pte) 550 { 551 return __pmd(pte_val(pte)); 552 } 553 554 static inline pgprot_t mk_pud_sect_prot(pgprot_t prot) 555 { 556 return __pgprot((pgprot_val(prot) & ~PUD_TYPE_MASK) | PUD_TYPE_SECT); 557 } 558 559 static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot) 560 { 561 return __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT); 562 } 563 564 static inline pte_t pte_swp_mkexclusive(pte_t pte) 565 { 566 return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); 567 } 568 569 static inline bool pte_swp_exclusive(pte_t pte) 570 { 571 return pte_val(pte) & PTE_SWP_EXCLUSIVE; 572 } 573 574 static inline pte_t pte_swp_clear_exclusive(pte_t pte) 575 { 576 return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); 577 } 578 579 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP 580 static inline pte_t pte_swp_mkuffd_wp(pte_t pte) 581 { 582 return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP)); 583 } 584 585 static inline int pte_swp_uffd_wp(pte_t pte) 586 { 587 return !!(pte_val(pte) & PTE_SWP_UFFD_WP); 588 } 589 590 static inline pte_t pte_swp_clear_uffd_wp(pte_t pte) 591 { 592 return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP)); 593 } 594 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */ 595 596 #ifdef CONFIG_NUMA_BALANCING 597 /* 598 * See the comment in include/linux/pgtable.h 599 */ 600 static inline int pte_protnone(pte_t pte) 601 { 602 /* 603 * pte_present_invalid() tells us that the pte is invalid from HW 604 * perspective but present from SW perspective, so the fields are to be 605 * interpreted as per the HW layout. The second 2 checks are the unique 606 * encoding that we use for PROT_NONE. It is insufficient to only use 607 * the first check because we share the same encoding scheme with pmds 608 * which support pmd_mkinvalid(), so can be present-invalid without 609 * being PROT_NONE. 610 */ 611 return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte); 612 } 613 614 static inline int pmd_protnone(pmd_t pmd) 615 { 616 return pte_protnone(pmd_pte(pmd)); 617 } 618 #endif 619 620 #define pmd_present(pmd) pte_present(pmd_pte(pmd)) 621 #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) 622 #define pmd_young(pmd) pte_young(pmd_pte(pmd)) 623 #define pmd_valid(pmd) pte_valid(pmd_pte(pmd)) 624 #define pmd_user(pmd) pte_user(pmd_pte(pmd)) 625 #define pmd_user_exec(pmd) pte_user_exec(pmd_pte(pmd)) 626 #define pmd_cont(pmd) pte_cont(pmd_pte(pmd)) 627 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) 628 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) 629 #define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))) 630 #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) 631 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) 632 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) 633 #define pmd_mkinvalid(pmd) pte_pmd(pte_mkinvalid(pmd_pte(pmd))) 634 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP 635 #define pmd_uffd_wp(pmd) pte_uffd_wp(pmd_pte(pmd)) 636 #define pmd_mkuffd_wp(pmd) pte_pmd(pte_mkuffd_wp(pmd_pte(pmd))) 637 #define pmd_clear_uffd_wp(pmd) pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd))) 638 #define pmd_swp_uffd_wp(pmd) pte_swp_uffd_wp(pmd_pte(pmd)) 639 #define pmd_swp_mkuffd_wp(pmd) pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd))) 640 #define pmd_swp_clear_uffd_wp(pmd) \ 641 pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd))) 642 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */ 643 644 #define pmd_write(pmd) pte_write(pmd_pte(pmd)) 645 646 static inline pmd_t pmd_mkhuge(pmd_t pmd) 647 { 648 /* 649 * It's possible that the pmd is present-invalid on entry 650 * and in that case it needs to remain present-invalid on 651 * exit. So ensure the VALID bit does not get modified. 652 */ 653 pmdval_t mask = PMD_TYPE_MASK & ~PTE_VALID; 654 pmdval_t val = PMD_TYPE_SECT & ~PTE_VALID; 655 656 return __pmd((pmd_val(pmd) & ~mask) | val); 657 } 658 659 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP 660 #define pmd_special(pte) (!!((pmd_val(pte) & PTE_SPECIAL))) 661 static inline pmd_t pmd_mkspecial(pmd_t pmd) 662 { 663 return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL)); 664 } 665 #endif 666 667 #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) 668 #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) 669 #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) 670 #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 671 672 #define pud_young(pud) pte_young(pud_pte(pud)) 673 #define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) 674 #define pud_write(pud) pte_write(pud_pte(pud)) 675 676 static inline pud_t pud_mkhuge(pud_t pud) 677 { 678 /* 679 * It's possible that the pud is present-invalid on entry 680 * and in that case it needs to remain present-invalid on 681 * exit. So ensure the VALID bit does not get modified. 682 */ 683 pudval_t mask = PUD_TYPE_MASK & ~PTE_VALID; 684 pudval_t val = PUD_TYPE_SECT & ~PTE_VALID; 685 686 return __pud((pud_val(pud) & ~mask) | val); 687 } 688 689 #define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud)) 690 #define __phys_to_pud_val(phys) __phys_to_pte_val(phys) 691 #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) 692 #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 693 694 #define pmd_pgprot pmd_pgprot 695 static inline pgprot_t pmd_pgprot(pmd_t pmd) 696 { 697 unsigned long pfn = pmd_pfn(pmd); 698 699 return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd)); 700 } 701 702 #define pud_pgprot pud_pgprot 703 static inline pgprot_t pud_pgprot(pud_t pud) 704 { 705 unsigned long pfn = pud_pfn(pud); 706 707 return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud)); 708 } 709 710 static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep, 711 pte_t pte, unsigned int nr, 712 unsigned long pgsize) 713 { 714 unsigned long stride = pgsize >> PAGE_SHIFT; 715 716 switch (pgsize) { 717 case PAGE_SIZE: 718 page_table_check_ptes_set(mm, ptep, pte, nr); 719 break; 720 case PMD_SIZE: 721 page_table_check_pmds_set(mm, (pmd_t *)ptep, pte_pmd(pte), nr); 722 break; 723 #ifndef __PAGETABLE_PMD_FOLDED 724 case PUD_SIZE: 725 page_table_check_puds_set(mm, (pud_t *)ptep, pte_pud(pte), nr); 726 break; 727 #endif 728 default: 729 VM_WARN_ON(1); 730 } 731 732 __sync_cache_and_tags(pte, nr * stride); 733 734 for (;;) { 735 __check_safe_pte_update(mm, ptep, pte); 736 __set_pte_nosync(ptep, pte); 737 if (--nr == 0) 738 break; 739 ptep++; 740 pte = pte_advance_pfn(pte, stride); 741 } 742 743 __set_pte_complete(pte); 744 } 745 746 static inline void __set_ptes(struct mm_struct *mm, 747 unsigned long __always_unused addr, 748 pte_t *ptep, pte_t pte, unsigned int nr) 749 { 750 __set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE); 751 } 752 753 static inline void __set_pmds(struct mm_struct *mm, 754 unsigned long __always_unused addr, 755 pmd_t *pmdp, pmd_t pmd, unsigned int nr) 756 { 757 __set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE); 758 } 759 #define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1) 760 761 static inline void __set_puds(struct mm_struct *mm, 762 unsigned long __always_unused addr, 763 pud_t *pudp, pud_t pud, unsigned int nr) 764 { 765 __set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE); 766 } 767 #define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1) 768 769 #define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d)) 770 #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys) 771 772 #define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd)) 773 #define __phys_to_pgd_val(phys) __phys_to_pte_val(phys) 774 775 #define __pgprot_modify(prot,mask,bits) \ 776 __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) 777 778 #define pgprot_nx(prot) \ 779 __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN) 780 781 #define pgprot_decrypted(prot) \ 782 __pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED) 783 #define pgprot_encrypted(prot) \ 784 __pgprot_modify(prot, PROT_NS_SHARED, 0) 785 786 /* 787 * Mark the prot value as uncacheable and unbufferable. 788 */ 789 #define pgprot_noncached(prot) \ 790 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) 791 #define pgprot_writecombine(prot) \ 792 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 793 #define pgprot_device(prot) \ 794 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) 795 #define pgprot_tagged(prot) \ 796 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED)) 797 #define pgprot_mhp pgprot_tagged 798 /* 799 * DMA allocations for non-coherent devices use what the Arm architecture calls 800 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses 801 * and merging of writes. This is different from "Device-nGnR[nE]" memory which 802 * is intended for MMIO and thus forbids speculation, preserves access size, 803 * requires strict alignment and can also force write responses to come from the 804 * endpoint. 805 */ 806 #define pgprot_dmacoherent(prot) \ 807 __pgprot_modify(prot, PTE_ATTRINDX_MASK, \ 808 PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 809 810 #define __HAVE_PHYS_MEM_ACCESS_PROT 811 struct file; 812 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 813 unsigned long size, pgprot_t vma_prot); 814 815 #define pmd_none(pmd) (!pmd_val(pmd)) 816 817 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 818 PMD_TYPE_TABLE) 819 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 820 PMD_TYPE_SECT) 821 #define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd)) 822 #define pmd_bad(pmd) (!pmd_table(pmd)) 823 824 #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE) 825 #define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE) 826 827 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 828 static inline int pmd_trans_huge(pmd_t pmd) 829 { 830 /* 831 * If pmd is present-invalid, pmd_table() won't detect it 832 * as a table, so force the valid bit for the comparison. 833 */ 834 return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID)); 835 } 836 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 837 838 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 839 static inline bool pud_sect(pud_t pud) { return false; } 840 static inline bool pud_table(pud_t pud) { return true; } 841 #else 842 #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 843 PUD_TYPE_SECT) 844 #define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 845 PUD_TYPE_TABLE) 846 #endif 847 848 extern pgd_t swapper_pg_dir[]; 849 extern pgd_t idmap_pg_dir[]; 850 extern pgd_t tramp_pg_dir[]; 851 extern pgd_t reserved_pg_dir[]; 852 853 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); 854 855 static inline bool in_swapper_pgdir(void *addr) 856 { 857 return ((unsigned long)addr & PAGE_MASK) == 858 ((unsigned long)swapper_pg_dir & PAGE_MASK); 859 } 860 861 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 862 { 863 #ifdef __PAGETABLE_PMD_FOLDED 864 if (in_swapper_pgdir(pmdp)) { 865 set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd))); 866 return; 867 } 868 #endif /* __PAGETABLE_PMD_FOLDED */ 869 870 WRITE_ONCE(*pmdp, pmd); 871 872 if (pmd_valid(pmd)) 873 queue_pte_barriers(); 874 } 875 876 static inline void pmd_clear(pmd_t *pmdp) 877 { 878 set_pmd(pmdp, __pmd(0)); 879 } 880 881 static inline phys_addr_t pmd_page_paddr(pmd_t pmd) 882 { 883 return __pmd_to_phys(pmd); 884 } 885 886 static inline unsigned long pmd_page_vaddr(pmd_t pmd) 887 { 888 return (unsigned long)__va(pmd_page_paddr(pmd)); 889 } 890 891 /* Find an entry in the third-level page table. */ 892 #define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t)) 893 894 #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) 895 #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr)) 896 #define pte_clear_fixmap() clear_fixmap(FIX_PTE) 897 898 #define pmd_page(pmd) phys_to_page(__pmd_to_phys(pmd)) 899 900 /* use ONLY for statically allocated translation tables */ 901 #define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr)))) 902 903 #if CONFIG_PGTABLE_LEVELS > 2 904 905 #define pmd_ERROR(e) \ 906 pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) 907 908 #define pud_none(pud) (!pud_val(pud)) 909 #define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \ 910 PUD_TYPE_TABLE) 911 #define pud_present(pud) pte_present(pud_pte(pud)) 912 #ifndef __PAGETABLE_PMD_FOLDED 913 #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) 914 #else 915 #define pud_leaf(pud) false 916 #endif 917 #define pud_valid(pud) pte_valid(pud_pte(pud)) 918 #define pud_user(pud) pte_user(pud_pte(pud)) 919 #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) 920 921 static inline bool pgtable_l4_enabled(void); 922 923 static inline void set_pud(pud_t *pudp, pud_t pud) 924 { 925 if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) { 926 set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud))); 927 return; 928 } 929 930 WRITE_ONCE(*pudp, pud); 931 932 if (pud_valid(pud)) 933 queue_pte_barriers(); 934 } 935 936 static inline void pud_clear(pud_t *pudp) 937 { 938 set_pud(pudp, __pud(0)); 939 } 940 941 static inline phys_addr_t pud_page_paddr(pud_t pud) 942 { 943 return __pud_to_phys(pud); 944 } 945 946 static inline pmd_t *pud_pgtable(pud_t pud) 947 { 948 return (pmd_t *)__va(pud_page_paddr(pud)); 949 } 950 951 /* Find an entry in the second-level page table. */ 952 #define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t)) 953 954 #define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr)) 955 #define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr)) 956 #define pmd_clear_fixmap() clear_fixmap(FIX_PMD) 957 958 #define pud_page(pud) phys_to_page(__pud_to_phys(pud)) 959 960 /* use ONLY for statically allocated translation tables */ 961 #define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr)))) 962 963 #else 964 965 #define pud_valid(pud) false 966 #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; }) 967 #define pud_user_exec(pud) pud_user(pud) /* Always 0 with folding */ 968 969 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */ 970 #define pmd_set_fixmap(addr) NULL 971 #define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp) 972 #define pmd_clear_fixmap() 973 974 #define pmd_offset_kimg(dir,addr) ((pmd_t *)dir) 975 976 #endif /* CONFIG_PGTABLE_LEVELS > 2 */ 977 978 #if CONFIG_PGTABLE_LEVELS > 3 979 980 static __always_inline bool pgtable_l4_enabled(void) 981 { 982 if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2)) 983 return true; 984 if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT)) 985 return vabits_actual == VA_BITS; 986 return alternative_has_cap_unlikely(ARM64_HAS_VA52); 987 } 988 989 static inline bool mm_pud_folded(const struct mm_struct *mm) 990 { 991 return !pgtable_l4_enabled(); 992 } 993 #define mm_pud_folded mm_pud_folded 994 995 #define pud_ERROR(e) \ 996 pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e)) 997 998 #define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d)) 999 #define p4d_bad(p4d) (pgtable_l4_enabled() && \ 1000 ((p4d_val(p4d) & P4D_TYPE_MASK) != \ 1001 P4D_TYPE_TABLE)) 1002 #define p4d_present(p4d) (!p4d_none(p4d)) 1003 1004 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) 1005 { 1006 if (in_swapper_pgdir(p4dp)) { 1007 set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d))); 1008 return; 1009 } 1010 1011 WRITE_ONCE(*p4dp, p4d); 1012 queue_pte_barriers(); 1013 } 1014 1015 static inline void p4d_clear(p4d_t *p4dp) 1016 { 1017 if (pgtable_l4_enabled()) 1018 set_p4d(p4dp, __p4d(0)); 1019 } 1020 1021 static inline phys_addr_t p4d_page_paddr(p4d_t p4d) 1022 { 1023 return __p4d_to_phys(p4d); 1024 } 1025 1026 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) 1027 1028 static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr) 1029 { 1030 /* Ensure that 'p4dp' indexes a page table according to 'addr' */ 1031 VM_BUG_ON(((addr >> P4D_SHIFT) ^ ((u64)p4dp >> 3)) % PTRS_PER_P4D); 1032 1033 return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr); 1034 } 1035 1036 static inline pud_t *p4d_pgtable(p4d_t p4d) 1037 { 1038 return (pud_t *)__va(p4d_page_paddr(p4d)); 1039 } 1040 1041 static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr) 1042 { 1043 BUG_ON(!pgtable_l4_enabled()); 1044 1045 return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t); 1046 } 1047 1048 static inline 1049 pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr) 1050 { 1051 if (!pgtable_l4_enabled()) 1052 return p4d_to_folded_pud(p4dp, addr); 1053 return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr); 1054 } 1055 #define pud_offset_lockless pud_offset_lockless 1056 1057 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr) 1058 { 1059 return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr); 1060 } 1061 #define pud_offset pud_offset 1062 1063 static inline pud_t *pud_set_fixmap(unsigned long addr) 1064 { 1065 if (!pgtable_l4_enabled()) 1066 return NULL; 1067 return (pud_t *)set_fixmap_offset(FIX_PUD, addr); 1068 } 1069 1070 static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr) 1071 { 1072 if (!pgtable_l4_enabled()) 1073 return p4d_to_folded_pud(p4dp, addr); 1074 return pud_set_fixmap(pud_offset_phys(p4dp, addr)); 1075 } 1076 1077 static inline void pud_clear_fixmap(void) 1078 { 1079 if (pgtable_l4_enabled()) 1080 clear_fixmap(FIX_PUD); 1081 } 1082 1083 /* use ONLY for statically allocated translation tables */ 1084 static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr) 1085 { 1086 if (!pgtable_l4_enabled()) 1087 return p4d_to_folded_pud(p4dp, addr); 1088 return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr)); 1089 } 1090 1091 #define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d))) 1092 1093 #else 1094 1095 static inline bool pgtable_l4_enabled(void) { return false; } 1096 1097 #define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;}) 1098 1099 /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */ 1100 #define pud_set_fixmap(addr) NULL 1101 #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) 1102 #define pud_clear_fixmap() 1103 1104 #define pud_offset_kimg(dir,addr) ((pud_t *)dir) 1105 1106 #endif /* CONFIG_PGTABLE_LEVELS > 3 */ 1107 1108 #if CONFIG_PGTABLE_LEVELS > 4 1109 1110 static __always_inline bool pgtable_l5_enabled(void) 1111 { 1112 if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT)) 1113 return vabits_actual == VA_BITS; 1114 return alternative_has_cap_unlikely(ARM64_HAS_VA52); 1115 } 1116 1117 static inline bool mm_p4d_folded(const struct mm_struct *mm) 1118 { 1119 return !pgtable_l5_enabled(); 1120 } 1121 #define mm_p4d_folded mm_p4d_folded 1122 1123 #define p4d_ERROR(e) \ 1124 pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e)) 1125 1126 #define pgd_none(pgd) (pgtable_l5_enabled() && !pgd_val(pgd)) 1127 #define pgd_bad(pgd) (pgtable_l5_enabled() && \ 1128 ((pgd_val(pgd) & PGD_TYPE_MASK) != \ 1129 PGD_TYPE_TABLE)) 1130 #define pgd_present(pgd) (!pgd_none(pgd)) 1131 1132 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) 1133 { 1134 if (in_swapper_pgdir(pgdp)) { 1135 set_swapper_pgd(pgdp, __pgd(pgd_val(pgd))); 1136 return; 1137 } 1138 1139 WRITE_ONCE(*pgdp, pgd); 1140 queue_pte_barriers(); 1141 } 1142 1143 static inline void pgd_clear(pgd_t *pgdp) 1144 { 1145 if (pgtable_l5_enabled()) 1146 set_pgd(pgdp, __pgd(0)); 1147 } 1148 1149 static inline phys_addr_t pgd_page_paddr(pgd_t pgd) 1150 { 1151 return __pgd_to_phys(pgd); 1152 } 1153 1154 #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) 1155 1156 static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr) 1157 { 1158 /* Ensure that 'pgdp' indexes a page table according to 'addr' */ 1159 VM_BUG_ON(((addr >> PGDIR_SHIFT) ^ ((u64)pgdp >> 3)) % PTRS_PER_PGD); 1160 1161 return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr); 1162 } 1163 1164 static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr) 1165 { 1166 BUG_ON(!pgtable_l5_enabled()); 1167 1168 return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t); 1169 } 1170 1171 static inline 1172 p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr) 1173 { 1174 if (!pgtable_l5_enabled()) 1175 return pgd_to_folded_p4d(pgdp, addr); 1176 return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr); 1177 } 1178 #define p4d_offset_lockless p4d_offset_lockless 1179 1180 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr) 1181 { 1182 return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr); 1183 } 1184 1185 static inline p4d_t *p4d_set_fixmap(unsigned long addr) 1186 { 1187 if (!pgtable_l5_enabled()) 1188 return NULL; 1189 return (p4d_t *)set_fixmap_offset(FIX_P4D, addr); 1190 } 1191 1192 static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr) 1193 { 1194 if (!pgtable_l5_enabled()) 1195 return pgd_to_folded_p4d(pgdp, addr); 1196 return p4d_set_fixmap(p4d_offset_phys(pgdp, addr)); 1197 } 1198 1199 static inline void p4d_clear_fixmap(void) 1200 { 1201 if (pgtable_l5_enabled()) 1202 clear_fixmap(FIX_P4D); 1203 } 1204 1205 /* use ONLY for statically allocated translation tables */ 1206 static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr) 1207 { 1208 if (!pgtable_l5_enabled()) 1209 return pgd_to_folded_p4d(pgdp, addr); 1210 return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr)); 1211 } 1212 1213 #define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd))) 1214 1215 #else 1216 1217 static inline bool pgtable_l5_enabled(void) { return false; } 1218 1219 #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) 1220 1221 /* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */ 1222 #define p4d_set_fixmap(addr) NULL 1223 #define p4d_set_fixmap_offset(p4dp, addr) ((p4d_t *)p4dp) 1224 #define p4d_clear_fixmap() 1225 1226 #define p4d_offset_kimg(dir,addr) ((p4d_t *)dir) 1227 1228 static inline 1229 p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr) 1230 { 1231 /* 1232 * With runtime folding of the pud, pud_offset_lockless() passes 1233 * the 'pgd_t *' we return here to p4d_to_folded_pud(), which 1234 * will offset the pointer assuming that it points into 1235 * a page-table page. However, the fast GUP path passes us a 1236 * pgd_t allocated on the stack and so we must use the original 1237 * pointer in 'pgdp' to construct the p4d pointer instead of 1238 * using the generic p4d_offset_lockless() implementation. 1239 * 1240 * Note: reusing the original pointer means that we may 1241 * dereference the same (live) page-table entry multiple times. 1242 * This is safe because it is still only loaded once in the 1243 * context of each level and the CPU guarantees same-address 1244 * read-after-read ordering. 1245 */ 1246 return p4d_offset(pgdp, addr); 1247 } 1248 #define p4d_offset_lockless p4d_offset_lockless_folded 1249 1250 #endif /* CONFIG_PGTABLE_LEVELS > 4 */ 1251 1252 #define pgd_ERROR(e) \ 1253 pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e)) 1254 1255 #define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr)) 1256 #define pgd_clear_fixmap() clear_fixmap(FIX_PGD) 1257 1258 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 1259 { 1260 /* 1261 * Normal and Normal-Tagged are two different memory types and indices 1262 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK. 1263 */ 1264 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 1265 PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE | 1266 PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK; 1267 1268 /* preserve the hardware dirty information */ 1269 if (pte_hw_dirty(pte)) 1270 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 1271 1272 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 1273 /* 1274 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware 1275 * dirtiness again. 1276 */ 1277 if (pte_sw_dirty(pte)) 1278 pte = pte_mkdirty(pte); 1279 return pte; 1280 } 1281 1282 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 1283 { 1284 return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); 1285 } 1286 1287 extern int __ptep_set_access_flags(struct vm_area_struct *vma, 1288 unsigned long address, pte_t *ptep, 1289 pte_t entry, int dirty); 1290 1291 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1292 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 1293 static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 1294 unsigned long address, pmd_t *pmdp, 1295 pmd_t entry, int dirty) 1296 { 1297 return __ptep_set_access_flags(vma, address, (pte_t *)pmdp, 1298 pmd_pte(entry), dirty); 1299 } 1300 #endif 1301 1302 #ifdef CONFIG_PAGE_TABLE_CHECK 1303 static inline bool pte_user_accessible_page(pte_t pte) 1304 { 1305 return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte)); 1306 } 1307 1308 static inline bool pmd_user_accessible_page(pmd_t pmd) 1309 { 1310 return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd)); 1311 } 1312 1313 static inline bool pud_user_accessible_page(pud_t pud) 1314 { 1315 return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud)); 1316 } 1317 #endif 1318 1319 /* 1320 * Atomic pte/pmd modifications. 1321 */ 1322 static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma, 1323 unsigned long address, 1324 pte_t *ptep) 1325 { 1326 pte_t old_pte, pte; 1327 1328 pte = __ptep_get(ptep); 1329 do { 1330 old_pte = pte; 1331 pte = pte_mkold(pte); 1332 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 1333 pte_val(old_pte), pte_val(pte)); 1334 } while (pte_val(pte) != pte_val(old_pte)); 1335 1336 return pte_young(pte); 1337 } 1338 1339 static inline int __ptep_clear_flush_young(struct vm_area_struct *vma, 1340 unsigned long address, pte_t *ptep) 1341 { 1342 int young = __ptep_test_and_clear_young(vma, address, ptep); 1343 1344 if (young) { 1345 /* 1346 * We can elide the trailing DSB here since the worst that can 1347 * happen is that a CPU continues to use the young entry in its 1348 * TLB and we mistakenly reclaim the associated page. The 1349 * window for such an event is bounded by the next 1350 * context-switch, which provides a DSB to complete the TLB 1351 * invalidation. 1352 */ 1353 flush_tlb_page_nosync(vma, address); 1354 } 1355 1356 return young; 1357 } 1358 1359 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) 1360 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 1361 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 1362 unsigned long address, 1363 pmd_t *pmdp) 1364 { 1365 /* Operation applies to PMD table entry only if FEAT_HAFT is enabled */ 1366 VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft()); 1367 return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); 1368 } 1369 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */ 1370 1371 static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm, 1372 pte_t *ptep, 1373 unsigned long pgsize) 1374 { 1375 pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0)); 1376 1377 switch (pgsize) { 1378 case PAGE_SIZE: 1379 page_table_check_pte_clear(mm, pte); 1380 break; 1381 case PMD_SIZE: 1382 page_table_check_pmd_clear(mm, pte_pmd(pte)); 1383 break; 1384 #ifndef __PAGETABLE_PMD_FOLDED 1385 case PUD_SIZE: 1386 page_table_check_pud_clear(mm, pte_pud(pte)); 1387 break; 1388 #endif 1389 default: 1390 VM_WARN_ON(1); 1391 } 1392 1393 return pte; 1394 } 1395 1396 static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, 1397 unsigned long address, pte_t *ptep) 1398 { 1399 return __ptep_get_and_clear_anysz(mm, ptep, PAGE_SIZE); 1400 } 1401 1402 static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr, 1403 pte_t *ptep, unsigned int nr, int full) 1404 { 1405 for (;;) { 1406 __ptep_get_and_clear(mm, addr, ptep); 1407 if (--nr == 0) 1408 break; 1409 ptep++; 1410 addr += PAGE_SIZE; 1411 } 1412 } 1413 1414 static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm, 1415 unsigned long addr, pte_t *ptep, 1416 unsigned int nr, int full) 1417 { 1418 pte_t pte, tmp_pte; 1419 1420 pte = __ptep_get_and_clear(mm, addr, ptep); 1421 while (--nr) { 1422 ptep++; 1423 addr += PAGE_SIZE; 1424 tmp_pte = __ptep_get_and_clear(mm, addr, ptep); 1425 if (pte_dirty(tmp_pte)) 1426 pte = pte_mkdirty(pte); 1427 if (pte_young(tmp_pte)) 1428 pte = pte_mkyoung(pte); 1429 } 1430 return pte; 1431 } 1432 1433 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1434 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1435 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 1436 unsigned long address, pmd_t *pmdp) 1437 { 1438 return pte_pmd(__ptep_get_and_clear_anysz(mm, (pte_t *)pmdp, PMD_SIZE)); 1439 } 1440 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1441 1442 static inline void ___ptep_set_wrprotect(struct mm_struct *mm, 1443 unsigned long address, pte_t *ptep, 1444 pte_t pte) 1445 { 1446 pte_t old_pte; 1447 1448 do { 1449 old_pte = pte; 1450 pte = pte_wrprotect(pte); 1451 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 1452 pte_val(old_pte), pte_val(pte)); 1453 } while (pte_val(pte) != pte_val(old_pte)); 1454 } 1455 1456 /* 1457 * __ptep_set_wrprotect - mark read-only while transferring potential hardware 1458 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. 1459 */ 1460 static inline void __ptep_set_wrprotect(struct mm_struct *mm, 1461 unsigned long address, pte_t *ptep) 1462 { 1463 ___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep)); 1464 } 1465 1466 static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address, 1467 pte_t *ptep, unsigned int nr) 1468 { 1469 unsigned int i; 1470 1471 for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++) 1472 __ptep_set_wrprotect(mm, address, ptep); 1473 } 1474 1475 static inline void __clear_young_dirty_pte(struct vm_area_struct *vma, 1476 unsigned long addr, pte_t *ptep, 1477 pte_t pte, cydp_t flags) 1478 { 1479 pte_t old_pte; 1480 1481 do { 1482 old_pte = pte; 1483 1484 if (flags & CYDP_CLEAR_YOUNG) 1485 pte = pte_mkold(pte); 1486 if (flags & CYDP_CLEAR_DIRTY) 1487 pte = pte_mkclean(pte); 1488 1489 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 1490 pte_val(old_pte), pte_val(pte)); 1491 } while (pte_val(pte) != pte_val(old_pte)); 1492 } 1493 1494 static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma, 1495 unsigned long addr, pte_t *ptep, 1496 unsigned int nr, cydp_t flags) 1497 { 1498 pte_t pte; 1499 1500 for (;;) { 1501 pte = __ptep_get(ptep); 1502 1503 if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY)) 1504 __set_pte(ptep, pte_mkclean(pte_mkold(pte))); 1505 else 1506 __clear_young_dirty_pte(vma, addr, ptep, pte, flags); 1507 1508 if (--nr == 0) 1509 break; 1510 ptep++; 1511 addr += PAGE_SIZE; 1512 } 1513 } 1514 1515 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1516 #define __HAVE_ARCH_PMDP_SET_WRPROTECT 1517 static inline void pmdp_set_wrprotect(struct mm_struct *mm, 1518 unsigned long address, pmd_t *pmdp) 1519 { 1520 __ptep_set_wrprotect(mm, address, (pte_t *)pmdp); 1521 } 1522 1523 #define pmdp_establish pmdp_establish 1524 static inline pmd_t pmdp_establish(struct vm_area_struct *vma, 1525 unsigned long address, pmd_t *pmdp, pmd_t pmd) 1526 { 1527 page_table_check_pmd_set(vma->vm_mm, pmdp, pmd); 1528 return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd))); 1529 } 1530 #endif 1531 1532 /* 1533 * Encode and decode a swap entry: 1534 * bits 0-1: present (must be zero) 1535 * bits 2: remember PG_anon_exclusive 1536 * bit 3: remember uffd-wp state 1537 * bits 6-10: swap type 1538 * bit 11: PTE_PRESENT_INVALID (must be zero) 1539 * bits 12-61: swap offset 1540 */ 1541 #define __SWP_TYPE_SHIFT 6 1542 #define __SWP_TYPE_BITS 5 1543 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) 1544 #define __SWP_OFFSET_SHIFT 12 1545 #define __SWP_OFFSET_BITS 50 1546 #define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) 1547 1548 #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) 1549 #define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) 1550 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) 1551 1552 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 1553 #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) 1554 1555 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1556 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) 1557 #define __swp_entry_to_pmd(swp) __pmd((swp).val) 1558 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ 1559 1560 /* 1561 * Ensure that there are not more swap files than can be encoded in the kernel 1562 * PTEs. 1563 */ 1564 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) 1565 1566 #ifdef CONFIG_ARM64_MTE 1567 1568 #define __HAVE_ARCH_PREPARE_TO_SWAP 1569 extern int arch_prepare_to_swap(struct folio *folio); 1570 1571 #define __HAVE_ARCH_SWAP_INVALIDATE 1572 static inline void arch_swap_invalidate_page(int type, pgoff_t offset) 1573 { 1574 if (system_supports_mte()) 1575 mte_invalidate_tags(type, offset); 1576 } 1577 1578 static inline void arch_swap_invalidate_area(int type) 1579 { 1580 if (system_supports_mte()) 1581 mte_invalidate_tags_area(type); 1582 } 1583 1584 #define __HAVE_ARCH_SWAP_RESTORE 1585 extern void arch_swap_restore(swp_entry_t entry, struct folio *folio); 1586 1587 #endif /* CONFIG_ARM64_MTE */ 1588 1589 /* 1590 * On AArch64, the cache coherency is handled via the __set_ptes() function. 1591 */ 1592 static inline void update_mmu_cache_range(struct vm_fault *vmf, 1593 struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 1594 unsigned int nr) 1595 { 1596 /* 1597 * We don't do anything here, so there's a very small chance of 1598 * us retaking a user fault which we just fixed up. The alternative 1599 * is doing a dsb(ishst), but that penalises the fastpath. 1600 */ 1601 } 1602 1603 #define update_mmu_cache(vma, addr, ptep) \ 1604 update_mmu_cache_range(NULL, vma, addr, ptep, 1) 1605 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) 1606 1607 #ifdef CONFIG_ARM64_PA_BITS_52 1608 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) 1609 #else 1610 #define phys_to_ttbr(addr) (addr) 1611 #endif 1612 1613 /* 1614 * On arm64 without hardware Access Flag, copying from user will fail because 1615 * the pte is old and cannot be marked young. So we always end up with zeroed 1616 * page after fork() + CoW for pfn mappings. We don't always have a 1617 * hardware-managed access flag on arm64. 1618 */ 1619 #define arch_has_hw_pte_young cpu_has_hw_af 1620 1621 #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG 1622 #define arch_has_hw_nonleaf_pmd_young system_supports_haft 1623 #endif 1624 1625 /* 1626 * Experimentally, it's cheap to set the access flag in hardware and we 1627 * benefit from prefaulting mappings as 'old' to start with. 1628 */ 1629 #define arch_wants_old_prefaulted_pte cpu_has_hw_af 1630 1631 /* 1632 * Request exec memory is read into pagecache in at least 64K folios. This size 1633 * can be contpte-mapped when 4K base pages are in use (16 pages into 1 iTLB 1634 * entry), and HPA can coalesce it (4 pages into 1 TLB entry) when 16K base 1635 * pages are in use. 1636 */ 1637 #define exec_folio_order() ilog2(SZ_64K >> PAGE_SHIFT) 1638 1639 static inline bool pud_sect_supported(void) 1640 { 1641 return PAGE_SIZE == SZ_4K; 1642 } 1643 1644 1645 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 1646 #define ptep_modify_prot_start ptep_modify_prot_start 1647 extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, 1648 unsigned long addr, pte_t *ptep); 1649 1650 #define ptep_modify_prot_commit ptep_modify_prot_commit 1651 extern void ptep_modify_prot_commit(struct vm_area_struct *vma, 1652 unsigned long addr, pte_t *ptep, 1653 pte_t old_pte, pte_t new_pte); 1654 1655 #define modify_prot_start_ptes modify_prot_start_ptes 1656 extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma, 1657 unsigned long addr, pte_t *ptep, 1658 unsigned int nr); 1659 1660 #define modify_prot_commit_ptes modify_prot_commit_ptes 1661 extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, 1662 pte_t *ptep, pte_t old_pte, pte_t pte, 1663 unsigned int nr); 1664 1665 #ifdef CONFIG_ARM64_CONTPTE 1666 1667 /* 1668 * The contpte APIs are used to transparently manage the contiguous bit in ptes 1669 * where it is possible and makes sense to do so. The PTE_CONT bit is considered 1670 * a private implementation detail of the public ptep API (see below). 1671 */ 1672 extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, 1673 pte_t *ptep, pte_t pte); 1674 extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, 1675 pte_t *ptep, pte_t pte); 1676 extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte); 1677 extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep); 1678 extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, 1679 pte_t *ptep, pte_t pte, unsigned int nr); 1680 extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, 1681 pte_t *ptep, unsigned int nr, int full); 1682 extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, 1683 unsigned long addr, pte_t *ptep, 1684 unsigned int nr, int full); 1685 extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, 1686 unsigned long addr, pte_t *ptep); 1687 extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, 1688 unsigned long addr, pte_t *ptep); 1689 extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, 1690 pte_t *ptep, unsigned int nr); 1691 extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, 1692 unsigned long addr, pte_t *ptep, 1693 pte_t entry, int dirty); 1694 extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma, 1695 unsigned long addr, pte_t *ptep, 1696 unsigned int nr, cydp_t flags); 1697 1698 static __always_inline void contpte_try_fold(struct mm_struct *mm, 1699 unsigned long addr, pte_t *ptep, pte_t pte) 1700 { 1701 /* 1702 * Only bother trying if both the virtual and physical addresses are 1703 * aligned and correspond to the last entry in a contig range. The core 1704 * code mostly modifies ranges from low to high, so this is the likely 1705 * the last modification in the contig range, so a good time to fold. 1706 * We can't fold special mappings, because there is no associated folio. 1707 */ 1708 1709 const unsigned long contmask = CONT_PTES - 1; 1710 bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask; 1711 1712 if (unlikely(valign)) { 1713 bool palign = (pte_pfn(pte) & contmask) == contmask; 1714 1715 if (unlikely(palign && 1716 pte_valid(pte) && !pte_cont(pte) && !pte_special(pte))) 1717 __contpte_try_fold(mm, addr, ptep, pte); 1718 } 1719 } 1720 1721 static __always_inline void contpte_try_unfold(struct mm_struct *mm, 1722 unsigned long addr, pte_t *ptep, pte_t pte) 1723 { 1724 if (unlikely(pte_valid_cont(pte))) 1725 __contpte_try_unfold(mm, addr, ptep, pte); 1726 } 1727 1728 #define pte_batch_hint pte_batch_hint 1729 static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) 1730 { 1731 if (!pte_valid_cont(pte)) 1732 return 1; 1733 1734 return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1)); 1735 } 1736 1737 /* 1738 * The below functions constitute the public API that arm64 presents to the 1739 * core-mm to manipulate PTE entries within their page tables (or at least this 1740 * is the subset of the API that arm64 needs to implement). These public 1741 * versions will automatically and transparently apply the contiguous bit where 1742 * it makes sense to do so. Therefore any users that are contig-aware (e.g. 1743 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the 1744 * private versions, which are prefixed with double underscore. All of these 1745 * APIs except for ptep_get_lockless() are expected to be called with the PTL 1746 * held. Although the contiguous bit is considered private to the 1747 * implementation, it is deliberately allowed to leak through the getters (e.g. 1748 * ptep_get()), back to core code. This is required so that pte_leaf_size() can 1749 * provide an accurate size for perf_get_pgtable_size(). But this leakage means 1750 * its possible a pte will be passed to a setter with the contiguous bit set, so 1751 * we explicitly clear the contiguous bit in those cases to prevent accidentally 1752 * setting it in the pgtable. 1753 */ 1754 1755 #define ptep_get ptep_get 1756 static inline pte_t ptep_get(pte_t *ptep) 1757 { 1758 pte_t pte = __ptep_get(ptep); 1759 1760 if (likely(!pte_valid_cont(pte))) 1761 return pte; 1762 1763 return contpte_ptep_get(ptep, pte); 1764 } 1765 1766 #define ptep_get_lockless ptep_get_lockless 1767 static inline pte_t ptep_get_lockless(pte_t *ptep) 1768 { 1769 pte_t pte = __ptep_get(ptep); 1770 1771 if (likely(!pte_valid_cont(pte))) 1772 return pte; 1773 1774 return contpte_ptep_get_lockless(ptep); 1775 } 1776 1777 static inline void set_pte(pte_t *ptep, pte_t pte) 1778 { 1779 /* 1780 * We don't have the mm or vaddr so cannot unfold contig entries (since 1781 * it requires tlb maintenance). set_pte() is not used in core code, so 1782 * this should never even be called. Regardless do our best to service 1783 * any call and emit a warning if there is any attempt to set a pte on 1784 * top of an existing contig range. 1785 */ 1786 pte_t orig_pte = __ptep_get(ptep); 1787 1788 WARN_ON_ONCE(pte_valid_cont(orig_pte)); 1789 __set_pte(ptep, pte_mknoncont(pte)); 1790 } 1791 1792 #define set_ptes set_ptes 1793 static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr, 1794 pte_t *ptep, pte_t pte, unsigned int nr) 1795 { 1796 pte = pte_mknoncont(pte); 1797 1798 if (likely(nr == 1)) { 1799 contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 1800 __set_ptes(mm, addr, ptep, pte, 1); 1801 contpte_try_fold(mm, addr, ptep, pte); 1802 } else { 1803 contpte_set_ptes(mm, addr, ptep, pte, nr); 1804 } 1805 } 1806 1807 static inline void pte_clear(struct mm_struct *mm, 1808 unsigned long addr, pte_t *ptep) 1809 { 1810 contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 1811 __pte_clear(mm, addr, ptep); 1812 } 1813 1814 #define clear_full_ptes clear_full_ptes 1815 static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, 1816 pte_t *ptep, unsigned int nr, int full) 1817 { 1818 if (likely(nr == 1)) { 1819 contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 1820 __clear_full_ptes(mm, addr, ptep, nr, full); 1821 } else { 1822 contpte_clear_full_ptes(mm, addr, ptep, nr, full); 1823 } 1824 } 1825 1826 #define get_and_clear_full_ptes get_and_clear_full_ptes 1827 static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, 1828 unsigned long addr, pte_t *ptep, 1829 unsigned int nr, int full) 1830 { 1831 pte_t pte; 1832 1833 if (likely(nr == 1)) { 1834 contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 1835 pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full); 1836 } else { 1837 pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full); 1838 } 1839 1840 return pte; 1841 } 1842 1843 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 1844 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 1845 unsigned long addr, pte_t *ptep) 1846 { 1847 contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 1848 return __ptep_get_and_clear(mm, addr, ptep); 1849 } 1850 1851 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 1852 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 1853 unsigned long addr, pte_t *ptep) 1854 { 1855 pte_t orig_pte = __ptep_get(ptep); 1856 1857 if (likely(!pte_valid_cont(orig_pte))) 1858 return __ptep_test_and_clear_young(vma, addr, ptep); 1859 1860 return contpte_ptep_test_and_clear_young(vma, addr, ptep); 1861 } 1862 1863 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 1864 static inline int ptep_clear_flush_young(struct vm_area_struct *vma, 1865 unsigned long addr, pte_t *ptep) 1866 { 1867 pte_t orig_pte = __ptep_get(ptep); 1868 1869 if (likely(!pte_valid_cont(orig_pte))) 1870 return __ptep_clear_flush_young(vma, addr, ptep); 1871 1872 return contpte_ptep_clear_flush_young(vma, addr, ptep); 1873 } 1874 1875 #define wrprotect_ptes wrprotect_ptes 1876 static __always_inline void wrprotect_ptes(struct mm_struct *mm, 1877 unsigned long addr, pte_t *ptep, unsigned int nr) 1878 { 1879 if (likely(nr == 1)) { 1880 /* 1881 * Optimization: wrprotect_ptes() can only be called for present 1882 * ptes so we only need to check contig bit as condition for 1883 * unfold, and we can remove the contig bit from the pte we read 1884 * to avoid re-reading. This speeds up fork() which is sensitive 1885 * for order-0 folios. Equivalent to contpte_try_unfold(). 1886 */ 1887 pte_t orig_pte = __ptep_get(ptep); 1888 1889 if (unlikely(pte_cont(orig_pte))) { 1890 __contpte_try_unfold(mm, addr, ptep, orig_pte); 1891 orig_pte = pte_mknoncont(orig_pte); 1892 } 1893 ___ptep_set_wrprotect(mm, addr, ptep, orig_pte); 1894 } else { 1895 contpte_wrprotect_ptes(mm, addr, ptep, nr); 1896 } 1897 } 1898 1899 #define __HAVE_ARCH_PTEP_SET_WRPROTECT 1900 static inline void ptep_set_wrprotect(struct mm_struct *mm, 1901 unsigned long addr, pte_t *ptep) 1902 { 1903 wrprotect_ptes(mm, addr, ptep, 1); 1904 } 1905 1906 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1907 static inline int ptep_set_access_flags(struct vm_area_struct *vma, 1908 unsigned long addr, pte_t *ptep, 1909 pte_t entry, int dirty) 1910 { 1911 pte_t orig_pte = __ptep_get(ptep); 1912 1913 entry = pte_mknoncont(entry); 1914 1915 if (likely(!pte_valid_cont(orig_pte))) 1916 return __ptep_set_access_flags(vma, addr, ptep, entry, dirty); 1917 1918 return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty); 1919 } 1920 1921 #define clear_young_dirty_ptes clear_young_dirty_ptes 1922 static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, 1923 unsigned long addr, pte_t *ptep, 1924 unsigned int nr, cydp_t flags) 1925 { 1926 if (likely(nr == 1 && !pte_cont(__ptep_get(ptep)))) 1927 __clear_young_dirty_ptes(vma, addr, ptep, nr, flags); 1928 else 1929 contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags); 1930 } 1931 1932 #else /* CONFIG_ARM64_CONTPTE */ 1933 1934 #define ptep_get __ptep_get 1935 #define set_pte __set_pte 1936 #define set_ptes __set_ptes 1937 #define pte_clear __pte_clear 1938 #define clear_full_ptes __clear_full_ptes 1939 #define get_and_clear_full_ptes __get_and_clear_full_ptes 1940 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 1941 #define ptep_get_and_clear __ptep_get_and_clear 1942 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 1943 #define ptep_test_and_clear_young __ptep_test_and_clear_young 1944 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 1945 #define ptep_clear_flush_young __ptep_clear_flush_young 1946 #define __HAVE_ARCH_PTEP_SET_WRPROTECT 1947 #define ptep_set_wrprotect __ptep_set_wrprotect 1948 #define wrprotect_ptes __wrprotect_ptes 1949 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1950 #define ptep_set_access_flags __ptep_set_access_flags 1951 #define clear_young_dirty_ptes __clear_young_dirty_ptes 1952 1953 #endif /* CONFIG_ARM64_CONTPTE */ 1954 1955 #endif /* !__ASSEMBLER__ */ 1956 1957 #endif /* __ASM_PGTABLE_H */ 1958