/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap		((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLER__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <asm/por.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

static inline void emit_pte_barriers(void)
{
	/*
	 * These barriers are emitted under certain conditions after a pte entry
	 * was modified (see e.g. __set_pte_complete()). The dsb makes the store
	 * visible to the table walker. The isb ensures that any previous
	 * speculative "invalid translation" marker that is in the CPU's
	 * pipeline gets cleared, so that any access to that address after
	 * setting the pte to valid won't cause a spurious fault. If the thread
	 * gets preempted after storing to the pgtable but before emitting these
	 * barriers, __switch_to() emits a dsb which ensures the walker gets to
	 * see the store. There is no guarantee of an isb being issued though.
	 * This is safe because it will still get issued (albeit on a
	 * potentially different CPU) when the thread starts running again,
	 * before any access to the address.
	 */
	dsb(ishst);
	isb();
}

static inline void queue_pte_barriers(void)
{
	if (is_lazy_mmu_mode_active()) {
		/* Avoid the atomic op if already set. */
		if (!test_thread_flag(TIF_LAZY_MMU_PENDING))
			set_thread_flag(TIF_LAZY_MMU_PENDING);
	} else {
		emit_pte_barriers();
	}
}

static inline void arch_enter_lazy_mmu_mode(void) {}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
		emit_pte_barriers();
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	arch_flush_lazy_mmu_mode();
}
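/*
 * Illustrative sketch (not lifted from any particular caller): when the
 * generic lazy MMU mode is active, a batch of kernel pgtable updates only
 * records that barriers are pending, and the dsb/isb pair is emitted once
 * when the mode is left rather than once per entry:
 *
 *	// lazy MMU mode entered by the generic pgtable code
 *	for (i = 0; i < nr; i++, ptep++)
 *		__set_pte(ptep, pte);	// only sets TIF_LAZY_MMU_PENDING
 *	arch_leave_lazy_mmu_mode();	// emits dsb(ishst); isb() once
 */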
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * We use the local TLB invalidation instruction when reusing a page in the
 * write protection fault handler, to avoid a TLBI broadcast in the hot path.
 * This will cause spurious page faults if stale read-only TLB entries exist.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep)	\
	local_flush_tlb_page_nonotify(vma, address)

#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp)	\
	local_flush_tlb_page_nonotify(vma, address)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return pte_val(pte) & PTE_ADDR_LOW;
}

static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return phys;
}
#endif

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_valid(pte) || pte_present_invalid(pte))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_present_invalid(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Returns true if the pte is valid and has the contiguous bit set.
 */
#define pte_valid_cont(pte)	(pte_valid(pte) && pte_cont(pte))
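/*
 * For example, a PROT_NONE pte or an entry made invalid via pte_mkinvalid()
 * has PTE_PRESENT_INVALID set and PTE_VALID clear: pte_present() is true, so
 * core mm still treats the entry as mapped, while pte_valid() is false, so
 * the hardware walker ignores it.
 */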
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute)
{
	u64 por;

	if (!system_supports_poe())
		return true;

	por = read_sysreg_s(SYS_POR_EL0);

	if (write)
		return por_elx_allows_write(por, pkey);

	if (execute)
		return por_elx_allows_exec(por, pkey);

	return por_elx_allows_read(por, pkey);
}

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER bit set,
 * subject to the write permission check). Execute-only mappings, such as
 * PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits clear), must return
 * false. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted_no_overlay(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pte_access_permitted(pte, write) \
	(pte_access_permitted_no_overlay(pte, write) && \
	por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	if (pte_sw_dirty(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkvalid(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pte_t pte_mkinvalid(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
	pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
	return pte;
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pmd_t pmd_mknoncont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void __set_pte_complete(pte_t pte)
{
	/*
	 * Only needed if the new pte is valid and a kernel mapping; otherwise
	 * the TLB maintenance has the necessary barriers.
	 */
	if (pte_valid_not_user(pte))
		queue_pte_barriers();
}

static inline void __set_pte(pte_t *ptep, pte_t pte)
{
	__set_pte_nosync(ptep, pte);
	__set_pte_complete(pte);
}

static inline pte_t __ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(pteval_t old, pteval_t new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        | 1           0          0
 *   0      1        | 1           1          0
 *   1      0        | 1           0          1
 *   1      1        | 0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
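/*
 * For example, a clean, writable pte starts out as RDONLY=1, WRITE/DBM=1,
 * DIRTY=0. With hardware DBM, the first store clears PTE_RDONLY in place,
 * making pte_hw_dirty() true; a later pte_wrprotect() transfers that state
 * into the software PTE_DIRTY bit before setting PTE_RDONLY again, so the
 * dirty information is not lost.
 */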
static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = __ptep_get(ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (__ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted_no_overlay() returns false for exec only
	 * mappings, they don't expose tags (instruction fetches don't check
	 * tags).
	 */
	if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TYPE_MASK) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	/*
	 * pte_present_invalid() tells us that the pte is invalid from HW
	 * perspective but present from SW perspective, so the fields are to be
	 * interpreted as per the HW layout. The second and third checks are
	 * the unique encoding that we use for PROT_NONE. It is insufficient to
	 * only use the first check because we share the same encoding scheme
	 * with pmds, which support pmd_mkinvalid() and so can be
	 * present-invalid without being PROT_NONE.
	 */
	return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pmd_uffd_wp(pmd)	pte_uffd_wp(pmd_pte(pmd))
#define pmd_mkuffd_wp(pmd)	pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
#define pmd_clear_uffd_wp(pmd)	pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
#define pmd_swp_uffd_wp(pmd)	pte_swp_uffd_wp(pmd_pte(pmd))
#define pmd_swp_mkuffd_wp(pmd)	pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
#define pmd_swp_clear_uffd_wp(pmd) \
				pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/*
	 * It's possible that the pmd is present-invalid on entry
	 * and in that case it needs to remain present-invalid on
	 * exit. So ensure the VALID bit does not get modified.
	 */
	pmdval_t mask = PMD_TYPE_MASK & ~PTE_VALID;
	pmdval_t val = PMD_TYPE_SECT & ~PTE_VALID;

	return __pmd((pmd_val(pmd) & ~mask) | val);
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
#define pmd_special(pte)	(!!((pmd_val(pte) & PTE_SPECIAL)))
static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
}
#endif

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

static inline pud_t pud_mkhuge(pud_t pud)
{
	/*
	 * It's possible that the pud is present-invalid on entry
	 * and in that case it needs to remain present-invalid on
	 * exit. So ensure the VALID bit does not get modified.
	 */
	pudval_t mask = PUD_TYPE_MASK & ~PTE_VALID;
	pudval_t val = PUD_TYPE_SECT & ~PTE_VALID;

	return __pud((pud_val(pud) & ~mask) | val);
}

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	unsigned long pfn = pud_pfn(pud);

	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
}

static inline void __set_ptes_anysz(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, unsigned int nr,
				    unsigned long pgsize)
{
	unsigned long stride = pgsize >> PAGE_SHIFT;

	switch (pgsize) {
	case PAGE_SIZE:
		page_table_check_ptes_set(mm, addr, ptep, pte, nr);
		break;
	case PMD_SIZE:
		page_table_check_pmds_set(mm, addr, (pmd_t *)ptep,
					  pte_pmd(pte), nr);
		break;
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		page_table_check_puds_set(mm, addr, (pud_t *)ptep,
					  pte_pud(pte), nr);
		break;
#endif
	default:
		VM_WARN_ON(1);
	}

	__sync_cache_and_tags(pte, nr * stride);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		__set_pte_nosync(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_advance_pfn(pte, stride);
	}

	__set_pte_complete(pte);
}

static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte, unsigned int nr)
{
	__set_ptes_anysz(mm, addr, ptep, pte, nr, PAGE_SIZE);
}
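/*
 * For example, __set_ptes(mm, addr, ptep, pte, 3) writes three adjacent
 * entries mapping pfn, pfn + 1 and pfn + 2 (each derived with
 * pte_advance_pfn()), and only queues the dsb/isb barriers once, after the
 * last entry has been written.
 */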
static inline void __set_pmds(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
	__set_ptes_anysz(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
}
#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)

static inline void __set_puds(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud, unsigned int nr)
{
	__set_ptes_anysz(mm, addr, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
}
#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

#define pgprot_decrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED)
#define pgprot_encrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, 0)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
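/*
 * For example, with 4K base pages a contiguous pte leaf covers CONT_PTE_SIZE
 * (16 * 4K = 64K) and a contiguous pmd leaf covers CONT_PMD_SIZE
 * (16 * 2M = 32M); this is the size perf's perf_get_pgtable_size() sees via
 * pte_leaf_size()/pmd_leaf_size().
 */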
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * If pmd is present-invalid, pmd_table() won't detect it
	 * as a table, so force the valid bit for the comparison.
	 */
	return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd))
		queue_pte_barriers();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		((pud_val(pud) & PUD_TYPE_MASK) != \
				 PUD_TYPE_TABLE)
#define pud_present(pud)	pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#else
#define pud_leaf(pud)		false
#endif
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud))
		queue_pte_barriers();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}
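/*
 * Note: the *_set_fixmap*() helpers above (and the pmd/pud/p4d variants
 * below) temporarily map a table page through a dedicated fixmap slot so it
 * can be read or written before the linear map covers it, e.g. during early
 * page-table creation; the matching *_clear_fixmap() tears the window down.
 */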
/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_valid(pud)		false
#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && \
				 ((p4d_val(p4d) & P4D_TYPE_MASK) != \
				  P4D_TYPE_TABLE))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	queue_pte_barriers();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	/* Ensure that 'p4dp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> P4D_SHIFT) ^ ((u64)p4dp >> 3)) % PTRS_PER_P4D);

	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset	pud_offset
static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}

/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && \
				 ((pgd_val(pgd) & PGD_TYPE_MASK) != \
				  PGD_TYPE_TABLE))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	queue_pte_barriers();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	/* Ensure that 'pgdp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> PGDIR_SHIFT) ^ ((u64)pgdp >> 3)) % PTRS_PER_PGD);

	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless
static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

static inline
p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	/*
	 * With runtime folding of the pud, pud_offset_lockless() passes
	 * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
	 * will offset the pointer assuming that it points into
	 * a page-table page. However, the fast GUP path passes us a
	 * pgd_t allocated on the stack and so we must use the original
	 * pointer in 'pgdp' to construct the p4d pointer instead of
	 * using the generic p4d_offset_lockless() implementation.
	 *
	 * Note: reusing the original pointer means that we may
	 * dereference the same (live) page-table entry multiple times.
	 * This is safe because it is still only loaded once in the
	 * context of each level and the CPU guarantees same-address
	 * read-after-read ordering.
	 */
	return p4d_offset(pgdp, addr);
}
#define p4d_offset_lockless p4d_offset_lockless_folded

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
			      PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK;

	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

extern int __ptep_set_access_flags(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep,
				   pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
				       pmd_pte(entry), dirty);
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
	return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
	return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
	return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

/*
 * Atomic pte/pmd modifications.
 */

static inline void __pte_clear(struct mm_struct *mm,
			       unsigned long addr, pte_t *ptep)
{
	__set_pte(ptep, __pte(0));
}

static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = __ptep_get(ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}
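/*
 * Note: the cmpxchg loop above retries if the pte changed under us between
 * the read and the cmpxchg, e.g. because hardware DBM cleared PTE_RDONLY or
 * another thread updated the entry concurrently, so only the access flag is
 * modified and no racing update is lost.
 */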
static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep)
{
	int young = __ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	/* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
	VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
	return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */

static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
					       unsigned long address,
					       pte_t *ptep,
					       unsigned long pgsize)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	switch (pgsize) {
	case PAGE_SIZE:
		page_table_check_pte_clear(mm, address, pte);
		break;
	case PMD_SIZE:
		page_table_check_pmd_clear(mm, address, pte_pmd(pte));
		break;
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		page_table_check_pud_clear(mm, address, pte_pud(pte));
		break;
#endif
	default:
		VM_WARN_ON(1);
	}

	return pte;
}

static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep)
{
	return __ptep_get_and_clear_anysz(mm, address, ptep, PAGE_SIZE);
}

static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		__ptep_get_and_clear(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep,
					      unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = __ptep_get_and_clear(mm, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(__ptep_get_and_clear_anysz(mm, address, (pte_t *)pmdp, PMD_SIZE));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep,
					 pte_t pte)
{
	pte_t old_pte;

	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

/*
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
}

static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
				    pte_t *ptep, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
		__ptep_set_wrprotect(mm, address, ptep);
}

static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t pte, cydp_t flags)
{
	pte_t old_pte;

	do {
		old_pte = pte;

		if (flags & CYDP_CLEAR_YOUNG)
			pte = pte_mkold(pte);
		if (flags & CYDP_CLEAR_DIRTY)
			pte = pte_mkclean(pte);

		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep,
					    unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		pte = __ptep_get(ptep);

		if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
			__set_pte(ptep, pte_mkclean(pte_mkold(pte)));
		else
			__clear_young_dirty_pte(vma, addr, ptep, pte, flags);

		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	__ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit 2:		remember PG_anon_exclusive
 *	bit 3:		remember uffd-wp state
 *	bits 6-10:	swap type
 *	bit 11:		PTE_PRESENT_INVALID (must be zero)
 *	bits 12-61:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_OFFSET_BITS	50
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
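/*
 * Worked example: __swp_entry(3, 0x10) places type 3 in bits 6-10 and offset
 * 0x10 from bit 12 upwards, i.e. (3 << 6) | (0x10 << 12); __swp_type() and
 * __swp_offset() simply shift and mask them back out. Bits 0-1 and bit 11
 * stay zero, so the resulting pte is neither valid nor present-invalid and
 * can never be mistaken for a present mapping.
 */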
/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
extern int arch_prepare_to_swap(struct folio *folio);

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the __set_ptes() function.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
		unsigned int nr)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * We don't always have a hardware-managed access flag on arm64. On arm64
 * without the hardware Access Flag, copying from user will fail because the
 * pte is old and cannot be marked young. So we always end up with a zeroed
 * page after fork() + CoW for pfn mappings.
 */
#define arch_has_hw_pte_young		cpu_has_hw_af

#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
#define arch_has_hw_nonleaf_pmd_young	system_supports_haft
#endif

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

/*
 * Request that exec memory is read into the pagecache in at least 64K folios.
 * This size can be contpte-mapped when 4K base pages are in use (16 pages
 * into 1 iTLB entry), and HPA can coalesce it (4 pages into 1 TLB entry) when
 * 16K base pages are in use.
 */
#define exec_folio_order()	ilog2(SZ_64K >> PAGE_SHIFT)
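/*
 * For example, with 4K base pages SZ_64K >> PAGE_SHIFT is 16, so
 * exec_folio_order() is 4 (order-4, i.e. 64K folios); with 16K base pages it
 * is 2, and with 64K base pages it is 0 (a single page already covers 64K).
 */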
static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);

#define modify_prot_start_ptes modify_prot_start_ptes
extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    unsigned int nr);

#define modify_prot_commit_ptes modify_prot_commit_ptes
extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
				    pte_t *ptep, pte_t old_pte, pte_t pte,
				    unsigned int nr);

#ifdef CONFIG_ARM64_CONTPTE

/*
 * The contpte APIs are used to transparently manage the contiguous bit in ptes
 * where it is possible and makes sense to do so. The PTE_CONT bit is considered
 * a private implementation detail of the public ptep API (see below).
 */
extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr);
extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full);
extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full);
int contpte_test_and_clear_young_ptes(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep, unsigned int nr);
int contpte_clear_flush_young_ptes(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep, unsigned int nr);
extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr);
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty);
extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, cydp_t flags);
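/*
 * For example, with 4K base pages a contpte block is CONT_PTES (16) adjacent
 * entries mapping 16 physically contiguous, naturally aligned pages: once
 * all 16 ptes carry PTE_CONT, the TLB may cache the whole 64K range as a
 * single entry instead of 16.
 */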
static __always_inline void contpte_try_fold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	/*
	 * Only bother trying if both the virtual and physical addresses are
	 * aligned and correspond to the last entry in a contig range. The core
	 * code mostly modifies ranges from low to high, so this is likely the
	 * last modification in the contig range and therefore a good time to
	 * fold. We can't fold special mappings, because there is no associated
	 * folio.
	 */

	const unsigned long contmask = CONT_PTES - 1;
	bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;

	if (unlikely(valign)) {
		bool palign = (pte_pfn(pte) & contmask) == contmask;

		if (unlikely(palign &&
		    pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
			__contpte_try_fold(mm, addr, ptep, pte);
	}
}

static __always_inline void contpte_try_unfold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	if (unlikely(pte_valid_cont(pte)))
		__contpte_try_unfold(mm, addr, ptep, pte);
}

#define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	if (!pte_valid_cont(pte))
		return 1;

	return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
}

/*
 * The below functions constitute the public API that arm64 presents to the
 * core-mm to manipulate PTE entries within their page tables (or at least this
 * is the subset of the API that arm64 needs to implement). These public
 * versions will automatically and transparently apply the contiguous bit where
 * it makes sense to do so. Therefore any users that are contig-aware (e.g.
 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
 * private versions, which are prefixed with double underscore. All of these
 * APIs except for ptep_get_lockless() are expected to be called with the PTL
 * held. Although the contiguous bit is considered private to the
 * implementation, it is deliberately allowed to leak through the getters (e.g.
 * ptep_get()), back to core code. This is required so that pte_leaf_size() can
 * provide an accurate size for perf_get_pgtable_size(). But this leakage means
 * it's possible a pte will be passed to a setter with the contiguous bit set,
 * so we explicitly clear the contiguous bit in those cases to prevent
 * accidentally setting it in the pgtable.
 */

#define ptep_get ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get(ptep, pte);
}

#define ptep_get_lockless ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get_lockless(ptep);
}
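/*
 * Behavioural sketch (see contpte_ptep_get() for the details): when the
 * entry is part of a contpte block, ptep_get() returns this entry's pfn and
 * protections but with the access and dirty bits accumulated across all
 * entries of the block, since hardware may have set them on any member of
 * the block.
 */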
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * We don't have the mm or vaddr so cannot unfold contig entries (since
	 * it requires tlb maintenance). set_pte() is not used in core code, so
	 * this should never even be called. Regardless, do our best to service
	 * any call and emit a warning if there is any attempt to set a pte on
	 * top of an existing contig range.
	 */
	pte_t orig_pte = __ptep_get(ptep);

	WARN_ON_ONCE(pte_valid_cont(orig_pte));
	__set_pte(ptep, pte_mknoncont(pte));
}

#define set_ptes set_ptes
static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, unsigned int nr)
{
	pte = pte_mknoncont(pte);

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__set_ptes(mm, addr, ptep, pte, 1);
		contpte_try_fold(mm, addr, ptep, pte);
	} else {
		contpte_set_ptes(mm, addr, ptep, pte, nr);
	}
}

static inline void pte_clear(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	__pte_clear(mm, addr, ptep);
}

#define clear_full_ptes clear_full_ptes
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, unsigned int nr, int full)
{
	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
	}
}

#define get_and_clear_full_ptes get_and_clear_full_ptes
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    unsigned int nr, int full)
{
	pte_t pte;

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	}

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	return __ptep_get_and_clear(mm, addr, ptep);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_test_and_clear_young(vma, addr, ptep);

	return contpte_test_and_clear_young_ptes(vma, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_clear_flush_young_ptes(vma, addr, ptep, 1);
}

#define clear_flush_young_ptes clear_flush_young_ptes
static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 unsigned int nr)
{
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
}

#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep, unsigned int nr)
{
	if (likely(nr == 1)) {
		/*
		 * Optimization: wrprotect_ptes() can only be called for present
		 * ptes, so we only need to check the contig bit as the condition
		 * for unfold, and we can remove the contig bit from the pte we read
		 * to avoid re-reading. This speeds up fork(), which is sensitive
		 * to this cost for order-0 folios. Equivalent to
		 * contpte_try_unfold().
		 */
		pte_t orig_pte = __ptep_get(ptep);

		if (unlikely(pte_cont(orig_pte))) {
			__contpte_try_unfold(mm, addr, ptep, orig_pte);
			orig_pte = pte_mknoncont(orig_pte);
		}
		___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
	} else {
		contpte_wrprotect_ptes(mm, addr, ptep, nr);
	}
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	wrprotect_ptes(mm, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	pte_t orig_pte = __ptep_get(ptep);

	entry = pte_mknoncont(entry);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);

	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}

#define clear_young_dirty_ptes clear_young_dirty_ptes
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		__clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
	else
		contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
}

#else /* CONFIG_ARM64_CONTPTE */

#define ptep_get				__ptep_get
#define set_pte					__set_pte
#define set_ptes				__set_ptes
#define pte_clear				__pte_clear
#define clear_full_ptes				__clear_full_ptes
#define get_and_clear_full_ptes			__get_and_clear_full_ptes
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear			__ptep_get_and_clear
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young		__ptep_test_and_clear_young
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young			__ptep_clear_flush_young
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect			__ptep_set_wrprotect
#define wrprotect_ptes				__wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags			__ptep_set_access_flags
#define clear_young_dirty_ptes			__clear_young_dirty_ptes

#endif /* CONFIG_ARM64_CONTPTE */

#endif /* !__ASSEMBLER__ */

#endif /* __ASM_PGTABLE_H */