/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <asm/por.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return pte_val(pte) & PTE_ADDR_LOW;
}

static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return phys;
}
#endif

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define __pte_clear(mm, addr, ptep) \
	__set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

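/*
 * Illustrative sketch (not part of the original source): a (pfn, prot) pair
 * round-trips through pfn_pte()/pte_pfn(). With 52-bit PAs, the high address
 * bits are packed into PTE_ADDR_HIGH by __phys_to_pte_val() and unpacked
 * again by __pte_to_phys():
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	WARN_ON(pte_pfn(pte) != pfn);
 */
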
/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_valid(pte) || pte_present_invalid(pte))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_present_invalid(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Returns true if the pte is valid and has the contiguous bit set.
 */
#define pte_valid_cont(pte)	(pte_valid(pte) && pte_cont(pte))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute)
{
	u64 por;

	if (!system_supports_poe())
		return true;

	por = read_sysreg_s(SYS_POR_EL0);

	if (write)
		return por_elx_allows_write(por, pkey);

	if (execute)
		return por_elx_allows_exec(por, pkey);

	return por_elx_allows_read(por, pkey);
}

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only
 * mappings, such as PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
 * clear), must return false. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted_no_overlay(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pte_access_permitted(pte, write) \
	(pte_access_permitted_no_overlay(pte, write) && \
	 por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkvalid(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pte_t pte_mkinvalid(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
	pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
	return pte;
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void __set_pte(pte_t *ptep, pte_t pte)
{
	__set_pte_nosync(ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

static inline pte_t __ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(pteval_t old, pteval_t new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        |      1          0          0
 *   0      1        |      1          1          0
 *   1      0        |      1          0          1
 *   1      1        |      0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */

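/*
 * Sketch (illustrative): the resulting dirty test, which is exactly what
 * pte_dirty() expands to via pte_sw_dirty() and pte_hw_dirty() above:
 *
 *	bool dirty = (pte_val(pte) & PTE_DIRTY) ||
 *		     (pte_write(pte) && !pte_rdonly(pte));
 */
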
static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = __ptep_get(ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (__ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted_no_overlay() returns false for exec only
	 * mappings, they don't expose tags (instruction fetches don't check
	 * tags).
	 */
	if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

static inline void __set_ptes(struct mm_struct *mm,
			      unsigned long __always_unused addr,
			      pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);
	__sync_cache_and_tags(pte, nr);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		__set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_advance_pfn(pte, 1);
	}
}

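/*
 * Usage sketch (illustrative): map 'nr' consecutive pages starting at 'pfn';
 * __set_ptes() advances the pfn by one page per entry via pte_advance_pfn():
 *
 *	__set_ptes(mm, addr, ptep, pfn_pte(pfn, prot), nr);
 */
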
/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	/*
	 * pte_present_invalid() tells us that the pte is invalid from the HW
	 * perspective but present from the SW perspective, so the fields are
	 * to be interpreted as per the HW layout. The second two checks are
	 * the unique encoding that we use for PROT_NONE. It is insufficient
	 * to only use the first check because we share the same encoding
	 * scheme with pmds which support pmd_mkinvalid(), so they can be
	 * present-invalid without being PROT_NONE.
	 */
	return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pmd_uffd_wp(pmd)	pte_uffd_wp(pmd_pte(pmd))
#define pmd_mkuffd_wp(pmd)	pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
#define pmd_clear_uffd_wp(pmd)	pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
#define pmd_swp_uffd_wp(pmd)	pte_swp_uffd_wp(pmd_pte(pmd))
#define pmd_swp_mkuffd_wp(pmd)	pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
#define pmd_swp_clear_uffd_wp(pmd) \
				pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
#define pmd_special(pmd)	(!!(pmd_val(pmd) & PTE_SPECIAL))
static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
}
#endif

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
#define pud_special(pud)	pte_special(pud_pte(pud))
#define pud_mkspecial(pud)	pte_pud(pte_mkspecial(pud_pte(pud)))
#endif

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	unsigned long pfn = pud_pfn(pud);

	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
}

static inline void __set_pte_at(struct mm_struct *mm,
				unsigned long __always_unused addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	__sync_cache_and_tags(pte, nr);
	__check_safe_pte_update(mm, ptep, pte);
	__set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
						PMD_SIZE >> PAGE_SHIFT);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
						PUD_SIZE >> PAGE_SHIFT);
}

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

#define pgprot_decrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED)
#define pgprot_encrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, 0)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from
 * the endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

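/*
 * Usage sketch (illustrative): drivers commonly derive a write-combining or
 * device mapping from an existing vma protection, e.g.:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
 */
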
#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

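/*
 * Usage sketch (illustrative): early boot code that cannot rely on the
 * linear map walks a pte table through the fixmap:
 *
 *	pte_t *ptep = pte_set_fixmap_offset(pmdp, addr);
 *	pte_t pte = __ptep_get(ptep);
 *	pte_clear_fixmap();
 */
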
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#else
#define pud_leaf(pud)		false
#endif
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_valid(pud)		false
#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && !(p4d_val(p4d) & P4D_TABLE_BIT))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	/* Ensure that 'p4dp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> P4D_SHIFT) ^ ((u64)p4dp >> 3)) % PTRS_PER_P4D);

	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset pud_offset

static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}

/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && !(pgd_val(pgd) & PGD_TABLE_BIT))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	/* Ensure that 'pgdp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> PGDIR_SHIFT) ^ ((u64)pgdp >> 3)) % PTRS_PER_PGD);

	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

/* Match p4d_offset folding in <asm-generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

static inline
p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	/*
	 * With runtime folding of the pud, pud_offset_lockless() passes
	 * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
	 * will offset the pointer assuming that it points into
	 * a page-table page. However, the fast GUP path passes us a
	 * pgd_t allocated on the stack and so we must use the original
	 * pointer in 'pgdp' to construct the p4d pointer instead of
	 * using the generic p4d_offset_lockless() implementation.
	 *
	 * Note: reusing the original pointer means that we may
	 * dereference the same (live) page-table entry multiple times.
	 * This is safe because it is still only loaded once in the
	 * context of each level and the CPU guarantees same-address
	 * read-after-read ordering.
	 */
	return p4d_offset(pgdp, addr);
}
#define p4d_offset_lockless p4d_offset_lockless_folded

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

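/*
 * Illustrative sketch (assumes 'addr' is mapped and the caller holds the
 * relevant locks): a software walk down to the pte level using the offset
 * helpers above together with the generic pgd_offset()/pmd_offset()/
 * pte_offset_kernel() from core mm. Folded levels simply return their input
 * pointer, so the same code works for any number of active levels:
 *
 *	pgd_t *pgdp = pgd_offset(mm, addr);
 *	p4d_t *p4dp = p4d_offset(pgdp, addr);
 *	pud_t *pudp = pud_offset(p4dp, addr);
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	pte_t *ptep = pte_offset_kernel(pmdp, addr);
 */
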
#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
			      PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK;

	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

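/*
 * Usage sketch (illustrative): change the protection of an existing entry
 * while preserving the pfn and the dirty/young state, as mprotect() does:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */
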
extern int __ptep_set_access_flags(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep,
				   pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
							pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = __ptep_get(ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep)
{
	int young = __ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	/* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
	VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
	return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */

static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		__ptep_get_and_clear(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep,
					      unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = __ptep_get_and_clear(mm, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep,
					 pte_t pte)
{
	pte_t old_pte;

	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

/*
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
}

static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
				    pte_t *ptep, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
		__ptep_set_wrprotect(mm, address, ptep);
}

static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t pte, cydp_t flags)
{
	pte_t old_pte;

	do {
		old_pte = pte;

		if (flags & CYDP_CLEAR_YOUNG)
			pte = pte_mkold(pte);
		if (flags & CYDP_CLEAR_DIRTY)
			pte = pte_mkclean(pte);

		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep,
					    unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		pte = __ptep_get(ptep);

		if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
			__set_pte(ptep, pte_mkclean(pte_mkold(pte)));
		else
			__clear_young_dirty_pte(vma, addr, ptep, pte, flags);

		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	__ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit 2:		remember PG_anon_exclusive
 *	bit 3:		remember uffd-wp state
 *	bits 6-10:	swap type
 *	bit 11:		PTE_PRESENT_INVALID (must be zero)
 *	bits 12-61:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_OFFSET_BITS	50
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

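/*
 * Sketch (illustrative): encoding and decoding a swap entry with the layout
 * above:
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);
 *
 *	WARN_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
 *	WARN_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
 */
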
/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
extern int arch_prepare_to_swap(struct folio *folio);

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the __set_ptes() function.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
		unsigned int nr)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
#define arch_has_hw_pte_young		cpu_has_hw_af

#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
#define arch_has_hw_nonleaf_pmd_young	system_supports_haft
#endif

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}


#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);

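/*
 * Usage sketch (illustrative): the transaction pattern used by core mm's
 * change_protection(), which these hooks implement on arm64:
 *
 *	pte_t old_pte = ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = pte_modify(old_pte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */
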
#ifdef CONFIG_ARM64_CONTPTE

/*
 * The contpte APIs are used to transparently manage the contiguous bit in ptes
 * where it is possible and makes sense to do so. The PTE_CONT bit is considered
 * a private implementation detail of the public ptep API (see below).
 */
extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr);
extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full);
extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full);
extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr);
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty);
extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, cydp_t flags);

static __always_inline void contpte_try_fold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	/*
	 * Only bother trying if both the virtual and physical addresses are
	 * aligned and correspond to the last entry in a contig range. The core
	 * code mostly modifies ranges from low to high, so this is likely the
	 * last modification in the contig range, and therefore a good time to
	 * fold. We can't fold special mappings, because there is no associated
	 * folio.
	 */

	const unsigned long contmask = CONT_PTES - 1;
	bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;

	if (unlikely(valign)) {
		bool palign = (pte_pfn(pte) & contmask) == contmask;

		if (unlikely(palign &&
		    pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
			__contpte_try_fold(mm, addr, ptep, pte);
	}
}

static __always_inline void contpte_try_unfold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	if (unlikely(pte_valid_cont(pte)))
		__contpte_try_unfold(mm, addr, ptep, pte);
}

#define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	if (!pte_valid_cont(pte))
		return 1;

	return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
}

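/*
 * Usage sketch (illustrative): a caller iterating over a range may skip
 * ahead by the batch hint, since all ptes inside a contig block are kept
 * consistent by the contpte code:
 *
 *	unsigned int nr = pte_batch_hint(ptep, pte);
 *
 *	ptep += nr;
 *	addr += nr * PAGE_SIZE;
 */
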
/*
 * The below functions constitute the public API that arm64 presents to the
 * core-mm to manipulate PTE entries within their page tables (or at least this
 * is the subset of the API that arm64 needs to implement). These public
 * versions will automatically and transparently apply the contiguous bit where
 * it makes sense to do so. Therefore any users that are contig-aware (e.g.
 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
 * private versions, which are prefixed with double underscore. All of these
 * APIs except for ptep_get_lockless() are expected to be called with the PTL
 * held. Although the contiguous bit is considered private to the
 * implementation, it is deliberately allowed to leak through the getters (e.g.
 * ptep_get()), back to core code. This is required so that pte_leaf_size() can
 * provide an accurate size for perf_get_pgtable_size(). But this leakage means
 * it's possible a pte will be passed to a setter with the contiguous bit set,
 * so we explicitly clear the contiguous bit in those cases to prevent
 * accidentally setting it in the pgtable.
 */

#define ptep_get ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get(ptep, pte);
}

#define ptep_get_lockless ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get_lockless(ptep);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * We don't have the mm or vaddr so cannot unfold contig entries (since
	 * it requires tlb maintenance). set_pte() is not used in core code, so
	 * this should never even be called. Regardless do our best to service
	 * any call and emit a warning if there is any attempt to set a pte on
	 * top of an existing contig range.
	 */
	pte_t orig_pte = __ptep_get(ptep);

	WARN_ON_ONCE(pte_valid_cont(orig_pte));
	__set_pte(ptep, pte_mknoncont(pte));
}

#define set_ptes set_ptes
static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	pte = pte_mknoncont(pte);

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__set_ptes(mm, addr, ptep, pte, 1);
		contpte_try_fold(mm, addr, ptep, pte);
	} else {
		contpte_set_ptes(mm, addr, ptep, pte, nr);
	}
}

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	__pte_clear(mm, addr, ptep);
}

#define clear_full_ptes clear_full_ptes
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full)
{
	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
	}
}

#define get_and_clear_full_ptes get_and_clear_full_ptes
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full)
{
	pte_t pte;

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	}

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	return __ptep_get_and_clear(mm, addr, ptep);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_test_and_clear_young(vma, addr, ptep);

	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_ptep_clear_flush_young(vma, addr, ptep);
}

#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, unsigned int nr)
{
	if (likely(nr == 1)) {
		/*
		 * Optimization: wrprotect_ptes() can only be called for present
		 * ptes so we only need to check contig bit as condition for
		 * unfold, and we can remove the contig bit from the pte we read
		 * to avoid re-reading. This speeds up fork() which is sensitive
		 * for order-0 folios. Equivalent to contpte_try_unfold().
		 */
		pte_t orig_pte = __ptep_get(ptep);

		if (unlikely(pte_cont(orig_pte))) {
			__contpte_try_unfold(mm, addr, ptep, orig_pte);
			orig_pte = pte_mknoncont(orig_pte);
		}
		___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
	} else {
		contpte_wrprotect_ptes(mm, addr, ptep, nr);
	}
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	wrprotect_ptes(mm, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty)
{
	pte_t orig_pte = __ptep_get(ptep);

	entry = pte_mknoncont(entry);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);

	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}

#define clear_young_dirty_ptes clear_young_dirty_ptes
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		__clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
	else
		contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
}

#else /* CONFIG_ARM64_CONTPTE */

#define ptep_get				__ptep_get
#define set_pte					__set_pte
#define set_ptes				__set_ptes
#define pte_clear				__pte_clear
#define clear_full_ptes				__clear_full_ptes
#define get_and_clear_full_ptes			__get_and_clear_full_ptes
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear			__ptep_get_and_clear
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young		__ptep_test_and_clear_young
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young			__ptep_clear_flush_young
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect			__ptep_set_wrprotect
#define wrprotect_ptes				__wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags			__ptep_set_access_flags
#define clear_young_dirty_ptes			__clear_young_dirty_ptes

#endif /* CONFIG_ARM64_CONTPTE */

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */