/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_LOW)
#define __phys_to_pte_val(phys)	(phys)
#endif

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
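/*
 * To make the 52-bit packing above concrete, here is a minimal user-space
 * sketch of the __pte_to_phys()/__phys_to_pte_val() round trip (compiled
 * out). It assumes the 64K-page layout, where PA[51:48] is held in pte
 * bits [15:12] (hence the shift of 36), and it ignores PTE_MAYBE_SHARED;
 * the constants are illustrative stand-ins, not the kernel's definitions.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define M_PTE_ADDR_LOW		0x0000ffffffff0000ULL	/* pte[47:16] */
#define M_PTE_ADDR_HIGH		0x000000000000f000ULL	/* pte[15:12] */
#define M_PTE_ADDR_HIGH_SHIFT	36

static uint64_t model_phys_to_pte(uint64_t phys)
{
	return (phys | (phys >> M_PTE_ADDR_HIGH_SHIFT)) &
	       (M_PTE_ADDR_LOW | M_PTE_ADDR_HIGH);
}

static uint64_t model_pte_to_phys(uint64_t pte)
{
	return (pte & M_PTE_ADDR_LOW) |
	       ((pte & M_PTE_ADDR_HIGH) << M_PTE_ADDR_HIGH_SHIFT);
}

int main(void)
{
	uint64_t pa = 0x000f123456780000ULL;	/* 52-bit, 64K-aligned */

	assert(model_pte_to_phys(model_phys_to_pte(pa)) == pa);
	return 0;
}
#endif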
#define pte_none(pte)		(!pte_val(pte))
#define __pte_clear(mm, addr, ptep) \
				__set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_valid(pte) || pte_present_invalid(pte))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_present_invalid(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Returns true if the pte is valid and has the contiguous bit set.
 */
#define pte_valid_cont(pte)	(pte_valid(pte) && pte_cont(pte))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
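/*
 * The pte_valid_not_user() encoding above can be checked exhaustively in
 * user space. A minimal sketch (compiled out) with illustrative bit
 * positions: only valid kernel mappings (UXN set, USER clear) pass, while
 * normal user mappings and execute-only user mappings (USER and UXN both
 * clear) do not.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define M_VALID	(1u << 0)
#define M_USER	(1u << 1)
#define M_UXN	(1u << 2)

static bool model_pte_valid_not_user(uint32_t pte)
{
	return (pte & (M_VALID | M_USER | M_UXN)) == (M_VALID | M_UXN);
}

int main(void)
{
	assert(model_pte_valid_not_user(M_VALID | M_UXN));	     /* kernel */
	assert(!model_pte_valid_not_user(M_VALID | M_USER | M_UXN)); /* user */
	assert(!model_pte_valid_not_user(M_VALID));	/* exec-only user */
	return 0;
}
#endif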
/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). It must return false
 * for execute-only mappings, like PROT_EXEC with EPAN (both PTE_USER and
 * PTE_UXN bits not set). PROT_NONE mappings do not have the PTE_VALID
 * bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pte_t pte_mkinvalid(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
	pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
	return pte;
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
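/*
 * A small sketch (compiled out) of the invariant encoded by
 * pte_mkuffd_wp() above: a userfaultfd write-protected entry is always
 * also write-protected in the hardware sense, because the helper chains
 * through pte_wrprotect(). Bit positions are illustrative.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define M_RDONLY	(1u << 0)
#define M_WRITE		(1u << 1)
#define M_DIRTY		(1u << 2)
#define M_UFFD_WP	(1u << 3)

static uint32_t model_wrprotect(uint32_t pte)
{
	if ((pte & M_WRITE) && !(pte & M_RDONLY))
		pte |= M_DIRTY;		/* carry hardware dirtiness over */
	pte &= ~M_WRITE;
	pte |= M_RDONLY;
	return pte;
}

static uint32_t model_mkuffd_wp(uint32_t pte)
{
	return model_wrprotect(pte | M_UFFD_WP);
}

int main(void)
{
	uint32_t pte = model_mkuffd_wp(M_WRITE);

	assert((pte & M_UFFD_WP) && !(pte & M_WRITE) && (pte & M_RDONLY));
	return 0;
}
#endif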
static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void __set_pte(pte_t *ptep, pte_t pte)
{
	__set_pte_nosync(ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

static inline pte_t __ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(u64 old, u64 new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *     0       0     |     1           0           0
 *     0       1     |     1           1           0
 *     1       0     |     1           0           1
 *     1       1     |     0           1           x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
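/*
 * The table above is easy to exercise outside the kernel. A minimal
 * user-space model (compiled out), with illustrative bit positions,
 * showing a hardware DBM update followed by pte_wrprotect() migrating
 * the dirtiness into the software dirty bit:
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define M_RDONLY	(1u << 0)
#define M_WRITE		(1u << 1)	/* doubles as the hardware DBM bit */
#define M_DIRTY		(1u << 2)	/* software dirty */

static bool model_hw_dirty(uint32_t pte)
{
	return (pte & M_WRITE) && !(pte & M_RDONLY);
}

static bool model_dirty(uint32_t pte)
{
	return (pte & M_DIRTY) || model_hw_dirty(pte);
}

static uint32_t model_wrprotect(uint32_t pte)
{
	if (model_hw_dirty(pte))
		pte |= M_DIRTY;		/* don't lose hardware dirtiness */
	pte &= ~M_WRITE;
	pte |= M_RDONLY;
	return pte;
}

int main(void)
{
	/* clean writable: row "0 1" of the table (RDONLY=1, WRITE=1) */
	uint32_t pte = M_RDONLY | M_WRITE;

	assert(!model_dirty(pte));
	pte &= ~M_RDONLY;		/* hardware DBM clears RDONLY on write */
	assert(model_hw_dirty(pte) && model_dirty(pte));
	pte = model_wrprotect(pte);	/* row "1 0": dirtiness now in sw bit */
	assert(!model_hw_dirty(pte) && (pte & M_DIRTY));
	return 0;
}
#endif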
static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = __ptep_get(ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (__ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted() returns false for exec-only mappings, they
	 * don't expose tags (instruction fetches don't check tags).
	 */
	if (system_supports_mte() && pte_access_permitted(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

/*
 * Select all bits except the pfn.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

static inline void __set_ptes(struct mm_struct *mm,
			      unsigned long __always_unused addr,
			      pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);
	__sync_cache_and_tags(pte, nr);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		__set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_advance_pfn(pte, 1);
	}
}
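/*
 * A user-space sketch (compiled out) of the __set_ptes() loop above: one
 * prototype entry is stamped into nr consecutive slots, advancing only
 * the pfn field each iteration. The 12-bit page shift and pte layout are
 * illustrative.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define M_PAGE_SHIFT	12
#define M_PFN_MASK	0xfffffffffffff000ULL

static uint64_t model_advance_pfn(uint64_t pte, unsigned long nr)
{
	uint64_t pfn = (pte & M_PFN_MASK) >> M_PAGE_SHIFT;

	return ((pfn + nr) << M_PAGE_SHIFT) | (pte & ~M_PFN_MASK);
}

static void model_set_ptes(uint64_t *ptep, uint64_t pte, unsigned int nr)
{
	for (;;) {
		*ptep = pte;		/* __set_pte() stand-in */
		if (--nr == 0)
			break;
		ptep++;
		pte = model_advance_pfn(pte, 1);
	}
}

int main(void)
{
	uint64_t table[4] = { 0 };

	model_set_ptes(table, (0x1000ULL << M_PAGE_SHIFT) | 0x3, 4);
	assert(table[3] == ((0x1003ULL << M_PAGE_SHIFT) | 0x3));
	return 0;
}
#endif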
/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}
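/*
 * What mk_pmd_sect_prot() does, sketched in user space (compiled out): at
 * the pmd level the low two descriptor bits are 0b11 for a table and
 * 0b01 for a block (section), so the conversion clears the table bit and
 * keeps everything else. The encodings here are abbreviated stand-ins.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define M_PMD_TYPE_MASK		0x3ULL
#define M_PMD_TABLE_BIT		0x2ULL
#define M_PMD_TYPE_SECT		0x1ULL
#define M_PMD_TYPE_TABLE	0x3ULL

static uint64_t model_mk_pmd_sect_prot(uint64_t prot)
{
	return (prot & ~M_PMD_TABLE_BIT) | M_PMD_TYPE_SECT;
}

int main(void)
{
	uint64_t prot = M_PMD_TYPE_TABLE | 0x700;	/* arbitrary attrs */
	uint64_t sect = model_mk_pmd_sect_prot(prot);

	assert((sect & M_PMD_TYPE_MASK) == M_PMD_TYPE_SECT);
	assert((sect & 0x700) == 0x700);		/* attrs preserved */
	return 0;
}
#endif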
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	/*
	 * pte_present_invalid() tells us that the pte is invalid from the HW
	 * perspective but present from the SW perspective, so the fields are
	 * to be interpreted as per the HW layout. The second two checks are
	 * the unique encoding that we use for PROT_NONE. It is insufficient
	 * to only use the first check because we share the same encoding
	 * scheme with pmds which support pmd_mkinvalid(), so they can be
	 * present-invalid without being PROT_NONE.
	 */
	return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif
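/*
 * The PROT_NONE encoding above, modelled in user space (compiled out)
 * with illustrative bit positions: present-invalid plus neither
 * user-readable nor user-executable. A pmd made invalid by
 * pmd_mkinvalid() keeps its user bits and is therefore not PROT_NONE.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define M_VALID			(1u << 0)
#define M_USER			(1u << 1)
#define M_UXN			(1u << 2)
#define M_PRESENT_INVALID	(1u << 3)

static bool model_present_invalid(uint32_t pte)
{
	return (pte & (M_VALID | M_PRESENT_INVALID)) == M_PRESENT_INVALID;
}

static bool model_protnone(uint32_t pte)
{
	/* !pte_user() && !pte_user_exec(): USER clear, UXN set */
	return model_present_invalid(pte) && !(pte & M_USER) && (pte & M_UXN);
}

int main(void)
{
	assert(model_protnone(M_PRESENT_INVALID | M_UXN));
	/* invalidated user pmd: present-invalid but not PROT_NONE */
	assert(!model_protnone(M_PRESENT_INVALID | M_USER | M_UXN));
	return 0;
}
#endif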
#define pmd_present(pmd)	pte_present(pmd_pte(pmd))

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pmd_uffd_wp(pmd)	pte_uffd_wp(pmd_pte(pmd))
#define pmd_mkuffd_wp(pmd)	pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
#define pmd_clear_uffd_wp(pmd)	pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
#define pmd_swp_uffd_wp(pmd)	pte_swp_uffd_wp(pmd_pte(pmd))
#define pmd_swp_mkuffd_wp(pmd)	pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
#define pmd_swp_clear_uffd_wp(pmd)	\
				pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

static inline void __set_pte_at(struct mm_struct *mm,
				unsigned long __always_unused addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	__sync_cache_and_tags(pte, nr);
	__check_safe_pte_update(mm, ptep, pte);
	__set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
			    PMD_SIZE >> PAGE_SHIFT);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
			    PUD_SIZE >> PAGE_SHIFT);
}

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
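/*
 * __pgprot_modify() is a plain clear-then-set on the prot value. A tiny
 * user-space sketch (compiled out) of how pgprot_writecombine() et al.
 * rewrite the 3-bit memory-attribute index; the field placement below is
 * an illustrative stand-in for PTE_ATTRINDX_MASK.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define M_ATTRINDX_SHIFT	2
#define M_ATTRINDX_MASK		(0x7ULL << M_ATTRINDX_SHIFT)
#define M_ATTRINDX(n)		((uint64_t)(n) << M_ATTRINDX_SHIFT)

#define model_pgprot_modify(prot, mask, bits) (((prot) & ~(mask)) | (bits))

int main(void)
{
	uint64_t prot = M_ATTRINDX(0) | 0x3;	/* some memory type + flags */
	uint64_t wc = model_pgprot_modify(prot, M_ATTRINDX_MASK,
					  M_ATTRINDX(5));

	assert((wc & M_ATTRINDX_MASK) == M_ATTRINDX(5));  /* new type */
	assert((wc & ~M_ATTRINDX_MASK) == 0x3);	/* other bits intact */
	return 0;
}
#endif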
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from
 * the endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
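/*
 * The pte_offset_phys() arithmetic above, sketched in user space
 * (compiled out): the physical address of a pte is the table's base plus
 * index * sizeof(pte). Assumes 4K pages, i.e. 512 eight-byte entries per
 * table; both are illustrative.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define M_PAGE_SHIFT	12
#define M_PTRS_PER_PTE	512

static unsigned int model_pte_index(uint64_t addr)
{
	return (addr >> M_PAGE_SHIFT) & (M_PTRS_PER_PTE - 1);
}

static uint64_t model_pte_offset_phys(uint64_t table_paddr, uint64_t addr)
{
	return table_paddr + model_pte_index(addr) * sizeof(uint64_t);
}

int main(void)
{
	/* 0x201000: page 0x201, i.e. slot 1 of its last-level table */
	assert(model_pte_index(0x201000) == 1);
	assert(model_pte_offset_phys(0x40000000, 0x201000) == 0x40000008);
	return 0;
}
#endif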
#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#else
#define pud_leaf(pud)		false
#endif
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}
/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_valid(pud)		false
#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded	mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}
static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset	pud_offset

static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}
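/*
 * When the pud level is folded away, p4d_to_folded_pud() treats the page
 * that holds the p4d pointer as the pud table itself: realign to the page
 * base, then index. A user-space sketch (compiled out); the index is
 * passed directly instead of being derived from the address.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define M_PAGE_SIZE	4096ULL

static uint64_t *model_to_folded_pud(uint64_t *p4dp, unsigned int idx)
{
	/* PTR_ALIGN_DOWN() stand-in */
	uintptr_t base = (uintptr_t)p4dp & ~(M_PAGE_SIZE - 1);

	return (uint64_t *)base + idx;
}

int main(void)
{
	uint64_t *pgd = aligned_alloc(M_PAGE_SIZE, M_PAGE_SIZE);
	uint64_t *p4dp = &pgd[7];	/* some entry within the table page */

	assert(model_to_folded_pud(p4dp, 3) == &pgd[3]);
	free(pgd);
	return 0;
}
#endif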
/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded	mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}
static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 4 */
CONFIG_PGTABLE_LEVELS > 4 */ 1069a6bbf5d4SArd Biesheuvel 10702cf660ebSGavin Shan #define pgd_ERROR(e) \ 10712cf660ebSGavin Shan pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e)) 10727078db46SCatalin Marinas 1073961faac1SMark Rutland #define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr)) 1074961faac1SMark Rutland #define pgd_clear_fixmap() clear_fixmap(FIX_PGD) 1075961faac1SMark Rutland 10764f04d8f0SCatalin Marinas static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 10774f04d8f0SCatalin Marinas { 10789f341931SCatalin Marinas /* 10799f341931SCatalin Marinas * Normal and Normal-Tagged are two different memory types and indices 10809f341931SCatalin Marinas * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK. 10819f341931SCatalin Marinas */ 1082a6fadf7eSWill Deacon const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 1083f0f5863aSRyan Roberts PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE | 1084f0f5863aSRyan Roberts PTE_GP | PTE_ATTRINDX_MASK; 10852f4b829cSCatalin Marinas /* preserve the hardware dirty information */ 10862f4b829cSCatalin Marinas if (pte_hw_dirty(pte)) 10876477c388SAnshuman Khandual pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 10886477c388SAnshuman Khandual 10894f04d8f0SCatalin Marinas pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 10903c069607SJames Houghton /* 10913c069607SJames Houghton * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware 10923c069607SJames Houghton * dirtiness again. 10933c069607SJames Houghton */ 10943c069607SJames Houghton if (pte_sw_dirty(pte)) 10953c069607SJames Houghton pte = pte_mkdirty(pte); 10964f04d8f0SCatalin Marinas return pte; 10974f04d8f0SCatalin Marinas } 10984f04d8f0SCatalin Marinas 10999c7e535fSSteve Capper static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 11009c7e535fSSteve Capper { 11019c7e535fSSteve Capper return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); 11029c7e535fSSteve Capper } 11039c7e535fSSteve Capper 11045a00bfd6SRyan Roberts extern int __ptep_set_access_flags(struct vm_area_struct *vma, 110566dbd6e6SCatalin Marinas unsigned long address, pte_t *ptep, 110666dbd6e6SCatalin Marinas pte_t entry, int dirty); 110766dbd6e6SCatalin Marinas 1108282aa705SCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1109282aa705SCatalin Marinas #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 1110282aa705SCatalin Marinas static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 1111282aa705SCatalin Marinas unsigned long address, pmd_t *pmdp, 1112282aa705SCatalin Marinas pmd_t entry, int dirty) 1113282aa705SCatalin Marinas { 11145a00bfd6SRyan Roberts return __ptep_set_access_flags(vma, address, (pte_t *)pmdp, 11155a00bfd6SRyan Roberts pmd_pte(entry), dirty); 1116282aa705SCatalin Marinas } 111773b20c84SRobin Murphy 111873b20c84SRobin Murphy static inline int pud_devmap(pud_t pud) 111973b20c84SRobin Murphy { 112073b20c84SRobin Murphy return 0; 112173b20c84SRobin Murphy } 112273b20c84SRobin Murphy 112373b20c84SRobin Murphy static inline int pgd_devmap(pgd_t pgd) 112473b20c84SRobin Murphy { 112573b20c84SRobin Murphy return 0; 112673b20c84SRobin Murphy } 1127282aa705SCatalin Marinas #endif 1128282aa705SCatalin Marinas 1129ed928a34STong Tiangen #ifdef CONFIG_PAGE_TABLE_CHECK 1130ed928a34STong Tiangen static inline bool pte_user_accessible_page(pte_t pte) 1131ed928a34STong Tiangen { 1132f0f5863aSRyan Roberts return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte)); 1133ed928a34STong Tiangen } 1134ed928a34STong Tiangen 1135ed928a34STong 
Tiangen static inline bool pmd_user_accessible_page(pmd_t pmd) 1136ed928a34STong Tiangen { 1137f0f5863aSRyan Roberts return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd)); 1138ed928a34STong Tiangen } 1139ed928a34STong Tiangen 1140ed928a34STong Tiangen static inline bool pud_user_accessible_page(pud_t pud) 1141ed928a34STong Tiangen { 1142f0f5863aSRyan Roberts return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud)); 1143ed928a34STong Tiangen } 1144ed928a34STong Tiangen #endif 1145ed928a34STong Tiangen 11462f4b829cSCatalin Marinas /* 11472f4b829cSCatalin Marinas * Atomic pte/pmd modifications. 11482f4b829cSCatalin Marinas */ 11495a00bfd6SRyan Roberts static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma, 11505a00bfd6SRyan Roberts unsigned long address, 11515a00bfd6SRyan Roberts pte_t *ptep) 11522f4b829cSCatalin Marinas { 11533bbf7157SCatalin Marinas pte_t old_pte, pte; 11542f4b829cSCatalin Marinas 11555a00bfd6SRyan Roberts pte = __ptep_get(ptep); 11563bbf7157SCatalin Marinas do { 11573bbf7157SCatalin Marinas old_pte = pte; 11583bbf7157SCatalin Marinas pte = pte_mkold(pte); 11593bbf7157SCatalin Marinas pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 11603bbf7157SCatalin Marinas pte_val(old_pte), pte_val(pte)); 11613bbf7157SCatalin Marinas } while (pte_val(pte) != pte_val(old_pte)); 11622f4b829cSCatalin Marinas 11633bbf7157SCatalin Marinas return pte_young(pte); 11642f4b829cSCatalin Marinas } 11652f4b829cSCatalin Marinas 11665a00bfd6SRyan Roberts static inline int __ptep_clear_flush_young(struct vm_area_struct *vma, 11673403e56bSAlex Van Brunt unsigned long address, pte_t *ptep) 11683403e56bSAlex Van Brunt { 11695a00bfd6SRyan Roberts int young = __ptep_test_and_clear_young(vma, address, ptep); 11703403e56bSAlex Van Brunt 11713403e56bSAlex Van Brunt if (young) { 11723403e56bSAlex Van Brunt /* 11733403e56bSAlex Van Brunt * We can elide the trailing DSB here since the worst that can 11743403e56bSAlex Van Brunt * happen is that a CPU continues to use the young entry in its 11753403e56bSAlex Van Brunt * TLB and we mistakenly reclaim the associated page. The 11763403e56bSAlex Van Brunt * window for such an event is bounded by the next 11773403e56bSAlex Van Brunt * context-switch, which provides a DSB to complete the TLB 11783403e56bSAlex Van Brunt * invalidation. 
11793403e56bSAlex Van Brunt */ 11803403e56bSAlex Van Brunt flush_tlb_page_nosync(vma, address); 11813403e56bSAlex Van Brunt } 11823403e56bSAlex Van Brunt 11833403e56bSAlex Van Brunt return young; 11843403e56bSAlex Van Brunt } 11853403e56bSAlex Van Brunt 11862f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 11872f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 11882f4b829cSCatalin Marinas static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 11892f4b829cSCatalin Marinas unsigned long address, 11902f4b829cSCatalin Marinas pmd_t *pmdp) 11912f4b829cSCatalin Marinas { 11925a00bfd6SRyan Roberts return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); 11932f4b829cSCatalin Marinas } 11942f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 11952f4b829cSCatalin Marinas 11965a00bfd6SRyan Roberts static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, 11972f4b829cSCatalin Marinas unsigned long address, pte_t *ptep) 11982f4b829cSCatalin Marinas { 119942b25471SKefeng Wang pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0)); 120042b25471SKefeng Wang 1201aa232204SKemeng Shi page_table_check_pte_clear(mm, pte); 120242b25471SKefeng Wang 120342b25471SKefeng Wang return pte; 12042f4b829cSCatalin Marinas } 12052f4b829cSCatalin Marinas 12066b1e4efbSRyan Roberts static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr, 12076b1e4efbSRyan Roberts pte_t *ptep, unsigned int nr, int full) 12086b1e4efbSRyan Roberts { 12096b1e4efbSRyan Roberts for (;;) { 12106b1e4efbSRyan Roberts __ptep_get_and_clear(mm, addr, ptep); 12116b1e4efbSRyan Roberts if (--nr == 0) 12126b1e4efbSRyan Roberts break; 12136b1e4efbSRyan Roberts ptep++; 12146b1e4efbSRyan Roberts addr += PAGE_SIZE; 12156b1e4efbSRyan Roberts } 12166b1e4efbSRyan Roberts } 12176b1e4efbSRyan Roberts 12186b1e4efbSRyan Roberts static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm, 12196b1e4efbSRyan Roberts unsigned long addr, pte_t *ptep, 12206b1e4efbSRyan Roberts unsigned int nr, int full) 12216b1e4efbSRyan Roberts { 12226b1e4efbSRyan Roberts pte_t pte, tmp_pte; 12236b1e4efbSRyan Roberts 12246b1e4efbSRyan Roberts pte = __ptep_get_and_clear(mm, addr, ptep); 12256b1e4efbSRyan Roberts while (--nr) { 12266b1e4efbSRyan Roberts ptep++; 12276b1e4efbSRyan Roberts addr += PAGE_SIZE; 12286b1e4efbSRyan Roberts tmp_pte = __ptep_get_and_clear(mm, addr, ptep); 12296b1e4efbSRyan Roberts if (pte_dirty(tmp_pte)) 12306b1e4efbSRyan Roberts pte = pte_mkdirty(pte); 12316b1e4efbSRyan Roberts if (pte_young(tmp_pte)) 12326b1e4efbSRyan Roberts pte = pte_mkyoung(pte); 12336b1e4efbSRyan Roberts } 12346b1e4efbSRyan Roberts return pte; 12356b1e4efbSRyan Roberts } 12366b1e4efbSRyan Roberts 12372f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1238911f56eeSCatalin Marinas #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1239911f56eeSCatalin Marinas static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 12402f4b829cSCatalin Marinas unsigned long address, pmd_t *pmdp) 12412f4b829cSCatalin Marinas { 124242b25471SKefeng Wang pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0)); 124342b25471SKefeng Wang 12441831414cSKemeng Shi page_table_check_pmd_clear(mm, pmd); 124542b25471SKefeng Wang 124642b25471SKefeng Wang return pmd; 12472f4b829cSCatalin Marinas } 12482f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 12492f4b829cSCatalin Marinas 1250311a6cf2SRyan Roberts static inline void ___ptep_set_wrprotect(struct mm_struct *mm, 1251311a6cf2SRyan Roberts unsigned long 
address, pte_t *ptep,
1252311a6cf2SRyan Roberts 					pte_t pte)
12532f4b829cSCatalin Marinas {
1254311a6cf2SRyan Roberts 	pte_t old_pte;
12552f4b829cSCatalin Marinas 
12563bbf7157SCatalin Marinas 	do {
12573bbf7157SCatalin Marinas 		old_pte = pte;
12583bbf7157SCatalin Marinas 		pte = pte_wrprotect(pte);
12593bbf7157SCatalin Marinas 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
12603bbf7157SCatalin Marinas 					       pte_val(old_pte), pte_val(pte));
12613bbf7157SCatalin Marinas 	} while (pte_val(pte) != pte_val(old_pte));
12622f4b829cSCatalin Marinas }
12632f4b829cSCatalin Marinas 
12642f4b829cSCatalin Marinas /*
12655a00bfd6SRyan Roberts  * __ptep_set_wrprotect - mark read-only while transferring potential hardware
12662f4b829cSCatalin Marinas  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
12672f4b829cSCatalin Marinas  */
12685a00bfd6SRyan Roberts static inline void __ptep_set_wrprotect(struct mm_struct *mm,
12695a00bfd6SRyan Roberts 					unsigned long address, pte_t *ptep)
12702f4b829cSCatalin Marinas {
1271311a6cf2SRyan Roberts 	___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
1272311a6cf2SRyan Roberts }
12732f4b829cSCatalin Marinas 
1274311a6cf2SRyan Roberts static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
1275311a6cf2SRyan Roberts 				pte_t *ptep, unsigned int nr)
1276311a6cf2SRyan Roberts {
1277311a6cf2SRyan Roberts 	unsigned int i;
1278311a6cf2SRyan Roberts 
1279311a6cf2SRyan Roberts 	for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
1280311a6cf2SRyan Roberts 		__ptep_set_wrprotect(mm, address, ptep);
12812f4b829cSCatalin Marinas }
12822f4b829cSCatalin Marinas 
1283*89e86854SLance Yang static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
1284*89e86854SLance Yang 					   unsigned long addr, pte_t *ptep,
1285*89e86854SLance Yang 					   pte_t pte, cydp_t flags)
1286*89e86854SLance Yang {
1287*89e86854SLance Yang 	pte_t old_pte;
1288*89e86854SLance Yang 
1289*89e86854SLance Yang 	do {
1290*89e86854SLance Yang 		old_pte = pte;
1291*89e86854SLance Yang 
1292*89e86854SLance Yang 		if (flags & CYDP_CLEAR_YOUNG)
1293*89e86854SLance Yang 			pte = pte_mkold(pte);
1294*89e86854SLance Yang 		if (flags & CYDP_CLEAR_DIRTY)
1295*89e86854SLance Yang 			pte = pte_mkclean(pte);
1296*89e86854SLance Yang 
1297*89e86854SLance Yang 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
1298*89e86854SLance Yang 					       pte_val(old_pte), pte_val(pte));
1299*89e86854SLance Yang 	} while (pte_val(pte) != pte_val(old_pte));
1300*89e86854SLance Yang }
1301*89e86854SLance Yang 
1302*89e86854SLance Yang static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
1303*89e86854SLance Yang 					    unsigned long addr, pte_t *ptep,
1304*89e86854SLance Yang 					    unsigned int nr, cydp_t flags)
1305*89e86854SLance Yang {
1306*89e86854SLance Yang 	pte_t pte;
1307*89e86854SLance Yang 
1308*89e86854SLance Yang 	for (;;) {
1309*89e86854SLance Yang 		pte = __ptep_get(ptep);
1310*89e86854SLance Yang 
1311*89e86854SLance Yang 		if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
1312*89e86854SLance Yang 			__set_pte(ptep, pte_mkclean(pte_mkold(pte)));
1313*89e86854SLance Yang 		else
1314*89e86854SLance Yang 			__clear_young_dirty_pte(vma, addr, ptep, pte, flags);
1315*89e86854SLance Yang 
1316*89e86854SLance Yang 		if (--nr == 0)
1317*89e86854SLance Yang 			break;
1318*89e86854SLance Yang 		ptep++;
1319*89e86854SLance Yang 		addr += PAGE_SIZE;
1320*89e86854SLance Yang 	}
1321*89e86854SLance Yang }
1322*89e86854SLance Yang 
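/*
 * Note the fast path in __clear_young_dirty_ptes() above: when both
 * CYDP_CLEAR_YOUNG and CYDP_CLEAR_DIRTY are requested, a plain __set_pte()
 * of the old-and-clean value suffices. A racing hardware AF/DBM update
 * could only set bits that are being cleared anyway, so losing it is
 * harmless and the cmpxchg retry loop can be skipped.
 */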
13232f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE
13242f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_SET_WRPROTECT
13252f4b829cSCatalin Marinas static inline void pmdp_set_wrprotect(struct mm_struct *mm,
13262f4b829cSCatalin Marinas 				      unsigned long address, pmd_t *pmdp)
13272f4b829cSCatalin Marinas {
13285a00bfd6SRyan Roberts 	__ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
13292f4b829cSCatalin Marinas }
13301d78a62cSCatalin Marinas 
13311d78a62cSCatalin Marinas #define pmdp_establish pmdp_establish
13321d78a62cSCatalin Marinas static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
13331d78a62cSCatalin Marinas 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
13341d78a62cSCatalin Marinas {
1335a3b83713SKemeng Shi 	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
13361d78a62cSCatalin Marinas 	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
13371d78a62cSCatalin Marinas }
13382f4b829cSCatalin Marinas #endif
13392f4b829cSCatalin Marinas 
13404f04d8f0SCatalin Marinas /*
13414f04d8f0SCatalin Marinas  * Encode and decode a swap entry:
13423676f9efSCatalin Marinas  *	bits 0-1:	present (must be zero)
1343570ef363SDavid Hildenbrand  *	bit 2:		remember PG_anon_exclusive
13445b32510aSRyan Roberts  *	bit 3:		remember uffd-wp state
134555564814SRyan Roberts  *	bits 6-10:	swap type
134655564814SRyan Roberts  *	bit 11:		PTE_PRESENT_INVALID (must be zero)
134755564814SRyan Roberts  *	bits 12-61:	swap offset
13484f04d8f0SCatalin Marinas  */
134955564814SRyan Roberts #define __SWP_TYPE_SHIFT	6
1350570ef363SDavid Hildenbrand #define __SWP_TYPE_BITS		5
13514f04d8f0SCatalin Marinas #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
135255564814SRyan Roberts #define __SWP_OFFSET_SHIFT	12
135355564814SRyan Roberts #define __SWP_OFFSET_BITS	50
13543676f9efSCatalin Marinas #define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
13554f04d8f0SCatalin Marinas 
13564f04d8f0SCatalin Marinas #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
13573676f9efSCatalin Marinas #define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
13584f04d8f0SCatalin Marinas #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
13594f04d8f0SCatalin Marinas 
13604f04d8f0SCatalin Marinas #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
13614f04d8f0SCatalin Marinas #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
13624f04d8f0SCatalin Marinas 
136353fa117bSAnshuman Khandual #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
136453fa117bSAnshuman Khandual #define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
136553fa117bSAnshuman Khandual #define __swp_entry_to_pmd(swp)		__pmd((swp).val)
136653fa117bSAnshuman Khandual #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
136753fa117bSAnshuman Khandual 
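/*
 * Worked example of the encoding above:
 *
 *	swp_entry_t e = __swp_entry(2, 0x1234);
 *
 * packs to e.val = (2 << 6) | (0x1234UL << 12) = 0x1234080; __swp_type(e)
 * recovers 2 and __swp_offset(e) recovers 0x1234, while bits 0-1 stay zero
 * so the resulting pte is never mistaken for a valid mapping.
 */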
13684f04d8f0SCatalin Marinas /*
13694f04d8f0SCatalin Marinas  * Ensure that there are no more swap files than can be encoded in the kernel
1370aad9061bSGeert Uytterhoeven  * PTEs.
13714f04d8f0SCatalin Marinas  */
13724f04d8f0SCatalin Marinas #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
13734f04d8f0SCatalin Marinas 
137436943abaSSteven Price #ifdef CONFIG_ARM64_MTE
137536943abaSSteven Price 
137636943abaSSteven Price #define __HAVE_ARCH_PREPARE_TO_SWAP
1377f238b8c3SBarry Song extern int arch_prepare_to_swap(struct folio *folio);
137836943abaSSteven Price 
137936943abaSSteven Price #define __HAVE_ARCH_SWAP_INVALIDATE
138036943abaSSteven Price static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
138136943abaSSteven Price {
138236943abaSSteven Price 	if (system_supports_mte())
138336943abaSSteven Price 		mte_invalidate_tags(type, offset);
138436943abaSSteven Price }
138536943abaSSteven Price 
138636943abaSSteven Price static inline void arch_swap_invalidate_area(int type)
138736943abaSSteven Price {
138836943abaSSteven Price 	if (system_supports_mte())
138936943abaSSteven Price 		mte_invalidate_tags_area(type);
139036943abaSSteven Price }
139136943abaSSteven Price 
139236943abaSSteven Price #define __HAVE_ARCH_SWAP_RESTORE
1393f238b8c3SBarry Song extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);
139436943abaSSteven Price 
139536943abaSSteven Price #endif /* CONFIG_ARM64_MTE */
139636943abaSSteven Price 
1397cba3574fSWill Deacon /*
13985a00bfd6SRyan Roberts  * On AArch64, the cache coherency is handled via the __set_ptes() function.
1399cba3574fSWill Deacon  */
14004a169d61SMatthew Wilcox (Oracle) static inline void update_mmu_cache_range(struct vm_fault *vmf,
14014a169d61SMatthew Wilcox (Oracle) 		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
14024a169d61SMatthew Wilcox (Oracle) 		unsigned int nr)
1403cba3574fSWill Deacon {
1404cba3574fSWill Deacon 	/*
1405120798d2SWill Deacon 	 * We don't do anything here, so there's a very small chance of
1406120798d2SWill Deacon 	 * us retaking a user fault which we just fixed up. The alternative
1407120798d2SWill Deacon 	 * is doing a dsb(ishst), but that penalises the fastpath.
1408cba3574fSWill Deacon 	 */
1409cba3574fSWill Deacon }
1410cba3574fSWill Deacon 
14114a169d61SMatthew Wilcox (Oracle) #define update_mmu_cache(vma, addr, ptep) \
14124a169d61SMatthew Wilcox (Oracle) 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
1413cba3574fSWill Deacon #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
1414cba3574fSWill Deacon 
1415529c4b05SKristina Martsenko #ifdef CONFIG_ARM64_PA_BITS_52
1416529c4b05SKristina Martsenko #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
1417529c4b05SKristina Martsenko #else
1418529c4b05SKristina Martsenko #define phys_to_ttbr(addr)	(addr)
1419529c4b05SKristina Martsenko #endif
1420529c4b05SKristina Martsenko 
14216af31226SJia He /*
14226af31226SJia He  * On arm64 without hardware Access Flag, copying from user will fail because
14236af31226SJia He  * the pte is old and cannot be marked young. So we always end up with a
14246af31226SJia He  * zeroed page after fork() + CoW for pfn mappings. We don't always have a
14256af31226SJia He  * hardware-managed access flag on arm64.
14266af31226SJia He  */
1427e1fd09e3SYu Zhao #define arch_has_hw_pte_young		cpu_has_hw_af
14280388f9c7SWill Deacon 
14290388f9c7SWill Deacon /*
14300388f9c7SWill Deacon  * Experimentally, it's cheap to set the access flag in hardware and we
14310388f9c7SWill Deacon  * benefit from prefaulting mappings as 'old' to start with.
14320388f9c7SWill Deacon */ 1433e1fd09e3SYu Zhao #define arch_wants_old_prefaulted_pte cpu_has_hw_af 14346af31226SJia He 1435f8b46c4bSAnshuman Khandual static inline bool pud_sect_supported(void) 1436f8b46c4bSAnshuman Khandual { 1437f8b46c4bSAnshuman Khandual return PAGE_SIZE == SZ_4K; 1438f8b46c4bSAnshuman Khandual } 1439f8b46c4bSAnshuman Khandual 144018107f8aSVladimir Murzin 14415db568e7SAnshuman Khandual #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 14425db568e7SAnshuman Khandual #define ptep_modify_prot_start ptep_modify_prot_start 14435db568e7SAnshuman Khandual extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, 14445db568e7SAnshuman Khandual unsigned long addr, pte_t *ptep); 14455db568e7SAnshuman Khandual 14465db568e7SAnshuman Khandual #define ptep_modify_prot_commit ptep_modify_prot_commit 14475db568e7SAnshuman Khandual extern void ptep_modify_prot_commit(struct vm_area_struct *vma, 14485db568e7SAnshuman Khandual unsigned long addr, pte_t *ptep, 14495db568e7SAnshuman Khandual pte_t old_pte, pte_t new_pte); 14505a00bfd6SRyan Roberts 14514602e575SRyan Roberts #ifdef CONFIG_ARM64_CONTPTE 14524602e575SRyan Roberts 14534602e575SRyan Roberts /* 14544602e575SRyan Roberts * The contpte APIs are used to transparently manage the contiguous bit in ptes 14554602e575SRyan Roberts * where it is possible and makes sense to do so. The PTE_CONT bit is considered 14564602e575SRyan Roberts * a private implementation detail of the public ptep API (see below). 14574602e575SRyan Roberts */ 1458f0c22649SRyan Roberts extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, 1459f0c22649SRyan Roberts pte_t *ptep, pte_t pte); 14604602e575SRyan Roberts extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, 14614602e575SRyan Roberts pte_t *ptep, pte_t pte); 14624602e575SRyan Roberts extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte); 14634602e575SRyan Roberts extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep); 14644602e575SRyan Roberts extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, 14654602e575SRyan Roberts pte_t *ptep, pte_t pte, unsigned int nr); 14666b1e4efbSRyan Roberts extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, 14676b1e4efbSRyan Roberts pte_t *ptep, unsigned int nr, int full); 14686b1e4efbSRyan Roberts extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, 14696b1e4efbSRyan Roberts unsigned long addr, pte_t *ptep, 14706b1e4efbSRyan Roberts unsigned int nr, int full); 14714602e575SRyan Roberts extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, 14724602e575SRyan Roberts unsigned long addr, pte_t *ptep); 14734602e575SRyan Roberts extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, 14744602e575SRyan Roberts unsigned long addr, pte_t *ptep); 1475311a6cf2SRyan Roberts extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, 1476311a6cf2SRyan Roberts pte_t *ptep, unsigned int nr); 14774602e575SRyan Roberts extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, 14784602e575SRyan Roberts unsigned long addr, pte_t *ptep, 14794602e575SRyan Roberts pte_t entry, int dirty); 1480*89e86854SLance Yang extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma, 1481*89e86854SLance Yang unsigned long addr, pte_t *ptep, 1482*89e86854SLance Yang unsigned int nr, cydp_t flags); 14834602e575SRyan Roberts 1484f0c22649SRyan Roberts static __always_inline void contpte_try_fold(struct mm_struct *mm, 
1485f0c22649SRyan Roberts 		unsigned long addr, pte_t *ptep, pte_t pte)
1486f0c22649SRyan Roberts {
1487f0c22649SRyan Roberts 	/*
1488f0c22649SRyan Roberts 	 * Only bother trying if both the virtual and physical addresses are
1489f0c22649SRyan Roberts 	 * aligned and correspond to the last entry in a contig range. The core
1490f0c22649SRyan Roberts 	 * code mostly modifies ranges from low to high, so this is likely
1491f0c22649SRyan Roberts 	 * the last modification in the contig range, and a good time to fold.
1492f0c22649SRyan Roberts 	 * We can't fold special mappings, because there is no associated folio.
1493f0c22649SRyan Roberts 	 */
1494f0c22649SRyan Roberts 
1495f0c22649SRyan Roberts 	const unsigned long contmask = CONT_PTES - 1;
1496f0c22649SRyan Roberts 	bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;
1497f0c22649SRyan Roberts 
1498f0c22649SRyan Roberts 	if (unlikely(valign)) {
1499f0c22649SRyan Roberts 		bool palign = (pte_pfn(pte) & contmask) == contmask;
1500f0c22649SRyan Roberts 
1501f0c22649SRyan Roberts 		if (unlikely(palign &&
1502f0c22649SRyan Roberts 		    pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
1503f0c22649SRyan Roberts 			__contpte_try_fold(mm, addr, ptep, pte);
1504f0c22649SRyan Roberts 	}
1505f0c22649SRyan Roberts }
1506f0c22649SRyan Roberts 
1507b972fc6aSRyan Roberts static __always_inline void contpte_try_unfold(struct mm_struct *mm,
1508b972fc6aSRyan Roberts 				unsigned long addr, pte_t *ptep, pte_t pte)
15094602e575SRyan Roberts {
15104602e575SRyan Roberts 	if (unlikely(pte_valid_cont(pte)))
15114602e575SRyan Roberts 		__contpte_try_unfold(mm, addr, ptep, pte);
15124602e575SRyan Roberts }
15134602e575SRyan Roberts 
1514fb5451e5SRyan Roberts #define pte_batch_hint pte_batch_hint
1515fb5451e5SRyan Roberts static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
1516fb5451e5SRyan Roberts {
1517fb5451e5SRyan Roberts 	if (!pte_valid_cont(pte))
1518fb5451e5SRyan Roberts 		return 1;
1519fb5451e5SRyan Roberts 
1520fb5451e5SRyan Roberts 	return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
1521fb5451e5SRyan Roberts }
1522fb5451e5SRyan Roberts 
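/*
 * For example, with 4K pages CONT_PTES is 16 and a pte_t is 8 bytes, so
 * ((unsigned long)ptep >> 3) is the index of the entry within its table.
 * For a ptep at index 37 (i.e. entry 5 of the contpte block spanning
 * indices 32-47), pte_batch_hint() returns 16 - 5 = 11: the number of
 * entries left in the block, counting the current one.
 */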
15234602e575SRyan Roberts /*
15244602e575SRyan Roberts  * The below functions constitute the public API that arm64 presents to the
15254602e575SRyan Roberts  * core-mm to manipulate PTE entries within their page tables (or at least this
15264602e575SRyan Roberts  * is the subset of the API that arm64 needs to implement). These public
15274602e575SRyan Roberts  * versions will automatically and transparently apply the contiguous bit where
15284602e575SRyan Roberts  * it makes sense to do so. Therefore any users that are contig-aware (e.g.
15294602e575SRyan Roberts  * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
15304602e575SRyan Roberts  * private versions, which are prefixed with double underscore. All of these
15314602e575SRyan Roberts  * APIs except for ptep_get_lockless() are expected to be called with the PTL
15324602e575SRyan Roberts  * held. Although the contiguous bit is considered private to the
15334602e575SRyan Roberts  * implementation, it is deliberately allowed to leak through the getters (e.g.
15344602e575SRyan Roberts  * ptep_get()), back to core code. This is required so that pte_leaf_size() can
15354602e575SRyan Roberts  * provide an accurate size for perf_get_pgtable_size(). But this leakage means
15364602e575SRyan Roberts  * it's possible a pte will be passed to a setter with the contiguous bit set, so
15374602e575SRyan Roberts  * we explicitly clear the contiguous bit in those cases to prevent accidentally
15384602e575SRyan Roberts  * setting it in the pgtable.
15394602e575SRyan Roberts  */
15404602e575SRyan Roberts 
15414602e575SRyan Roberts #define ptep_get ptep_get
15424602e575SRyan Roberts static inline pte_t ptep_get(pte_t *ptep)
15434602e575SRyan Roberts {
15444602e575SRyan Roberts 	pte_t pte = __ptep_get(ptep);
15454602e575SRyan Roberts 
15464602e575SRyan Roberts 	if (likely(!pte_valid_cont(pte)))
15474602e575SRyan Roberts 		return pte;
15484602e575SRyan Roberts 
15494602e575SRyan Roberts 	return contpte_ptep_get(ptep, pte);
15504602e575SRyan Roberts }
15514602e575SRyan Roberts 
15524602e575SRyan Roberts #define ptep_get_lockless ptep_get_lockless
15534602e575SRyan Roberts static inline pte_t ptep_get_lockless(pte_t *ptep)
15544602e575SRyan Roberts {
15554602e575SRyan Roberts 	pte_t pte = __ptep_get(ptep);
15564602e575SRyan Roberts 
15574602e575SRyan Roberts 	if (likely(!pte_valid_cont(pte)))
15584602e575SRyan Roberts 		return pte;
15594602e575SRyan Roberts 
15604602e575SRyan Roberts 	return contpte_ptep_get_lockless(ptep);
15614602e575SRyan Roberts }
15624602e575SRyan Roberts 
15634602e575SRyan Roberts static inline void set_pte(pte_t *ptep, pte_t pte)
15644602e575SRyan Roberts {
15654602e575SRyan Roberts 	/*
15664602e575SRyan Roberts 	 * We don't have the mm or vaddr so cannot unfold contig entries (since
15674602e575SRyan Roberts 	 * it requires tlb maintenance). set_pte() is not used in core code, so
15684602e575SRyan Roberts 	 * this should never even be called. Regardless, do our best to service
15694602e575SRyan Roberts 	 * any call and emit a warning if there is any attempt to set a pte on
15704602e575SRyan Roberts 	 * top of an existing contig range.
15714602e575SRyan Roberts */ 15724602e575SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 15734602e575SRyan Roberts 15744602e575SRyan Roberts WARN_ON_ONCE(pte_valid_cont(orig_pte)); 15754602e575SRyan Roberts __set_pte(ptep, pte_mknoncont(pte)); 15764602e575SRyan Roberts } 15774602e575SRyan Roberts 15784602e575SRyan Roberts #define set_ptes set_ptes 1579b972fc6aSRyan Roberts static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr, 15804602e575SRyan Roberts pte_t *ptep, pte_t pte, unsigned int nr) 15814602e575SRyan Roberts { 15824602e575SRyan Roberts pte = pte_mknoncont(pte); 15834602e575SRyan Roberts 15844602e575SRyan Roberts if (likely(nr == 1)) { 15854602e575SRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 15864602e575SRyan Roberts __set_ptes(mm, addr, ptep, pte, 1); 1587f0c22649SRyan Roberts contpte_try_fold(mm, addr, ptep, pte); 15884602e575SRyan Roberts } else { 15894602e575SRyan Roberts contpte_set_ptes(mm, addr, ptep, pte, nr); 15904602e575SRyan Roberts } 15914602e575SRyan Roberts } 15924602e575SRyan Roberts 15934602e575SRyan Roberts static inline void pte_clear(struct mm_struct *mm, 15944602e575SRyan Roberts unsigned long addr, pte_t *ptep) 15954602e575SRyan Roberts { 15964602e575SRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 15974602e575SRyan Roberts __pte_clear(mm, addr, ptep); 15984602e575SRyan Roberts } 15994602e575SRyan Roberts 16006b1e4efbSRyan Roberts #define clear_full_ptes clear_full_ptes 16016b1e4efbSRyan Roberts static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, 16026b1e4efbSRyan Roberts pte_t *ptep, unsigned int nr, int full) 16036b1e4efbSRyan Roberts { 16046b1e4efbSRyan Roberts if (likely(nr == 1)) { 16056b1e4efbSRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 16066b1e4efbSRyan Roberts __clear_full_ptes(mm, addr, ptep, nr, full); 16076b1e4efbSRyan Roberts } else { 16086b1e4efbSRyan Roberts contpte_clear_full_ptes(mm, addr, ptep, nr, full); 16096b1e4efbSRyan Roberts } 16106b1e4efbSRyan Roberts } 16116b1e4efbSRyan Roberts 16126b1e4efbSRyan Roberts #define get_and_clear_full_ptes get_and_clear_full_ptes 16136b1e4efbSRyan Roberts static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, 16146b1e4efbSRyan Roberts unsigned long addr, pte_t *ptep, 16156b1e4efbSRyan Roberts unsigned int nr, int full) 16166b1e4efbSRyan Roberts { 16176b1e4efbSRyan Roberts pte_t pte; 16186b1e4efbSRyan Roberts 16196b1e4efbSRyan Roberts if (likely(nr == 1)) { 16206b1e4efbSRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 16216b1e4efbSRyan Roberts pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full); 16226b1e4efbSRyan Roberts } else { 16236b1e4efbSRyan Roberts pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full); 16246b1e4efbSRyan Roberts } 16256b1e4efbSRyan Roberts 16266b1e4efbSRyan Roberts return pte; 16276b1e4efbSRyan Roberts } 16286b1e4efbSRyan Roberts 16294602e575SRyan Roberts #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 16304602e575SRyan Roberts static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 16314602e575SRyan Roberts unsigned long addr, pte_t *ptep) 16324602e575SRyan Roberts { 16334602e575SRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 16344602e575SRyan Roberts return __ptep_get_and_clear(mm, addr, ptep); 16354602e575SRyan Roberts } 16364602e575SRyan Roberts 16374602e575SRyan Roberts #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 16384602e575SRyan Roberts static inline int ptep_test_and_clear_young(struct vm_area_struct 
*vma,
16394602e575SRyan Roberts 				unsigned long addr, pte_t *ptep)
16404602e575SRyan Roberts {
16414602e575SRyan Roberts 	pte_t orig_pte = __ptep_get(ptep);
16424602e575SRyan Roberts 
16434602e575SRyan Roberts 	if (likely(!pte_valid_cont(orig_pte)))
16444602e575SRyan Roberts 		return __ptep_test_and_clear_young(vma, addr, ptep);
16454602e575SRyan Roberts 
16464602e575SRyan Roberts 	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
16474602e575SRyan Roberts }
16484602e575SRyan Roberts 
16494602e575SRyan Roberts #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
16504602e575SRyan Roberts static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
16514602e575SRyan Roberts 				unsigned long addr, pte_t *ptep)
16524602e575SRyan Roberts {
16534602e575SRyan Roberts 	pte_t orig_pte = __ptep_get(ptep);
16544602e575SRyan Roberts 
16554602e575SRyan Roberts 	if (likely(!pte_valid_cont(orig_pte)))
16564602e575SRyan Roberts 		return __ptep_clear_flush_young(vma, addr, ptep);
16574602e575SRyan Roberts 
16584602e575SRyan Roberts 	return contpte_ptep_clear_flush_young(vma, addr, ptep);
16594602e575SRyan Roberts }
16604602e575SRyan Roberts 
1661311a6cf2SRyan Roberts #define wrprotect_ptes wrprotect_ptes
1662b972fc6aSRyan Roberts static __always_inline void wrprotect_ptes(struct mm_struct *mm,
1663b972fc6aSRyan Roberts 				unsigned long addr, pte_t *ptep, unsigned int nr)
1664311a6cf2SRyan Roberts {
1665311a6cf2SRyan Roberts 	if (likely(nr == 1)) {
1666311a6cf2SRyan Roberts 		/*
1667311a6cf2SRyan Roberts 		 * Optimization: wrprotect_ptes() can only be called for present
1668311a6cf2SRyan Roberts 		 * ptes, so we only need to check the contig bit as the condition
1669311a6cf2SRyan Roberts 		 * for unfold, and we can remove the contig bit from the pte we read
1670311a6cf2SRyan Roberts 		 * to avoid re-reading. This speeds up fork(), which is sensitive
1671311a6cf2SRyan Roberts 		 * to this for order-0 folios. Equivalent to contpte_try_unfold().
1672311a6cf2SRyan Roberts */ 1673311a6cf2SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 1674311a6cf2SRyan Roberts 1675311a6cf2SRyan Roberts if (unlikely(pte_cont(orig_pte))) { 1676311a6cf2SRyan Roberts __contpte_try_unfold(mm, addr, ptep, orig_pte); 1677311a6cf2SRyan Roberts orig_pte = pte_mknoncont(orig_pte); 1678311a6cf2SRyan Roberts } 1679311a6cf2SRyan Roberts ___ptep_set_wrprotect(mm, addr, ptep, orig_pte); 1680311a6cf2SRyan Roberts } else { 1681311a6cf2SRyan Roberts contpte_wrprotect_ptes(mm, addr, ptep, nr); 1682311a6cf2SRyan Roberts } 1683311a6cf2SRyan Roberts } 1684311a6cf2SRyan Roberts 16854602e575SRyan Roberts #define __HAVE_ARCH_PTEP_SET_WRPROTECT 16864602e575SRyan Roberts static inline void ptep_set_wrprotect(struct mm_struct *mm, 16874602e575SRyan Roberts unsigned long addr, pte_t *ptep) 16884602e575SRyan Roberts { 1689311a6cf2SRyan Roberts wrprotect_ptes(mm, addr, ptep, 1); 16904602e575SRyan Roberts } 16914602e575SRyan Roberts 16924602e575SRyan Roberts #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 16934602e575SRyan Roberts static inline int ptep_set_access_flags(struct vm_area_struct *vma, 16944602e575SRyan Roberts unsigned long addr, pte_t *ptep, 16954602e575SRyan Roberts pte_t entry, int dirty) 16964602e575SRyan Roberts { 16974602e575SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 16984602e575SRyan Roberts 16994602e575SRyan Roberts entry = pte_mknoncont(entry); 17004602e575SRyan Roberts 17014602e575SRyan Roberts if (likely(!pte_valid_cont(orig_pte))) 17024602e575SRyan Roberts return __ptep_set_access_flags(vma, addr, ptep, entry, dirty); 17034602e575SRyan Roberts 17044602e575SRyan Roberts return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty); 17054602e575SRyan Roberts } 17064602e575SRyan Roberts 1707*89e86854SLance Yang #define clear_young_dirty_ptes clear_young_dirty_ptes 1708*89e86854SLance Yang static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, 1709*89e86854SLance Yang unsigned long addr, pte_t *ptep, 1710*89e86854SLance Yang unsigned int nr, cydp_t flags) 1711*89e86854SLance Yang { 1712*89e86854SLance Yang if (likely(nr == 1 && !pte_cont(__ptep_get(ptep)))) 1713*89e86854SLance Yang __clear_young_dirty_ptes(vma, addr, ptep, nr, flags); 1714*89e86854SLance Yang else 1715*89e86854SLance Yang contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags); 1716*89e86854SLance Yang } 1717*89e86854SLance Yang 17184602e575SRyan Roberts #else /* CONFIG_ARM64_CONTPTE */ 17194602e575SRyan Roberts 17205a00bfd6SRyan Roberts #define ptep_get __ptep_get 17215a00bfd6SRyan Roberts #define set_pte __set_pte 17225a00bfd6SRyan Roberts #define set_ptes __set_ptes 17235a00bfd6SRyan Roberts #define pte_clear __pte_clear 17246b1e4efbSRyan Roberts #define clear_full_ptes __clear_full_ptes 17256b1e4efbSRyan Roberts #define get_and_clear_full_ptes __get_and_clear_full_ptes 17265a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 17275a00bfd6SRyan Roberts #define ptep_get_and_clear __ptep_get_and_clear 17285a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 17295a00bfd6SRyan Roberts #define ptep_test_and_clear_young __ptep_test_and_clear_young 17305a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 17315a00bfd6SRyan Roberts #define ptep_clear_flush_young __ptep_clear_flush_young 17325a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_SET_WRPROTECT 17335a00bfd6SRyan Roberts #define ptep_set_wrprotect __ptep_set_wrprotect 1734311a6cf2SRyan Roberts #define wrprotect_ptes __wrprotect_ptes 17355a00bfd6SRyan Roberts #define 
__HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 17365a00bfd6SRyan Roberts #define ptep_set_access_flags __ptep_set_access_flags 1737*89e86854SLance Yang #define clear_young_dirty_ptes __clear_young_dirty_ptes 17385a00bfd6SRyan Roberts 17394602e575SRyan Roberts #endif /* CONFIG_ARM64_CONTPTE */ 17404602e575SRyan Roberts 17414f04d8f0SCatalin Marinas #endif /* !__ASSEMBLY__ */ 17424f04d8f0SCatalin Marinas 17434f04d8f0SCatalin Marinas #endif /* __ASM_PGTABLE_H */ 1744