1caab277bSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */ 24f04d8f0SCatalin Marinas /* 34f04d8f0SCatalin Marinas * Copyright (C) 2012 ARM Ltd. 44f04d8f0SCatalin Marinas */ 54f04d8f0SCatalin Marinas #ifndef __ASM_PGTABLE_H 64f04d8f0SCatalin Marinas #define __ASM_PGTABLE_H 74f04d8f0SCatalin Marinas 82f4b829cSCatalin Marinas #include <asm/bug.h> 94f04d8f0SCatalin Marinas #include <asm/proc-fns.h> 104f04d8f0SCatalin Marinas 114f04d8f0SCatalin Marinas #include <asm/memory.h> 1234bfeea4SCatalin Marinas #include <asm/mte.h> 134f04d8f0SCatalin Marinas #include <asm/pgtable-hwdef.h> 143eca86e7SMark Rutland #include <asm/pgtable-prot.h> 153403e56bSAlex Van Brunt #include <asm/tlbflush.h> 164f04d8f0SCatalin Marinas 174f04d8f0SCatalin Marinas /* 183e1907d5SArd Biesheuvel * VMALLOC range. 1908375198SCatalin Marinas * 20f9040773SArd Biesheuvel * VMALLOC_START: beginning of the kernel vmalloc space 21a5315819SMark Brown * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space 223e1907d5SArd Biesheuvel * and fixed mappings 234f04d8f0SCatalin Marinas */ 24f9040773SArd Biesheuvel #define VMALLOC_START (MODULES_END) 259ad7c6d5SArd Biesheuvel #define VMALLOC_END (VMEMMAP_START - SZ_256M) 264f04d8f0SCatalin Marinas 277bc1a0f9SArd Biesheuvel #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) 287bc1a0f9SArd Biesheuvel 294f04d8f0SCatalin Marinas #ifndef __ASSEMBLY__ 302f4b829cSCatalin Marinas 313bbf7157SCatalin Marinas #include <asm/cmpxchg.h> 32961faac1SMark Rutland #include <asm/fixmap.h> 332f4b829cSCatalin Marinas #include <linux/mmdebug.h> 3486c9e812SWill Deacon #include <linux/mm_types.h> 3586c9e812SWill Deacon #include <linux/sched.h> 3642b25471SKefeng Wang #include <linux/page_table_check.h> 372f4b829cSCatalin Marinas 38a7ac1cfaSZhenyu Ye #ifdef CONFIG_TRANSPARENT_HUGEPAGE 39a7ac1cfaSZhenyu Ye #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE 40a7ac1cfaSZhenyu Ye 41a7ac1cfaSZhenyu Ye /* Set stride and tlb_level in flush_*_tlb_range */ 42a7ac1cfaSZhenyu Ye #define flush_pmd_tlb_range(vma, addr, end) \ 43a7ac1cfaSZhenyu Ye __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2) 44a7ac1cfaSZhenyu Ye #define flush_pud_tlb_range(vma, addr, end) \ 45a7ac1cfaSZhenyu Ye __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1) 46a7ac1cfaSZhenyu Ye #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 47a7ac1cfaSZhenyu Ye 48d0637c50SBarry Song static inline bool arch_thp_swp_supported(void) 49d0637c50SBarry Song { 50d0637c50SBarry Song return !system_supports_mte(); 51d0637c50SBarry Song } 52d0637c50SBarry Song #define arch_thp_swp_supported arch_thp_swp_supported 53d0637c50SBarry Song 544f04d8f0SCatalin Marinas /* 556a1bdb17SWill Deacon * Outside of a few very special situations (e.g. hibernation), we always 566a1bdb17SWill Deacon * use broadcast TLB invalidation instructions, therefore a spurious page 576a1bdb17SWill Deacon * fault on one CPU which has been handled concurrently by another CPU 586a1bdb17SWill Deacon * does not need to perform additional invalidation. 596a1bdb17SWill Deacon */ 6099c29133SGerald Schaefer #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0) 616a1bdb17SWill Deacon 626a1bdb17SWill Deacon /* 634f04d8f0SCatalin Marinas * ZERO_PAGE is a global shared page that is always zero: used 644f04d8f0SCatalin Marinas * for zero-mapped memory areas etc.. 654f04d8f0SCatalin Marinas */ 665227cfa7SMark Rutland extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 672077be67SLaura Abbott #define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page)) 684f04d8f0SCatalin Marinas 692cf660ebSGavin Shan #define pte_ERROR(e) \ 702cf660ebSGavin Shan pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e)) 717078db46SCatalin Marinas 7275387b92SKristina Martsenko /* 7375387b92SKristina Martsenko * Macros to convert between a physical address and its placement in a 7475387b92SKristina Martsenko * page table entry, taking care of 52-bit addresses. 7575387b92SKristina Martsenko */ 7675387b92SKristina Martsenko #ifdef CONFIG_ARM64_PA_BITS_52 77c7c386fbSArnd Bergmann static inline phys_addr_t __pte_to_phys(pte_t pte) 78c7c386fbSArnd Bergmann { 79c7c386fbSArnd Bergmann return (pte_val(pte) & PTE_ADDR_LOW) | 80a4ee2861SAnshuman Khandual ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT); 81c7c386fbSArnd Bergmann } 82c7c386fbSArnd Bergmann static inline pteval_t __phys_to_pte_val(phys_addr_t phys) 83c7c386fbSArnd Bergmann { 84a4ee2861SAnshuman Khandual return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK; 85c7c386fbSArnd Bergmann } 8675387b92SKristina Martsenko #else 8775387b92SKristina Martsenko #define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK) 8875387b92SKristina Martsenko #define __phys_to_pte_val(phys) (phys) 8975387b92SKristina Martsenko #endif 904f04d8f0SCatalin Marinas 9175387b92SKristina Martsenko #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT) 9275387b92SKristina Martsenko #define pfn_pte(pfn,prot) \ 9375387b92SKristina Martsenko __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 944f04d8f0SCatalin Marinas 954f04d8f0SCatalin Marinas #define pte_none(pte) (!pte_val(pte)) 965a00bfd6SRyan Roberts #define __pte_clear(mm, addr, ptep) \ 975a00bfd6SRyan Roberts __set_pte(ptep, __pte(0)) 984f04d8f0SCatalin Marinas #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) 997078db46SCatalin Marinas 1004f04d8f0SCatalin Marinas /* 1014f04d8f0SCatalin Marinas * The following only work if pte_present(). Undefined behaviour otherwise. 1024f04d8f0SCatalin Marinas */ 10384fe6826SSteve Capper #define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) 10484fe6826SSteve Capper #define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) 10584fe6826SSteve Capper #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) 10684fe6826SSteve Capper #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) 107d0ba9612SAnshuman Khandual #define pte_rdonly(pte) (!!(pte_val(pte) & PTE_RDONLY)) 10842b25471SKefeng Wang #define pte_user(pte) (!!(pte_val(pte) & PTE_USER)) 109ec663d96SCatalin Marinas #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) 11093ef666aSJeremy Linton #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) 11173b20c84SRobin Murphy #define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP)) 11234bfeea4SCatalin Marinas #define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \ 11334bfeea4SCatalin Marinas PTE_ATTRINDX(MT_NORMAL_TAGGED)) 1144f04d8f0SCatalin Marinas 115d27cfa1fSArd Biesheuvel #define pte_cont_addr_end(addr, end) \ 116d27cfa1fSArd Biesheuvel ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \ 117d27cfa1fSArd Biesheuvel (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 118d27cfa1fSArd Biesheuvel }) 119d27cfa1fSArd Biesheuvel 120d27cfa1fSArd Biesheuvel #define pmd_cont_addr_end(addr, end) \ 121d27cfa1fSArd Biesheuvel ({ unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \ 122d27cfa1fSArd Biesheuvel (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 123d27cfa1fSArd Biesheuvel }) 124d27cfa1fSArd Biesheuvel 125d0ba9612SAnshuman Khandual #define pte_hw_dirty(pte) (pte_write(pte) && !pte_rdonly(pte)) 1262f4b829cSCatalin Marinas #define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) 1272f4b829cSCatalin Marinas #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) 1282f4b829cSCatalin Marinas 129766ffb69SWill Deacon #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) 13018107f8aSVladimir Murzin /* 13118107f8aSVladimir Murzin * Execute-only user mappings do not have the PTE_USER bit set. All valid 13218107f8aSVladimir Murzin * kernel mappings have the PTE_UXN bit set. 13318107f8aSVladimir Murzin */ 134ec663d96SCatalin Marinas #define pte_valid_not_user(pte) \ 13518107f8aSVladimir Murzin ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) 13676c714beSWill Deacon /* 1374602e575SRyan Roberts * Returns true if the pte is valid and has the contiguous bit set. 1384602e575SRyan Roberts */ 1394602e575SRyan Roberts #define pte_valid_cont(pte) (pte_valid(pte) && pte_cont(pte)) 1404602e575SRyan Roberts /* 14176c714beSWill Deacon * Could the pte be present in the TLB? We must check mm_tlb_flush_pending 14276c714beSWill Deacon * so that we don't erroneously return false for pages that have been 14376c714beSWill Deacon * remapped as PROT_NONE but are yet to be flushed from the TLB. 14407509e10SWill Deacon * Note that we can't make any assumptions based on the state of the access 1455a00bfd6SRyan Roberts * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the 14607509e10SWill Deacon * TLB. 14776c714beSWill Deacon */ 14876c714beSWill Deacon #define pte_accessible(mm, pte) \ 14907509e10SWill Deacon (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) 1504f04d8f0SCatalin Marinas 1516218f96cSCatalin Marinas /* 15218107f8aSVladimir Murzin * p??_access_permitted() is true for valid user mappings (PTE_USER 15318107f8aSVladimir Murzin * bit set, subject to the write permission check). For execute-only 15418107f8aSVladimir Murzin * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits 15518107f8aSVladimir Murzin * not set) must return false. PROT_NONE mappings do not have the 15618107f8aSVladimir Murzin * PTE_VALID bit set. 1576218f96cSCatalin Marinas */ 1586218f96cSCatalin Marinas #define pte_access_permitted(pte, write) \ 15918107f8aSVladimir Murzin (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte))) 1606218f96cSCatalin Marinas #define pmd_access_permitted(pmd, write) \ 1616218f96cSCatalin Marinas (pte_access_permitted(pmd_pte(pmd), (write))) 1626218f96cSCatalin Marinas #define pud_access_permitted(pud, write) \ 1636218f96cSCatalin Marinas (pte_access_permitted(pud_pte(pud), (write))) 1646218f96cSCatalin Marinas 165b6d4f280SLaura Abbott static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) 166b6d4f280SLaura Abbott { 167b6d4f280SLaura Abbott pte_val(pte) &= ~pgprot_val(prot); 168b6d4f280SLaura Abbott return pte; 169b6d4f280SLaura Abbott } 170b6d4f280SLaura Abbott 171b6d4f280SLaura Abbott static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) 172b6d4f280SLaura Abbott { 173b6d4f280SLaura Abbott pte_val(pte) |= pgprot_val(prot); 174b6d4f280SLaura Abbott return pte; 175b6d4f280SLaura Abbott } 176b6d4f280SLaura Abbott 177b65399f6SAnshuman Khandual static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) 178b65399f6SAnshuman Khandual { 179b65399f6SAnshuman Khandual pmd_val(pmd) &= ~pgprot_val(prot); 180b65399f6SAnshuman Khandual return pmd; 181b65399f6SAnshuman Khandual } 182b65399f6SAnshuman Khandual 183b65399f6SAnshuman Khandual static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) 184b65399f6SAnshuman Khandual { 185b65399f6SAnshuman Khandual pmd_val(pmd) |= pgprot_val(prot); 186b65399f6SAnshuman Khandual return pmd; 187b65399f6SAnshuman Khandual } 188b65399f6SAnshuman Khandual 1892f0584f3SRick Edgecombe static inline pte_t pte_mkwrite_novma(pte_t pte) 19044b6dfc5SSteve Capper { 19173e86cb0SCatalin Marinas pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); 19273e86cb0SCatalin Marinas pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 19373e86cb0SCatalin Marinas return pte; 19444b6dfc5SSteve Capper } 19544b6dfc5SSteve Capper 19644b6dfc5SSteve Capper static inline pte_t pte_mkclean(pte_t pte) 19744b6dfc5SSteve Capper { 1988781bcbcSSteve Capper pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); 1998781bcbcSSteve Capper pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 2008781bcbcSSteve Capper 2018781bcbcSSteve Capper return pte; 20244b6dfc5SSteve Capper } 20344b6dfc5SSteve Capper 20444b6dfc5SSteve Capper static inline pte_t pte_mkdirty(pte_t pte) 20544b6dfc5SSteve Capper { 2068781bcbcSSteve Capper pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 2078781bcbcSSteve Capper 2088781bcbcSSteve Capper if (pte_write(pte)) 2098781bcbcSSteve Capper pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 2108781bcbcSSteve Capper 2118781bcbcSSteve Capper return pte; 21244b6dfc5SSteve Capper } 21344b6dfc5SSteve Capper 214ff1712f9SWill Deacon static inline pte_t pte_wrprotect(pte_t pte) 215ff1712f9SWill Deacon { 216ff1712f9SWill Deacon /* 217ff1712f9SWill Deacon * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY 218ff1712f9SWill Deacon * clear), set the PTE_DIRTY bit. 219ff1712f9SWill Deacon */ 220ff1712f9SWill Deacon if (pte_hw_dirty(pte)) 2216477c388SAnshuman Khandual pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 222ff1712f9SWill Deacon 223ff1712f9SWill Deacon pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); 224ff1712f9SWill Deacon pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 225ff1712f9SWill Deacon return pte; 226ff1712f9SWill Deacon } 227ff1712f9SWill Deacon 22844b6dfc5SSteve Capper static inline pte_t pte_mkold(pte_t pte) 22944b6dfc5SSteve Capper { 230b6d4f280SLaura Abbott return clear_pte_bit(pte, __pgprot(PTE_AF)); 23144b6dfc5SSteve Capper } 23244b6dfc5SSteve Capper 23344b6dfc5SSteve Capper static inline pte_t pte_mkyoung(pte_t pte) 23444b6dfc5SSteve Capper { 235b6d4f280SLaura Abbott return set_pte_bit(pte, __pgprot(PTE_AF)); 23644b6dfc5SSteve Capper } 23744b6dfc5SSteve Capper 23844b6dfc5SSteve Capper static inline pte_t pte_mkspecial(pte_t pte) 23944b6dfc5SSteve Capper { 240b6d4f280SLaura Abbott return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); 24144b6dfc5SSteve Capper } 2424f04d8f0SCatalin Marinas 24393ef666aSJeremy Linton static inline pte_t pte_mkcont(pte_t pte) 24493ef666aSJeremy Linton { 24566b3923aSDavid Woods pte = set_pte_bit(pte, __pgprot(PTE_CONT)); 24666b3923aSDavid Woods return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE)); 24793ef666aSJeremy Linton } 24893ef666aSJeremy Linton 24993ef666aSJeremy Linton static inline pte_t pte_mknoncont(pte_t pte) 25093ef666aSJeremy Linton { 25193ef666aSJeremy Linton return clear_pte_bit(pte, __pgprot(PTE_CONT)); 25293ef666aSJeremy Linton } 25393ef666aSJeremy Linton 2545ebe3a44SJames Morse static inline pte_t pte_mkpresent(pte_t pte) 2555ebe3a44SJames Morse { 2565ebe3a44SJames Morse return set_pte_bit(pte, __pgprot(PTE_VALID)); 2575ebe3a44SJames Morse } 2585ebe3a44SJames Morse 25966b3923aSDavid Woods static inline pmd_t pmd_mkcont(pmd_t pmd) 26066b3923aSDavid Woods { 26166b3923aSDavid Woods return __pmd(pmd_val(pmd) | PMD_SECT_CONT); 26266b3923aSDavid Woods } 26366b3923aSDavid Woods 26473b20c84SRobin Murphy static inline pte_t pte_mkdevmap(pte_t pte) 26573b20c84SRobin Murphy { 26630e23538SJia He return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); 26773b20c84SRobin Murphy } 26873b20c84SRobin Murphy 2695a00bfd6SRyan Roberts static inline void __set_pte(pte_t *ptep, pte_t pte) 2704f04d8f0SCatalin Marinas { 27120a004e7SWill Deacon WRITE_ONCE(*ptep, pte); 2727f0b1bf0SCatalin Marinas 2737f0b1bf0SCatalin Marinas /* 2747f0b1bf0SCatalin Marinas * Only if the new pte is valid and kernel, otherwise TLB maintenance 2757f0b1bf0SCatalin Marinas * or update_mmu_cache() have the necessary barriers. 2767f0b1bf0SCatalin Marinas */ 277d0b7a302SWill Deacon if (pte_valid_not_user(pte)) { 2787f0b1bf0SCatalin Marinas dsb(ishst); 279d0b7a302SWill Deacon isb(); 280d0b7a302SWill Deacon } 2814f04d8f0SCatalin Marinas } 2824f04d8f0SCatalin Marinas 2835a00bfd6SRyan Roberts static inline pte_t __ptep_get(pte_t *ptep) 28453273655SRyan Roberts { 28553273655SRyan Roberts return READ_ONCE(*ptep); 28653273655SRyan Roberts } 28753273655SRyan Roberts 288907e21c1SShaokun Zhang extern void __sync_icache_dcache(pte_t pteval); 289004fc58fSAnshuman Khandual bool pgattr_change_is_safe(u64 old, u64 new); 2904f04d8f0SCatalin Marinas 2912f4b829cSCatalin Marinas /* 2922f4b829cSCatalin Marinas * PTE bits configuration in the presence of hardware Dirty Bit Management 2932f4b829cSCatalin Marinas * (PTE_WRITE == PTE_DBM): 2942f4b829cSCatalin Marinas * 2952f4b829cSCatalin Marinas * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw) 2962f4b829cSCatalin Marinas * 0 0 | 1 0 0 2972f4b829cSCatalin Marinas * 0 1 | 1 1 0 2982f4b829cSCatalin Marinas * 1 0 | 1 0 1 2992f4b829cSCatalin Marinas * 1 1 | 0 1 x 3002f4b829cSCatalin Marinas * 3012f4b829cSCatalin Marinas * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via 3022f4b829cSCatalin Marinas * the page fault mechanism. Checking the dirty status of a pte becomes: 3032f4b829cSCatalin Marinas * 304b847415cSCatalin Marinas * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) 3052f4b829cSCatalin Marinas */ 3069b604722SMark Rutland 307004fc58fSAnshuman Khandual static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, 3089b604722SMark Rutland pte_t pte) 3094f04d8f0SCatalin Marinas { 31020a004e7SWill Deacon pte_t old_pte; 31120a004e7SWill Deacon 3129b604722SMark Rutland if (!IS_ENABLED(CONFIG_DEBUG_VM)) 3139b604722SMark Rutland return; 3149b604722SMark Rutland 3155a00bfd6SRyan Roberts old_pte = __ptep_get(ptep); 3169b604722SMark Rutland 3179b604722SMark Rutland if (!pte_valid(old_pte) || !pte_valid(pte)) 3189b604722SMark Rutland return; 3199b604722SMark Rutland if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1) 3209b604722SMark Rutland return; 32102522463SWill Deacon 3222f4b829cSCatalin Marinas /* 3239b604722SMark Rutland * Check for potential race with hardware updates of the pte 3245a00bfd6SRyan Roberts * (__ptep_set_access_flags safely changes valid ptes without going 3259b604722SMark Rutland * through an invalid entry). 3262f4b829cSCatalin Marinas */ 32782d34008SCatalin Marinas VM_WARN_ONCE(!pte_young(pte), 32882d34008SCatalin Marinas "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", 32920a004e7SWill Deacon __func__, pte_val(old_pte), pte_val(pte)); 33020a004e7SWill Deacon VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte), 33182d34008SCatalin Marinas "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", 33220a004e7SWill Deacon __func__, pte_val(old_pte), pte_val(pte)); 333004fc58fSAnshuman Khandual VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)), 334004fc58fSAnshuman Khandual "%s: unsafe attribute change: 0x%016llx -> 0x%016llx", 335004fc58fSAnshuman Khandual __func__, pte_val(old_pte), pte_val(pte)); 3362f4b829cSCatalin Marinas } 3372f4b829cSCatalin Marinas 3383425cec4SRyan Roberts static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages) 3399b604722SMark Rutland { 3409b604722SMark Rutland if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) 3419b604722SMark Rutland __sync_icache_dcache(pte); 3429b604722SMark Rutland 34369e3b846SSteven Price /* 34469e3b846SSteven Price * If the PTE would provide user space access to the tags associated 34569e3b846SSteven Price * with it then ensure that the MTE tags are synchronised. Although 34669e3b846SSteven Price * pte_access_permitted() returns false for exec only mappings, they 34769e3b846SSteven Price * don't expose tags (instruction fetches don't check tags). 34869e3b846SSteven Price */ 34969e3b846SSteven Price if (system_supports_mte() && pte_access_permitted(pte, false) && 350332c151cSPeter Collingbourne !pte_special(pte) && pte_tagged(pte)) 3513425cec4SRyan Roberts mte_sync_tags(pte, nr_pages); 3524f04d8f0SCatalin Marinas } 3534f04d8f0SCatalin Marinas 3546e8f5887SRyan Roberts /* 3556e8f5887SRyan Roberts * Select all bits except the pfn 3566e8f5887SRyan Roberts */ 3576e8f5887SRyan Roberts static inline pgprot_t pte_pgprot(pte_t pte) 3586e8f5887SRyan Roberts { 3596e8f5887SRyan Roberts unsigned long pfn = pte_pfn(pte); 3606e8f5887SRyan Roberts 3616e8f5887SRyan Roberts return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); 3626e8f5887SRyan Roberts } 3636e8f5887SRyan Roberts 364c1bd2b40SRyan Roberts #define pte_advance_pfn pte_advance_pfn 365c1bd2b40SRyan Roberts static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) 3666e8f5887SRyan Roberts { 367c1bd2b40SRyan Roberts return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte)); 3686e8f5887SRyan Roberts } 3696e8f5887SRyan Roberts 3705a00bfd6SRyan Roberts static inline void __set_ptes(struct mm_struct *mm, 371dba2ff49SCatalin Marinas unsigned long __always_unused addr, 3724a169d61SMatthew Wilcox (Oracle) pte_t *ptep, pte_t pte, unsigned int nr) 37342b25471SKefeng Wang { 3744a169d61SMatthew Wilcox (Oracle) page_table_check_ptes_set(mm, ptep, pte, nr); 3753425cec4SRyan Roberts __sync_cache_and_tags(pte, nr); 3764a169d61SMatthew Wilcox (Oracle) 3774a169d61SMatthew Wilcox (Oracle) for (;;) { 3783425cec4SRyan Roberts __check_safe_pte_update(mm, ptep, pte); 3795a00bfd6SRyan Roberts __set_pte(ptep, pte); 3804a169d61SMatthew Wilcox (Oracle) if (--nr == 0) 3814a169d61SMatthew Wilcox (Oracle) break; 3824a169d61SMatthew Wilcox (Oracle) ptep++; 383c1bd2b40SRyan Roberts pte = pte_advance_pfn(pte, 1); 38442b25471SKefeng Wang } 3854a169d61SMatthew Wilcox (Oracle) } 38642b25471SKefeng Wang 3874f04d8f0SCatalin Marinas /* 3884f04d8f0SCatalin Marinas * Huge pte definitions. 3894f04d8f0SCatalin Marinas */ 390084bd298SSteve Capper #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) 391084bd298SSteve Capper 392084bd298SSteve Capper /* 393084bd298SSteve Capper * Hugetlb definitions. 394084bd298SSteve Capper */ 39566b3923aSDavid Woods #define HUGE_MAX_HSTATE 4 396084bd298SSteve Capper #define HPAGE_SHIFT PMD_SHIFT 397084bd298SSteve Capper #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) 398084bd298SSteve Capper #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 399084bd298SSteve Capper #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 4004f04d8f0SCatalin Marinas 40175387b92SKristina Martsenko static inline pte_t pgd_pte(pgd_t pgd) 40275387b92SKristina Martsenko { 40375387b92SKristina Martsenko return __pte(pgd_val(pgd)); 40475387b92SKristina Martsenko } 40575387b92SKristina Martsenko 406e9f63768SMike Rapoport static inline pte_t p4d_pte(p4d_t p4d) 407e9f63768SMike Rapoport { 408e9f63768SMike Rapoport return __pte(p4d_val(p4d)); 409e9f63768SMike Rapoport } 410e9f63768SMike Rapoport 41129e56940SSteve Capper static inline pte_t pud_pte(pud_t pud) 41229e56940SSteve Capper { 41329e56940SSteve Capper return __pte(pud_val(pud)); 41429e56940SSteve Capper } 41529e56940SSteve Capper 416eb3f0624SPunit Agrawal static inline pud_t pte_pud(pte_t pte) 417eb3f0624SPunit Agrawal { 418eb3f0624SPunit Agrawal return __pud(pte_val(pte)); 419eb3f0624SPunit Agrawal } 420eb3f0624SPunit Agrawal 42129e56940SSteve Capper static inline pmd_t pud_pmd(pud_t pud) 42229e56940SSteve Capper { 42329e56940SSteve Capper return __pmd(pud_val(pud)); 42429e56940SSteve Capper } 42529e56940SSteve Capper 4269c7e535fSSteve Capper static inline pte_t pmd_pte(pmd_t pmd) 4279c7e535fSSteve Capper { 4289c7e535fSSteve Capper return __pte(pmd_val(pmd)); 4299c7e535fSSteve Capper } 430af074848SSteve Capper 4319c7e535fSSteve Capper static inline pmd_t pte_pmd(pte_t pte) 4329c7e535fSSteve Capper { 4339c7e535fSSteve Capper return __pmd(pte_val(pte)); 4349c7e535fSSteve Capper } 435af074848SSteve Capper 436f7f0097aSAnshuman Khandual static inline pgprot_t mk_pud_sect_prot(pgprot_t prot) 4378ce837ceSArd Biesheuvel { 438f7f0097aSAnshuman Khandual return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT); 439f7f0097aSAnshuman Khandual } 440f7f0097aSAnshuman Khandual 441f7f0097aSAnshuman Khandual static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot) 442f7f0097aSAnshuman Khandual { 443f7f0097aSAnshuman Khandual return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT); 4448ce837ceSArd Biesheuvel } 4458ce837ceSArd Biesheuvel 446570ef363SDavid Hildenbrand static inline pte_t pte_swp_mkexclusive(pte_t pte) 447570ef363SDavid Hildenbrand { 448570ef363SDavid Hildenbrand return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); 449570ef363SDavid Hildenbrand } 450570ef363SDavid Hildenbrand 451570ef363SDavid Hildenbrand static inline int pte_swp_exclusive(pte_t pte) 452570ef363SDavid Hildenbrand { 453570ef363SDavid Hildenbrand return pte_val(pte) & PTE_SWP_EXCLUSIVE; 454570ef363SDavid Hildenbrand } 455570ef363SDavid Hildenbrand 456570ef363SDavid Hildenbrand static inline pte_t pte_swp_clear_exclusive(pte_t pte) 457570ef363SDavid Hildenbrand { 458570ef363SDavid Hildenbrand return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); 459570ef363SDavid Hildenbrand } 460570ef363SDavid Hildenbrand 46156166230SGanapatrao Kulkarni #ifdef CONFIG_NUMA_BALANCING 46256166230SGanapatrao Kulkarni /* 463ca5999fdSMike Rapoport * See the comment in include/linux/pgtable.h 46456166230SGanapatrao Kulkarni */ 46556166230SGanapatrao Kulkarni static inline int pte_protnone(pte_t pte) 46656166230SGanapatrao Kulkarni { 46756166230SGanapatrao Kulkarni return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE; 46856166230SGanapatrao Kulkarni } 46956166230SGanapatrao Kulkarni 47056166230SGanapatrao Kulkarni static inline int pmd_protnone(pmd_t pmd) 47156166230SGanapatrao Kulkarni { 47256166230SGanapatrao Kulkarni return pte_protnone(pmd_pte(pmd)); 47356166230SGanapatrao Kulkarni } 47456166230SGanapatrao Kulkarni #endif 47556166230SGanapatrao Kulkarni 476b65399f6SAnshuman Khandual #define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID)) 477b65399f6SAnshuman Khandual 478b65399f6SAnshuman Khandual static inline int pmd_present(pmd_t pmd) 479b65399f6SAnshuman Khandual { 480b65399f6SAnshuman Khandual return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd); 481b65399f6SAnshuman Khandual } 482b65399f6SAnshuman Khandual 483af074848SSteve Capper /* 484af074848SSteve Capper * THP definitions. 485af074848SSteve Capper */ 486af074848SSteve Capper 487af074848SSteve Capper #ifdef CONFIG_TRANSPARENT_HUGEPAGE 488b65399f6SAnshuman Khandual static inline int pmd_trans_huge(pmd_t pmd) 489b65399f6SAnshuman Khandual { 490b65399f6SAnshuman Khandual return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); 491b65399f6SAnshuman Khandual } 49229e56940SSteve Capper #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 493af074848SSteve Capper 494c164e038SKirill A. Shutemov #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) 4959c7e535fSSteve Capper #define pmd_young(pmd) pte_young(pmd_pte(pmd)) 4960795edafSWill Deacon #define pmd_valid(pmd) pte_valid(pmd_pte(pmd)) 49742b25471SKefeng Wang #define pmd_user(pmd) pte_user(pmd_pte(pmd)) 49842b25471SKefeng Wang #define pmd_user_exec(pmd) pte_user_exec(pmd_pte(pmd)) 499d55863dbSPeter Zijlstra #define pmd_cont(pmd) pte_cont(pmd_pte(pmd)) 5009c7e535fSSteve Capper #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) 5019c7e535fSSteve Capper #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) 5022f0584f3SRick Edgecombe #define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))) 50305ee26d9SMinchan Kim #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) 5049c7e535fSSteve Capper #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) 5059c7e535fSSteve Capper #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) 506b65399f6SAnshuman Khandual 507b65399f6SAnshuman Khandual static inline pmd_t pmd_mkinvalid(pmd_t pmd) 508b65399f6SAnshuman Khandual { 509b65399f6SAnshuman Khandual pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID)); 510b65399f6SAnshuman Khandual pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID)); 511b65399f6SAnshuman Khandual 512b65399f6SAnshuman Khandual return pmd; 513b65399f6SAnshuman Khandual } 514af074848SSteve Capper 5150dbd3b18SSuzuki K Poulose #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) 5160dbd3b18SSuzuki K Poulose 5179c7e535fSSteve Capper #define pmd_write(pmd) pte_write(pmd_pte(pmd)) 518af074848SSteve Capper 519af074848SSteve Capper #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) 520af074848SSteve Capper 52173b20c84SRobin Murphy #ifdef CONFIG_TRANSPARENT_HUGEPAGE 52273b20c84SRobin Murphy #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) 52373b20c84SRobin Murphy #endif 52430e23538SJia He static inline pmd_t pmd_mkdevmap(pmd_t pmd) 52530e23538SJia He { 52630e23538SJia He return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP))); 52730e23538SJia He } 52873b20c84SRobin Murphy 52975387b92SKristina Martsenko #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) 53075387b92SKristina Martsenko #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) 53175387b92SKristina Martsenko #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) 53275387b92SKristina Martsenko #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 533af074848SSteve Capper #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) 534af074848SSteve Capper 53535a63966SPunit Agrawal #define pud_young(pud) pte_young(pud_pte(pud)) 536eb3f0624SPunit Agrawal #define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) 53729e56940SSteve Capper #define pud_write(pud) pte_write(pud_pte(pud)) 53875387b92SKristina Martsenko 539b8e0ba7cSPunit Agrawal #define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT)) 540b8e0ba7cSPunit Agrawal 54175387b92SKristina Martsenko #define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud)) 54275387b92SKristina Martsenko #define __phys_to_pud_val(phys) __phys_to_pte_val(phys) 54375387b92SKristina Martsenko #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) 54475387b92SKristina Martsenko #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 545af074848SSteve Capper 546dba2ff49SCatalin Marinas static inline void __set_pte_at(struct mm_struct *mm, 547dba2ff49SCatalin Marinas unsigned long __always_unused addr, 5483425cec4SRyan Roberts pte_t *ptep, pte_t pte, unsigned int nr) 5493425cec4SRyan Roberts { 5503425cec4SRyan Roberts __sync_cache_and_tags(pte, nr); 5513425cec4SRyan Roberts __check_safe_pte_update(mm, ptep, pte); 5525a00bfd6SRyan Roberts __set_pte(ptep, pte); 5533425cec4SRyan Roberts } 5543425cec4SRyan Roberts 55542b25471SKefeng Wang static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 55642b25471SKefeng Wang pmd_t *pmdp, pmd_t pmd) 55742b25471SKefeng Wang { 558a3b83713SKemeng Shi page_table_check_pmd_set(mm, pmdp, pmd); 5593425cec4SRyan Roberts return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), 5603425cec4SRyan Roberts PMD_SIZE >> PAGE_SHIFT); 56142b25471SKefeng Wang } 56242b25471SKefeng Wang 56342b25471SKefeng Wang static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, 56442b25471SKefeng Wang pud_t *pudp, pud_t pud) 56542b25471SKefeng Wang { 5666d144436SKemeng Shi page_table_check_pud_set(mm, pudp, pud); 5673425cec4SRyan Roberts return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud), 5683425cec4SRyan Roberts PUD_SIZE >> PAGE_SHIFT); 56942b25471SKefeng Wang } 570af074848SSteve Capper 571e9f63768SMike Rapoport #define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d)) 572e9f63768SMike Rapoport #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys) 573e9f63768SMike Rapoport 57475387b92SKristina Martsenko #define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd)) 57575387b92SKristina Martsenko #define __phys_to_pgd_val(phys) __phys_to_pte_val(phys) 57675387b92SKristina Martsenko 577a501e324SCatalin Marinas #define __pgprot_modify(prot,mask,bits) \ 578a501e324SCatalin Marinas __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) 579a501e324SCatalin Marinas 580cca98e9fSChristoph Hellwig #define pgprot_nx(prot) \ 581034aa9cdSWill Deacon __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN) 582cca98e9fSChristoph Hellwig 583af074848SSteve Capper /* 5844f04d8f0SCatalin Marinas * Mark the prot value as uncacheable and unbufferable. 5854f04d8f0SCatalin Marinas */ 5864f04d8f0SCatalin Marinas #define pgprot_noncached(prot) \ 587de2db743SCatalin Marinas __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) 5884f04d8f0SCatalin Marinas #define pgprot_writecombine(prot) \ 589de2db743SCatalin Marinas __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 590d1e6dc91SLiviu Dudau #define pgprot_device(prot) \ 591d1e6dc91SLiviu Dudau __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) 592d15dfd31SCatalin Marinas #define pgprot_tagged(prot) \ 593d15dfd31SCatalin Marinas __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED)) 594d15dfd31SCatalin Marinas #define pgprot_mhp pgprot_tagged 5953e4e1d3fSChristoph Hellwig /* 5963e4e1d3fSChristoph Hellwig * DMA allocations for non-coherent devices use what the Arm architecture calls 5973e4e1d3fSChristoph Hellwig * "Normal non-cacheable" memory, which permits speculation, unaligned accesses 5983e4e1d3fSChristoph Hellwig * and merging of writes. This is different from "Device-nGnR[nE]" memory which 5993e4e1d3fSChristoph Hellwig * is intended for MMIO and thus forbids speculation, preserves access size, 6003e4e1d3fSChristoph Hellwig * requires strict alignment and can also force write responses to come from the 6013e4e1d3fSChristoph Hellwig * endpoint. 6023e4e1d3fSChristoph Hellwig */ 603419e2f18SChristoph Hellwig #define pgprot_dmacoherent(prot) \ 604419e2f18SChristoph Hellwig __pgprot_modify(prot, PTE_ATTRINDX_MASK, \ 605419e2f18SChristoph Hellwig PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 606419e2f18SChristoph Hellwig 6074f04d8f0SCatalin Marinas #define __HAVE_PHYS_MEM_ACCESS_PROT 6084f04d8f0SCatalin Marinas struct file; 6094f04d8f0SCatalin Marinas extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 6104f04d8f0SCatalin Marinas unsigned long size, pgprot_t vma_prot); 6114f04d8f0SCatalin Marinas 6124f04d8f0SCatalin Marinas #define pmd_none(pmd) (!pmd_val(pmd)) 6134f04d8f0SCatalin Marinas 61436311607SMarc Zyngier #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 61536311607SMarc Zyngier PMD_TYPE_TABLE) 61636311607SMarc Zyngier #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 61736311607SMarc Zyngier PMD_TYPE_SECT) 61823bc8f69SMuchun Song #define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd)) 619e377ab82SAnshuman Khandual #define pmd_bad(pmd) (!pmd_table(pmd)) 62036311607SMarc Zyngier 621d55863dbSPeter Zijlstra #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE) 622d55863dbSPeter Zijlstra #define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE) 623d55863dbSPeter Zijlstra 624cac4b8cdSCatalin Marinas #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 6257d4e2dcfSQian Cai static inline bool pud_sect(pud_t pud) { return false; } 6267d4e2dcfSQian Cai static inline bool pud_table(pud_t pud) { return true; } 627206a2a73SSteve Capper #else 628206a2a73SSteve Capper #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 629206a2a73SSteve Capper PUD_TYPE_SECT) 630523d6e9fSzhichang.yuan #define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 631523d6e9fSzhichang.yuan PUD_TYPE_TABLE) 632206a2a73SSteve Capper #endif 63336311607SMarc Zyngier 6342330b7caSJun Yao extern pgd_t init_pg_dir[PTRS_PER_PGD]; 6352330b7caSJun Yao extern pgd_t init_pg_end[]; 6362330b7caSJun Yao extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 6372330b7caSJun Yao extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; 6382330b7caSJun Yao extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; 639833be850SMark Rutland extern pgd_t reserved_pg_dir[PTRS_PER_PGD]; 6402330b7caSJun Yao 6412330b7caSJun Yao extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); 6422330b7caSJun Yao 6432330b7caSJun Yao static inline bool in_swapper_pgdir(void *addr) 6442330b7caSJun Yao { 6452330b7caSJun Yao return ((unsigned long)addr & PAGE_MASK) == 6462330b7caSJun Yao ((unsigned long)swapper_pg_dir & PAGE_MASK); 6472330b7caSJun Yao } 6482330b7caSJun Yao 6494f04d8f0SCatalin Marinas static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 6504f04d8f0SCatalin Marinas { 651e9ed821bSJames Morse #ifdef __PAGETABLE_PMD_FOLDED 652e9ed821bSJames Morse if (in_swapper_pgdir(pmdp)) { 6532330b7caSJun Yao set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd))); 6542330b7caSJun Yao return; 6552330b7caSJun Yao } 656e9ed821bSJames Morse #endif /* __PAGETABLE_PMD_FOLDED */ 6572330b7caSJun Yao 65820a004e7SWill Deacon WRITE_ONCE(*pmdp, pmd); 6590795edafSWill Deacon 660d0b7a302SWill Deacon if (pmd_valid(pmd)) { 66198f7685eSWill Deacon dsb(ishst); 662d0b7a302SWill Deacon isb(); 663d0b7a302SWill Deacon } 6644f04d8f0SCatalin Marinas } 6654f04d8f0SCatalin Marinas 6664f04d8f0SCatalin Marinas static inline void pmd_clear(pmd_t *pmdp) 6674f04d8f0SCatalin Marinas { 6684f04d8f0SCatalin Marinas set_pmd(pmdp, __pmd(0)); 6694f04d8f0SCatalin Marinas } 6704f04d8f0SCatalin Marinas 671dca56dcaSMark Rutland static inline phys_addr_t pmd_page_paddr(pmd_t pmd) 6724f04d8f0SCatalin Marinas { 67375387b92SKristina Martsenko return __pmd_to_phys(pmd); 6744f04d8f0SCatalin Marinas } 6754f04d8f0SCatalin Marinas 676974b9b2cSMike Rapoport static inline unsigned long pmd_page_vaddr(pmd_t pmd) 677974b9b2cSMike Rapoport { 678974b9b2cSMike Rapoport return (unsigned long)__va(pmd_page_paddr(pmd)); 679974b9b2cSMike Rapoport } 68074dd022fSQian Cai 681053520f7SMark Rutland /* Find an entry in the third-level page table. */ 682f069fabaSWill Deacon #define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t)) 683053520f7SMark Rutland 684961faac1SMark Rutland #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) 685961faac1SMark Rutland #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr)) 686961faac1SMark Rutland #define pte_clear_fixmap() clear_fixmap(FIX_PTE) 687961faac1SMark Rutland 68868ecabd0SGavin Shan #define pmd_page(pmd) phys_to_page(__pmd_to_phys(pmd)) 6894f04d8f0SCatalin Marinas 6906533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */ 6916533945aSArd Biesheuvel #define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr)))) 6926533945aSArd Biesheuvel 6934f04d8f0SCatalin Marinas /* 6944f04d8f0SCatalin Marinas * Conversion functions: convert a page and protection to a page entry, 6954f04d8f0SCatalin Marinas * and a page entry and page directory to the page they refer to. 6964f04d8f0SCatalin Marinas */ 6974f04d8f0SCatalin Marinas #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) 6984f04d8f0SCatalin Marinas 6999f25e6adSKirill A. Shutemov #if CONFIG_PGTABLE_LEVELS > 2 7004f04d8f0SCatalin Marinas 7012cf660ebSGavin Shan #define pmd_ERROR(e) \ 7022cf660ebSGavin Shan pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) 7037078db46SCatalin Marinas 7044f04d8f0SCatalin Marinas #define pud_none(pud) (!pud_val(pud)) 705e377ab82SAnshuman Khandual #define pud_bad(pud) (!pud_table(pud)) 706f02ab08aSPunit Agrawal #define pud_present(pud) pte_present(pud_pte(pud)) 70723bc8f69SMuchun Song #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) 7080795edafSWill Deacon #define pud_valid(pud) pte_valid(pud_pte(pud)) 70942b25471SKefeng Wang #define pud_user(pud) pte_user(pud_pte(pud)) 710730a11f9SLiu Shixin #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) 7114f04d8f0SCatalin Marinas 7124f04d8f0SCatalin Marinas static inline void set_pud(pud_t *pudp, pud_t pud) 7134f04d8f0SCatalin Marinas { 714e9ed821bSJames Morse #ifdef __PAGETABLE_PUD_FOLDED 715e9ed821bSJames Morse if (in_swapper_pgdir(pudp)) { 7162330b7caSJun Yao set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud))); 7172330b7caSJun Yao return; 7182330b7caSJun Yao } 719e9ed821bSJames Morse #endif /* __PAGETABLE_PUD_FOLDED */ 7202330b7caSJun Yao 72120a004e7SWill Deacon WRITE_ONCE(*pudp, pud); 7220795edafSWill Deacon 723d0b7a302SWill Deacon if (pud_valid(pud)) { 72498f7685eSWill Deacon dsb(ishst); 725d0b7a302SWill Deacon isb(); 726d0b7a302SWill Deacon } 7274f04d8f0SCatalin Marinas } 7284f04d8f0SCatalin Marinas 7294f04d8f0SCatalin Marinas static inline void pud_clear(pud_t *pudp) 7304f04d8f0SCatalin Marinas { 7314f04d8f0SCatalin Marinas set_pud(pudp, __pud(0)); 7324f04d8f0SCatalin Marinas } 7334f04d8f0SCatalin Marinas 734dca56dcaSMark Rutland static inline phys_addr_t pud_page_paddr(pud_t pud) 7354f04d8f0SCatalin Marinas { 73675387b92SKristina Martsenko return __pud_to_phys(pud); 7374f04d8f0SCatalin Marinas } 7384f04d8f0SCatalin Marinas 7399cf6fa24SAneesh Kumar K.V static inline pmd_t *pud_pgtable(pud_t pud) 740974b9b2cSMike Rapoport { 7419cf6fa24SAneesh Kumar K.V return (pmd_t *)__va(pud_page_paddr(pud)); 742974b9b2cSMike Rapoport } 7437078db46SCatalin Marinas 744974b9b2cSMike Rapoport /* Find an entry in the second-level page table. */ 74520a004e7SWill Deacon #define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t)) 7467078db46SCatalin Marinas 747961faac1SMark Rutland #define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr)) 748961faac1SMark Rutland #define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr)) 749961faac1SMark Rutland #define pmd_clear_fixmap() clear_fixmap(FIX_PMD) 7504f04d8f0SCatalin Marinas 75168ecabd0SGavin Shan #define pud_page(pud) phys_to_page(__pud_to_phys(pud)) 75229e56940SSteve Capper 7536533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */ 7546533945aSArd Biesheuvel #define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr)))) 7556533945aSArd Biesheuvel 756dca56dcaSMark Rutland #else 757dca56dcaSMark Rutland 758dca56dcaSMark Rutland #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; }) 7594e4ff23aSWill Deacon #define pud_user_exec(pud) pud_user(pud) /* Always 0 with folding */ 760dca56dcaSMark Rutland 761961faac1SMark Rutland /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */ 762961faac1SMark Rutland #define pmd_set_fixmap(addr) NULL 763961faac1SMark Rutland #define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp) 764961faac1SMark Rutland #define pmd_clear_fixmap() 765961faac1SMark Rutland 7666533945aSArd Biesheuvel #define pmd_offset_kimg(dir,addr) ((pmd_t *)dir) 7676533945aSArd Biesheuvel 7689f25e6adSKirill A. Shutemov #endif /* CONFIG_PGTABLE_LEVELS > 2 */ 7694f04d8f0SCatalin Marinas 7709f25e6adSKirill A. Shutemov #if CONFIG_PGTABLE_LEVELS > 3 771c79b954bSJungseok Lee 7722cf660ebSGavin Shan #define pud_ERROR(e) \ 7732cf660ebSGavin Shan pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e)) 7747078db46SCatalin Marinas 775e9f63768SMike Rapoport #define p4d_none(p4d) (!p4d_val(p4d)) 776e9f63768SMike Rapoport #define p4d_bad(p4d) (!(p4d_val(p4d) & 2)) 777e9f63768SMike Rapoport #define p4d_present(p4d) (p4d_val(p4d)) 778c79b954bSJungseok Lee 779e9f63768SMike Rapoport static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) 780c79b954bSJungseok Lee { 781e9f63768SMike Rapoport if (in_swapper_pgdir(p4dp)) { 782e9f63768SMike Rapoport set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d))); 7832330b7caSJun Yao return; 7842330b7caSJun Yao } 7852330b7caSJun Yao 786e9f63768SMike Rapoport WRITE_ONCE(*p4dp, p4d); 787c79b954bSJungseok Lee dsb(ishst); 788eb6a4dccSWill Deacon isb(); 789c79b954bSJungseok Lee } 790c79b954bSJungseok Lee 791e9f63768SMike Rapoport static inline void p4d_clear(p4d_t *p4dp) 792c79b954bSJungseok Lee { 793e9f63768SMike Rapoport set_p4d(p4dp, __p4d(0)); 794c79b954bSJungseok Lee } 795c79b954bSJungseok Lee 796e9f63768SMike Rapoport static inline phys_addr_t p4d_page_paddr(p4d_t p4d) 797c79b954bSJungseok Lee { 798e9f63768SMike Rapoport return __p4d_to_phys(p4d); 799c79b954bSJungseok Lee } 800c79b954bSJungseok Lee 801dc4875f0SAneesh Kumar K.V static inline pud_t *p4d_pgtable(p4d_t p4d) 802974b9b2cSMike Rapoport { 803dc4875f0SAneesh Kumar K.V return (pud_t *)__va(p4d_page_paddr(p4d)); 804974b9b2cSMike Rapoport } 8057078db46SCatalin Marinas 8065845e703SXujun Leng /* Find an entry in the first-level page table. */ 807e9f63768SMike Rapoport #define pud_offset_phys(dir, addr) (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t)) 8087078db46SCatalin Marinas 809961faac1SMark Rutland #define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr)) 810e9f63768SMike Rapoport #define pud_set_fixmap_offset(p4d, addr) pud_set_fixmap(pud_offset_phys(p4d, addr)) 811961faac1SMark Rutland #define pud_clear_fixmap() clear_fixmap(FIX_PUD) 812c79b954bSJungseok Lee 813e9f63768SMike Rapoport #define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d))) 8145d96e0cbSJungseok Lee 8156533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */ 8166533945aSArd Biesheuvel #define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr)))) 8176533945aSArd Biesheuvel 818dca56dcaSMark Rutland #else 819dca56dcaSMark Rutland 820e9f63768SMike Rapoport #define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;}) 821dca56dcaSMark Rutland #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;}) 822dca56dcaSMark Rutland 823961faac1SMark Rutland /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */ 824961faac1SMark Rutland #define pud_set_fixmap(addr) NULL 825961faac1SMark Rutland #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) 826961faac1SMark Rutland #define pud_clear_fixmap() 827961faac1SMark Rutland 8286533945aSArd Biesheuvel #define pud_offset_kimg(dir,addr) ((pud_t *)dir) 8296533945aSArd Biesheuvel 8309f25e6adSKirill A. Shutemov #endif /* CONFIG_PGTABLE_LEVELS > 3 */ 831c79b954bSJungseok Lee 8322cf660ebSGavin Shan #define pgd_ERROR(e) \ 8332cf660ebSGavin Shan pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e)) 8347078db46SCatalin Marinas 835961faac1SMark Rutland #define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr)) 836961faac1SMark Rutland #define pgd_clear_fixmap() clear_fixmap(FIX_PGD) 837961faac1SMark Rutland 8384f04d8f0SCatalin Marinas static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 8394f04d8f0SCatalin Marinas { 8409f341931SCatalin Marinas /* 8419f341931SCatalin Marinas * Normal and Normal-Tagged are two different memory types and indices 8429f341931SCatalin Marinas * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK. 8439f341931SCatalin Marinas */ 844a6fadf7eSWill Deacon const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 8459f341931SCatalin Marinas PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP | 8469f341931SCatalin Marinas PTE_ATTRINDX_MASK; 8472f4b829cSCatalin Marinas /* preserve the hardware dirty information */ 8482f4b829cSCatalin Marinas if (pte_hw_dirty(pte)) 8496477c388SAnshuman Khandual pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 8506477c388SAnshuman Khandual 8514f04d8f0SCatalin Marinas pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 8523c069607SJames Houghton /* 8533c069607SJames Houghton * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware 8543c069607SJames Houghton * dirtiness again. 8553c069607SJames Houghton */ 8563c069607SJames Houghton if (pte_sw_dirty(pte)) 8573c069607SJames Houghton pte = pte_mkdirty(pte); 8584f04d8f0SCatalin Marinas return pte; 8594f04d8f0SCatalin Marinas } 8604f04d8f0SCatalin Marinas 8619c7e535fSSteve Capper static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 8629c7e535fSSteve Capper { 8639c7e535fSSteve Capper return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); 8649c7e535fSSteve Capper } 8659c7e535fSSteve Capper 8665a00bfd6SRyan Roberts extern int __ptep_set_access_flags(struct vm_area_struct *vma, 86766dbd6e6SCatalin Marinas unsigned long address, pte_t *ptep, 86866dbd6e6SCatalin Marinas pte_t entry, int dirty); 86966dbd6e6SCatalin Marinas 870282aa705SCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 871282aa705SCatalin Marinas #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 872282aa705SCatalin Marinas static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 873282aa705SCatalin Marinas unsigned long address, pmd_t *pmdp, 874282aa705SCatalin Marinas pmd_t entry, int dirty) 875282aa705SCatalin Marinas { 8765a00bfd6SRyan Roberts return __ptep_set_access_flags(vma, address, (pte_t *)pmdp, 8775a00bfd6SRyan Roberts pmd_pte(entry), dirty); 878282aa705SCatalin Marinas } 87973b20c84SRobin Murphy 88073b20c84SRobin Murphy static inline int pud_devmap(pud_t pud) 88173b20c84SRobin Murphy { 88273b20c84SRobin Murphy return 0; 88373b20c84SRobin Murphy } 88473b20c84SRobin Murphy 88573b20c84SRobin Murphy static inline int pgd_devmap(pgd_t pgd) 88673b20c84SRobin Murphy { 88773b20c84SRobin Murphy return 0; 88873b20c84SRobin Murphy } 889282aa705SCatalin Marinas #endif 890282aa705SCatalin Marinas 891ed928a34STong Tiangen #ifdef CONFIG_PAGE_TABLE_CHECK 892ed928a34STong Tiangen static inline bool pte_user_accessible_page(pte_t pte) 893ed928a34STong Tiangen { 894ed928a34STong Tiangen return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte)); 895ed928a34STong Tiangen } 896ed928a34STong Tiangen 897ed928a34STong Tiangen static inline bool pmd_user_accessible_page(pmd_t pmd) 898ed928a34STong Tiangen { 89974c2f810SLiu Shixin return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd)); 900ed928a34STong Tiangen } 901ed928a34STong Tiangen 902ed928a34STong Tiangen static inline bool pud_user_accessible_page(pud_t pud) 903ed928a34STong Tiangen { 904730a11f9SLiu Shixin return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud)); 905ed928a34STong Tiangen } 906ed928a34STong Tiangen #endif 907ed928a34STong Tiangen 9082f4b829cSCatalin Marinas /* 9092f4b829cSCatalin Marinas * Atomic pte/pmd modifications. 9102f4b829cSCatalin Marinas */ 9115a00bfd6SRyan Roberts static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma, 9125a00bfd6SRyan Roberts unsigned long address, 9135a00bfd6SRyan Roberts pte_t *ptep) 9142f4b829cSCatalin Marinas { 9153bbf7157SCatalin Marinas pte_t old_pte, pte; 9162f4b829cSCatalin Marinas 9175a00bfd6SRyan Roberts pte = __ptep_get(ptep); 9183bbf7157SCatalin Marinas do { 9193bbf7157SCatalin Marinas old_pte = pte; 9203bbf7157SCatalin Marinas pte = pte_mkold(pte); 9213bbf7157SCatalin Marinas pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 9223bbf7157SCatalin Marinas pte_val(old_pte), pte_val(pte)); 9233bbf7157SCatalin Marinas } while (pte_val(pte) != pte_val(old_pte)); 9242f4b829cSCatalin Marinas 9253bbf7157SCatalin Marinas return pte_young(pte); 9262f4b829cSCatalin Marinas } 9272f4b829cSCatalin Marinas 9285a00bfd6SRyan Roberts static inline int __ptep_clear_flush_young(struct vm_area_struct *vma, 9293403e56bSAlex Van Brunt unsigned long address, pte_t *ptep) 9303403e56bSAlex Van Brunt { 9315a00bfd6SRyan Roberts int young = __ptep_test_and_clear_young(vma, address, ptep); 9323403e56bSAlex Van Brunt 9333403e56bSAlex Van Brunt if (young) { 9343403e56bSAlex Van Brunt /* 9353403e56bSAlex Van Brunt * We can elide the trailing DSB here since the worst that can 9363403e56bSAlex Van Brunt * happen is that a CPU continues to use the young entry in its 9373403e56bSAlex Van Brunt * TLB and we mistakenly reclaim the associated page. The 9383403e56bSAlex Van Brunt * window for such an event is bounded by the next 9393403e56bSAlex Van Brunt * context-switch, which provides a DSB to complete the TLB 9403403e56bSAlex Van Brunt * invalidation. 9413403e56bSAlex Van Brunt */ 9423403e56bSAlex Van Brunt flush_tlb_page_nosync(vma, address); 9433403e56bSAlex Van Brunt } 9443403e56bSAlex Van Brunt 9453403e56bSAlex Van Brunt return young; 9463403e56bSAlex Van Brunt } 9473403e56bSAlex Van Brunt 9482f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 9492f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 9502f4b829cSCatalin Marinas static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 9512f4b829cSCatalin Marinas unsigned long address, 9522f4b829cSCatalin Marinas pmd_t *pmdp) 9532f4b829cSCatalin Marinas { 9545a00bfd6SRyan Roberts return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); 9552f4b829cSCatalin Marinas } 9562f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 9572f4b829cSCatalin Marinas 9585a00bfd6SRyan Roberts static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, 9592f4b829cSCatalin Marinas unsigned long address, pte_t *ptep) 9602f4b829cSCatalin Marinas { 96142b25471SKefeng Wang pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0)); 96242b25471SKefeng Wang 963aa232204SKemeng Shi page_table_check_pte_clear(mm, pte); 96442b25471SKefeng Wang 96542b25471SKefeng Wang return pte; 9662f4b829cSCatalin Marinas } 9672f4b829cSCatalin Marinas 9686b1e4efbSRyan Roberts static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr, 9696b1e4efbSRyan Roberts pte_t *ptep, unsigned int nr, int full) 9706b1e4efbSRyan Roberts { 9716b1e4efbSRyan Roberts for (;;) { 9726b1e4efbSRyan Roberts __ptep_get_and_clear(mm, addr, ptep); 9736b1e4efbSRyan Roberts if (--nr == 0) 9746b1e4efbSRyan Roberts break; 9756b1e4efbSRyan Roberts ptep++; 9766b1e4efbSRyan Roberts addr += PAGE_SIZE; 9776b1e4efbSRyan Roberts } 9786b1e4efbSRyan Roberts } 9796b1e4efbSRyan Roberts 9806b1e4efbSRyan Roberts static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm, 9816b1e4efbSRyan Roberts unsigned long addr, pte_t *ptep, 9826b1e4efbSRyan Roberts unsigned int nr, int full) 9836b1e4efbSRyan Roberts { 9846b1e4efbSRyan Roberts pte_t pte, tmp_pte; 9856b1e4efbSRyan Roberts 9866b1e4efbSRyan Roberts pte = __ptep_get_and_clear(mm, addr, ptep); 9876b1e4efbSRyan Roberts while (--nr) { 9886b1e4efbSRyan Roberts ptep++; 9896b1e4efbSRyan Roberts addr += PAGE_SIZE; 9906b1e4efbSRyan Roberts tmp_pte = __ptep_get_and_clear(mm, addr, ptep); 9916b1e4efbSRyan Roberts if (pte_dirty(tmp_pte)) 9926b1e4efbSRyan Roberts pte = pte_mkdirty(pte); 9936b1e4efbSRyan Roberts if (pte_young(tmp_pte)) 9946b1e4efbSRyan Roberts pte = pte_mkyoung(pte); 9956b1e4efbSRyan Roberts } 9966b1e4efbSRyan Roberts return pte; 9976b1e4efbSRyan Roberts } 9986b1e4efbSRyan Roberts 9992f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1000911f56eeSCatalin Marinas #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1001911f56eeSCatalin Marinas static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 10022f4b829cSCatalin Marinas unsigned long address, pmd_t *pmdp) 10032f4b829cSCatalin Marinas { 100442b25471SKefeng Wang pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0)); 100542b25471SKefeng Wang 10061831414cSKemeng Shi page_table_check_pmd_clear(mm, pmd); 100742b25471SKefeng Wang 100842b25471SKefeng Wang return pmd; 10092f4b829cSCatalin Marinas } 10102f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 10112f4b829cSCatalin Marinas 1012311a6cf2SRyan Roberts static inline void ___ptep_set_wrprotect(struct mm_struct *mm, 1013311a6cf2SRyan Roberts unsigned long address, pte_t *ptep, 1014311a6cf2SRyan Roberts pte_t pte) 1015311a6cf2SRyan Roberts { 1016311a6cf2SRyan Roberts pte_t old_pte; 1017311a6cf2SRyan Roberts 1018311a6cf2SRyan Roberts do { 1019311a6cf2SRyan Roberts old_pte = pte; 1020311a6cf2SRyan Roberts pte = pte_wrprotect(pte); 1021311a6cf2SRyan Roberts pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 1022311a6cf2SRyan Roberts pte_val(old_pte), pte_val(pte)); 1023311a6cf2SRyan Roberts } while (pte_val(pte) != pte_val(old_pte)); 1024311a6cf2SRyan Roberts } 1025311a6cf2SRyan Roberts 10262f4b829cSCatalin Marinas /* 10275a00bfd6SRyan Roberts * __ptep_set_wrprotect - mark read-only while trasferring potential hardware 10288781bcbcSSteve Capper * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. 10292f4b829cSCatalin Marinas */ 10305a00bfd6SRyan Roberts static inline void __ptep_set_wrprotect(struct mm_struct *mm, 10315a00bfd6SRyan Roberts unsigned long address, pte_t *ptep) 10322f4b829cSCatalin Marinas { 1033311a6cf2SRyan Roberts ___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep)); 1034311a6cf2SRyan Roberts } 10352f4b829cSCatalin Marinas 1036311a6cf2SRyan Roberts static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address, 1037311a6cf2SRyan Roberts pte_t *ptep, unsigned int nr) 1038311a6cf2SRyan Roberts { 1039311a6cf2SRyan Roberts unsigned int i; 1040311a6cf2SRyan Roberts 1041311a6cf2SRyan Roberts for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++) 1042311a6cf2SRyan Roberts __ptep_set_wrprotect(mm, address, ptep); 10432f4b829cSCatalin Marinas } 10442f4b829cSCatalin Marinas 10452f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 10462f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_SET_WRPROTECT 10472f4b829cSCatalin Marinas static inline void pmdp_set_wrprotect(struct mm_struct *mm, 10482f4b829cSCatalin Marinas unsigned long address, pmd_t *pmdp) 10492f4b829cSCatalin Marinas { 10505a00bfd6SRyan Roberts __ptep_set_wrprotect(mm, address, (pte_t *)pmdp); 10512f4b829cSCatalin Marinas } 10521d78a62cSCatalin Marinas 10531d78a62cSCatalin Marinas #define pmdp_establish pmdp_establish 10541d78a62cSCatalin Marinas static inline pmd_t pmdp_establish(struct vm_area_struct *vma, 10551d78a62cSCatalin Marinas unsigned long address, pmd_t *pmdp, pmd_t pmd) 10561d78a62cSCatalin Marinas { 1057a3b83713SKemeng Shi page_table_check_pmd_set(vma->vm_mm, pmdp, pmd); 10581d78a62cSCatalin Marinas return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd))); 10591d78a62cSCatalin Marinas } 10602f4b829cSCatalin Marinas #endif 10612f4b829cSCatalin Marinas 10624f04d8f0SCatalin Marinas /* 10634f04d8f0SCatalin Marinas * Encode and decode a swap entry: 10643676f9efSCatalin Marinas * bits 0-1: present (must be zero) 1065570ef363SDavid Hildenbrand * bits 2: remember PG_anon_exclusive 1066570ef363SDavid Hildenbrand * bits 3-7: swap type 10679b3e661eSKirill A. Shutemov * bits 8-57: swap offset 1068fdc69e7dSCatalin Marinas * bit 58: PTE_PROT_NONE (must be zero) 10694f04d8f0SCatalin Marinas */ 1070570ef363SDavid Hildenbrand #define __SWP_TYPE_SHIFT 3 1071570ef363SDavid Hildenbrand #define __SWP_TYPE_BITS 5 10729b3e661eSKirill A. Shutemov #define __SWP_OFFSET_BITS 50 10734f04d8f0SCatalin Marinas #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) 10744f04d8f0SCatalin Marinas #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) 10753676f9efSCatalin Marinas #define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) 10764f04d8f0SCatalin Marinas 10774f04d8f0SCatalin Marinas #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) 10783676f9efSCatalin Marinas #define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) 10794f04d8f0SCatalin Marinas #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) 10804f04d8f0SCatalin Marinas 10814f04d8f0SCatalin Marinas #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 10824f04d8f0SCatalin Marinas #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) 10834f04d8f0SCatalin Marinas 108453fa117bSAnshuman Khandual #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 108553fa117bSAnshuman Khandual #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) 108653fa117bSAnshuman Khandual #define __swp_entry_to_pmd(swp) __pmd((swp).val) 108753fa117bSAnshuman Khandual #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ 108853fa117bSAnshuman Khandual 10894f04d8f0SCatalin Marinas /* 10904f04d8f0SCatalin Marinas * Ensure that there are not more swap files than can be encoded in the kernel 1091aad9061bSGeert Uytterhoeven * PTEs. 10924f04d8f0SCatalin Marinas */ 10934f04d8f0SCatalin Marinas #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) 10944f04d8f0SCatalin Marinas 109536943abaSSteven Price #ifdef CONFIG_ARM64_MTE 109636943abaSSteven Price 109736943abaSSteven Price #define __HAVE_ARCH_PREPARE_TO_SWAP 109836943abaSSteven Price static inline int arch_prepare_to_swap(struct page *page) 109936943abaSSteven Price { 110036943abaSSteven Price if (system_supports_mte()) 110136943abaSSteven Price return mte_save_tags(page); 110236943abaSSteven Price return 0; 110336943abaSSteven Price } 110436943abaSSteven Price 110536943abaSSteven Price #define __HAVE_ARCH_SWAP_INVALIDATE 110636943abaSSteven Price static inline void arch_swap_invalidate_page(int type, pgoff_t offset) 110736943abaSSteven Price { 110836943abaSSteven Price if (system_supports_mte()) 110936943abaSSteven Price mte_invalidate_tags(type, offset); 111036943abaSSteven Price } 111136943abaSSteven Price 111236943abaSSteven Price static inline void arch_swap_invalidate_area(int type) 111336943abaSSteven Price { 111436943abaSSteven Price if (system_supports_mte()) 111536943abaSSteven Price mte_invalidate_tags_area(type); 111636943abaSSteven Price } 111736943abaSSteven Price 111836943abaSSteven Price #define __HAVE_ARCH_SWAP_RESTORE 1119da08e9b7SMatthew Wilcox (Oracle) static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) 112036943abaSSteven Price { 1121d77e59a8SCatalin Marinas if (system_supports_mte()) 1122d77e59a8SCatalin Marinas mte_restore_tags(entry, &folio->page); 112336943abaSSteven Price } 112436943abaSSteven Price 112536943abaSSteven Price #endif /* CONFIG_ARM64_MTE */ 112636943abaSSteven Price 1127cba3574fSWill Deacon /* 11285a00bfd6SRyan Roberts * On AArch64, the cache coherency is handled via the __set_ptes() function. 1129cba3574fSWill Deacon */ 11304a169d61SMatthew Wilcox (Oracle) static inline void update_mmu_cache_range(struct vm_fault *vmf, 11314a169d61SMatthew Wilcox (Oracle) struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 11324a169d61SMatthew Wilcox (Oracle) unsigned int nr) 1133cba3574fSWill Deacon { 1134cba3574fSWill Deacon /* 1135120798d2SWill Deacon * We don't do anything here, so there's a very small chance of 1136120798d2SWill Deacon * us retaking a user fault which we just fixed up. The alternative 1137120798d2SWill Deacon * is doing a dsb(ishst), but that penalises the fastpath. 1138cba3574fSWill Deacon */ 1139cba3574fSWill Deacon } 1140cba3574fSWill Deacon 11414a169d61SMatthew Wilcox (Oracle) #define update_mmu_cache(vma, addr, ptep) \ 11424a169d61SMatthew Wilcox (Oracle) update_mmu_cache_range(NULL, vma, addr, ptep, 1) 1143cba3574fSWill Deacon #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) 1144cba3574fSWill Deacon 1145529c4b05SKristina Martsenko #ifdef CONFIG_ARM64_PA_BITS_52 1146529c4b05SKristina Martsenko #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) 1147529c4b05SKristina Martsenko #else 1148529c4b05SKristina Martsenko #define phys_to_ttbr(addr) (addr) 1149529c4b05SKristina Martsenko #endif 1150529c4b05SKristina Martsenko 11516af31226SJia He /* 11526af31226SJia He * On arm64 without hardware Access Flag, copying from user will fail because 11536af31226SJia He * the pte is old and cannot be marked young. So we always end up with zeroed 11546af31226SJia He * page after fork() + CoW for pfn mappings. We don't always have a 11556af31226SJia He * hardware-managed access flag on arm64. 11566af31226SJia He */ 1157e1fd09e3SYu Zhao #define arch_has_hw_pte_young cpu_has_hw_af 11580388f9c7SWill Deacon 11590388f9c7SWill Deacon /* 11600388f9c7SWill Deacon * Experimentally, it's cheap to set the access flag in hardware and we 11610388f9c7SWill Deacon * benefit from prefaulting mappings as 'old' to start with. 11620388f9c7SWill Deacon */ 1163e1fd09e3SYu Zhao #define arch_wants_old_prefaulted_pte cpu_has_hw_af 11646af31226SJia He 1165f8b46c4bSAnshuman Khandual static inline bool pud_sect_supported(void) 1166f8b46c4bSAnshuman Khandual { 1167f8b46c4bSAnshuman Khandual return PAGE_SIZE == SZ_4K; 1168f8b46c4bSAnshuman Khandual } 1169f8b46c4bSAnshuman Khandual 117018107f8aSVladimir Murzin 11715db568e7SAnshuman Khandual #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 11725db568e7SAnshuman Khandual #define ptep_modify_prot_start ptep_modify_prot_start 11735db568e7SAnshuman Khandual extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, 11745db568e7SAnshuman Khandual unsigned long addr, pte_t *ptep); 11755db568e7SAnshuman Khandual 11765db568e7SAnshuman Khandual #define ptep_modify_prot_commit ptep_modify_prot_commit 11775db568e7SAnshuman Khandual extern void ptep_modify_prot_commit(struct vm_area_struct *vma, 11785db568e7SAnshuman Khandual unsigned long addr, pte_t *ptep, 11795db568e7SAnshuman Khandual pte_t old_pte, pte_t new_pte); 11805a00bfd6SRyan Roberts 11814602e575SRyan Roberts #ifdef CONFIG_ARM64_CONTPTE 11824602e575SRyan Roberts 11834602e575SRyan Roberts /* 11844602e575SRyan Roberts * The contpte APIs are used to transparently manage the contiguous bit in ptes 11854602e575SRyan Roberts * where it is possible and makes sense to do so. The PTE_CONT bit is considered 11864602e575SRyan Roberts * a private implementation detail of the public ptep API (see below). 11874602e575SRyan Roberts */ 1188*f0c22649SRyan Roberts extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, 1189*f0c22649SRyan Roberts pte_t *ptep, pte_t pte); 11904602e575SRyan Roberts extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, 11914602e575SRyan Roberts pte_t *ptep, pte_t pte); 11924602e575SRyan Roberts extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte); 11934602e575SRyan Roberts extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep); 11944602e575SRyan Roberts extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, 11954602e575SRyan Roberts pte_t *ptep, pte_t pte, unsigned int nr); 11966b1e4efbSRyan Roberts extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, 11976b1e4efbSRyan Roberts pte_t *ptep, unsigned int nr, int full); 11986b1e4efbSRyan Roberts extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, 11996b1e4efbSRyan Roberts unsigned long addr, pte_t *ptep, 12006b1e4efbSRyan Roberts unsigned int nr, int full); 12014602e575SRyan Roberts extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, 12024602e575SRyan Roberts unsigned long addr, pte_t *ptep); 12034602e575SRyan Roberts extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, 12044602e575SRyan Roberts unsigned long addr, pte_t *ptep); 1205311a6cf2SRyan Roberts extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, 1206311a6cf2SRyan Roberts pte_t *ptep, unsigned int nr); 12074602e575SRyan Roberts extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, 12084602e575SRyan Roberts unsigned long addr, pte_t *ptep, 12094602e575SRyan Roberts pte_t entry, int dirty); 12104602e575SRyan Roberts 1211*f0c22649SRyan Roberts static __always_inline void contpte_try_fold(struct mm_struct *mm, 1212*f0c22649SRyan Roberts unsigned long addr, pte_t *ptep, pte_t pte) 1213*f0c22649SRyan Roberts { 1214*f0c22649SRyan Roberts /* 1215*f0c22649SRyan Roberts * Only bother trying if both the virtual and physical addresses are 1216*f0c22649SRyan Roberts * aligned and correspond to the last entry in a contig range. The core 1217*f0c22649SRyan Roberts * code mostly modifies ranges from low to high, so this is the likely 1218*f0c22649SRyan Roberts * the last modification in the contig range, so a good time to fold. 1219*f0c22649SRyan Roberts * We can't fold special mappings, because there is no associated folio. 1220*f0c22649SRyan Roberts */ 1221*f0c22649SRyan Roberts 1222*f0c22649SRyan Roberts const unsigned long contmask = CONT_PTES - 1; 1223*f0c22649SRyan Roberts bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask; 1224*f0c22649SRyan Roberts 1225*f0c22649SRyan Roberts if (unlikely(valign)) { 1226*f0c22649SRyan Roberts bool palign = (pte_pfn(pte) & contmask) == contmask; 1227*f0c22649SRyan Roberts 1228*f0c22649SRyan Roberts if (unlikely(palign && 1229*f0c22649SRyan Roberts pte_valid(pte) && !pte_cont(pte) && !pte_special(pte))) 1230*f0c22649SRyan Roberts __contpte_try_fold(mm, addr, ptep, pte); 1231*f0c22649SRyan Roberts } 1232*f0c22649SRyan Roberts } 1233*f0c22649SRyan Roberts 1234b972fc6aSRyan Roberts static __always_inline void contpte_try_unfold(struct mm_struct *mm, 1235b972fc6aSRyan Roberts unsigned long addr, pte_t *ptep, pte_t pte) 12364602e575SRyan Roberts { 12374602e575SRyan Roberts if (unlikely(pte_valid_cont(pte))) 12384602e575SRyan Roberts __contpte_try_unfold(mm, addr, ptep, pte); 12394602e575SRyan Roberts } 12404602e575SRyan Roberts 1241fb5451e5SRyan Roberts #define pte_batch_hint pte_batch_hint 1242fb5451e5SRyan Roberts static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) 1243fb5451e5SRyan Roberts { 1244fb5451e5SRyan Roberts if (!pte_valid_cont(pte)) 1245fb5451e5SRyan Roberts return 1; 1246fb5451e5SRyan Roberts 1247fb5451e5SRyan Roberts return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1)); 1248fb5451e5SRyan Roberts } 1249fb5451e5SRyan Roberts 12504602e575SRyan Roberts /* 12514602e575SRyan Roberts * The below functions constitute the public API that arm64 presents to the 12524602e575SRyan Roberts * core-mm to manipulate PTE entries within their page tables (or at least this 12534602e575SRyan Roberts * is the subset of the API that arm64 needs to implement). These public 12544602e575SRyan Roberts * versions will automatically and transparently apply the contiguous bit where 12554602e575SRyan Roberts * it makes sense to do so. Therefore any users that are contig-aware (e.g. 12564602e575SRyan Roberts * hugetlb, kernel mapper) should NOT use these APIs, but instead use the 12574602e575SRyan Roberts * private versions, which are prefixed with double underscore. All of these 12584602e575SRyan Roberts * APIs except for ptep_get_lockless() are expected to be called with the PTL 12594602e575SRyan Roberts * held. Although the contiguous bit is considered private to the 12604602e575SRyan Roberts * implementation, it is deliberately allowed to leak through the getters (e.g. 12614602e575SRyan Roberts * ptep_get()), back to core code. This is required so that pte_leaf_size() can 12624602e575SRyan Roberts * provide an accurate size for perf_get_pgtable_size(). But this leakage means 12634602e575SRyan Roberts * its possible a pte will be passed to a setter with the contiguous bit set, so 12644602e575SRyan Roberts * we explicitly clear the contiguous bit in those cases to prevent accidentally 12654602e575SRyan Roberts * setting it in the pgtable. 12664602e575SRyan Roberts */ 12674602e575SRyan Roberts 12684602e575SRyan Roberts #define ptep_get ptep_get 12694602e575SRyan Roberts static inline pte_t ptep_get(pte_t *ptep) 12704602e575SRyan Roberts { 12714602e575SRyan Roberts pte_t pte = __ptep_get(ptep); 12724602e575SRyan Roberts 12734602e575SRyan Roberts if (likely(!pte_valid_cont(pte))) 12744602e575SRyan Roberts return pte; 12754602e575SRyan Roberts 12764602e575SRyan Roberts return contpte_ptep_get(ptep, pte); 12774602e575SRyan Roberts } 12784602e575SRyan Roberts 12794602e575SRyan Roberts #define ptep_get_lockless ptep_get_lockless 12804602e575SRyan Roberts static inline pte_t ptep_get_lockless(pte_t *ptep) 12814602e575SRyan Roberts { 12824602e575SRyan Roberts pte_t pte = __ptep_get(ptep); 12834602e575SRyan Roberts 12844602e575SRyan Roberts if (likely(!pte_valid_cont(pte))) 12854602e575SRyan Roberts return pte; 12864602e575SRyan Roberts 12874602e575SRyan Roberts return contpte_ptep_get_lockless(ptep); 12884602e575SRyan Roberts } 12894602e575SRyan Roberts 12904602e575SRyan Roberts static inline void set_pte(pte_t *ptep, pte_t pte) 12914602e575SRyan Roberts { 12924602e575SRyan Roberts /* 12934602e575SRyan Roberts * We don't have the mm or vaddr so cannot unfold contig entries (since 12944602e575SRyan Roberts * it requires tlb maintenance). set_pte() is not used in core code, so 12954602e575SRyan Roberts * this should never even be called. Regardless do our best to service 12964602e575SRyan Roberts * any call and emit a warning if there is any attempt to set a pte on 12974602e575SRyan Roberts * top of an existing contig range. 12984602e575SRyan Roberts */ 12994602e575SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 13004602e575SRyan Roberts 13014602e575SRyan Roberts WARN_ON_ONCE(pte_valid_cont(orig_pte)); 13024602e575SRyan Roberts __set_pte(ptep, pte_mknoncont(pte)); 13034602e575SRyan Roberts } 13044602e575SRyan Roberts 13054602e575SRyan Roberts #define set_ptes set_ptes 1306b972fc6aSRyan Roberts static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr, 13074602e575SRyan Roberts pte_t *ptep, pte_t pte, unsigned int nr) 13084602e575SRyan Roberts { 13094602e575SRyan Roberts pte = pte_mknoncont(pte); 13104602e575SRyan Roberts 13114602e575SRyan Roberts if (likely(nr == 1)) { 13124602e575SRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 13134602e575SRyan Roberts __set_ptes(mm, addr, ptep, pte, 1); 1314*f0c22649SRyan Roberts contpte_try_fold(mm, addr, ptep, pte); 13154602e575SRyan Roberts } else { 13164602e575SRyan Roberts contpte_set_ptes(mm, addr, ptep, pte, nr); 13174602e575SRyan Roberts } 13184602e575SRyan Roberts } 13194602e575SRyan Roberts 13204602e575SRyan Roberts static inline void pte_clear(struct mm_struct *mm, 13214602e575SRyan Roberts unsigned long addr, pte_t *ptep) 13224602e575SRyan Roberts { 13234602e575SRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 13244602e575SRyan Roberts __pte_clear(mm, addr, ptep); 13254602e575SRyan Roberts } 13264602e575SRyan Roberts 13276b1e4efbSRyan Roberts #define clear_full_ptes clear_full_ptes 13286b1e4efbSRyan Roberts static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, 13296b1e4efbSRyan Roberts pte_t *ptep, unsigned int nr, int full) 13306b1e4efbSRyan Roberts { 13316b1e4efbSRyan Roberts if (likely(nr == 1)) { 13326b1e4efbSRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 13336b1e4efbSRyan Roberts __clear_full_ptes(mm, addr, ptep, nr, full); 13346b1e4efbSRyan Roberts } else { 13356b1e4efbSRyan Roberts contpte_clear_full_ptes(mm, addr, ptep, nr, full); 13366b1e4efbSRyan Roberts } 13376b1e4efbSRyan Roberts } 13386b1e4efbSRyan Roberts 13396b1e4efbSRyan Roberts #define get_and_clear_full_ptes get_and_clear_full_ptes 13406b1e4efbSRyan Roberts static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, 13416b1e4efbSRyan Roberts unsigned long addr, pte_t *ptep, 13426b1e4efbSRyan Roberts unsigned int nr, int full) 13436b1e4efbSRyan Roberts { 13446b1e4efbSRyan Roberts pte_t pte; 13456b1e4efbSRyan Roberts 13466b1e4efbSRyan Roberts if (likely(nr == 1)) { 13476b1e4efbSRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 13486b1e4efbSRyan Roberts pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full); 13496b1e4efbSRyan Roberts } else { 13506b1e4efbSRyan Roberts pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full); 13516b1e4efbSRyan Roberts } 13526b1e4efbSRyan Roberts 13536b1e4efbSRyan Roberts return pte; 13546b1e4efbSRyan Roberts } 13556b1e4efbSRyan Roberts 13564602e575SRyan Roberts #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 13574602e575SRyan Roberts static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 13584602e575SRyan Roberts unsigned long addr, pte_t *ptep) 13594602e575SRyan Roberts { 13604602e575SRyan Roberts contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); 13614602e575SRyan Roberts return __ptep_get_and_clear(mm, addr, ptep); 13624602e575SRyan Roberts } 13634602e575SRyan Roberts 13644602e575SRyan Roberts #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 13654602e575SRyan Roberts static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 13664602e575SRyan Roberts unsigned long addr, pte_t *ptep) 13674602e575SRyan Roberts { 13684602e575SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 13694602e575SRyan Roberts 13704602e575SRyan Roberts if (likely(!pte_valid_cont(orig_pte))) 13714602e575SRyan Roberts return __ptep_test_and_clear_young(vma, addr, ptep); 13724602e575SRyan Roberts 13734602e575SRyan Roberts return contpte_ptep_test_and_clear_young(vma, addr, ptep); 13744602e575SRyan Roberts } 13754602e575SRyan Roberts 13764602e575SRyan Roberts #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 13774602e575SRyan Roberts static inline int ptep_clear_flush_young(struct vm_area_struct *vma, 13784602e575SRyan Roberts unsigned long addr, pte_t *ptep) 13794602e575SRyan Roberts { 13804602e575SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 13814602e575SRyan Roberts 13824602e575SRyan Roberts if (likely(!pte_valid_cont(orig_pte))) 13834602e575SRyan Roberts return __ptep_clear_flush_young(vma, addr, ptep); 13844602e575SRyan Roberts 13854602e575SRyan Roberts return contpte_ptep_clear_flush_young(vma, addr, ptep); 13864602e575SRyan Roberts } 13874602e575SRyan Roberts 1388311a6cf2SRyan Roberts #define wrprotect_ptes wrprotect_ptes 1389b972fc6aSRyan Roberts static __always_inline void wrprotect_ptes(struct mm_struct *mm, 1390b972fc6aSRyan Roberts unsigned long addr, pte_t *ptep, unsigned int nr) 1391311a6cf2SRyan Roberts { 1392311a6cf2SRyan Roberts if (likely(nr == 1)) { 1393311a6cf2SRyan Roberts /* 1394311a6cf2SRyan Roberts * Optimization: wrprotect_ptes() can only be called for present 1395311a6cf2SRyan Roberts * ptes so we only need to check contig bit as condition for 1396311a6cf2SRyan Roberts * unfold, and we can remove the contig bit from the pte we read 1397311a6cf2SRyan Roberts * to avoid re-reading. This speeds up fork() which is sensitive 1398311a6cf2SRyan Roberts * for order-0 folios. Equivalent to contpte_try_unfold(). 1399311a6cf2SRyan Roberts */ 1400311a6cf2SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 1401311a6cf2SRyan Roberts 1402311a6cf2SRyan Roberts if (unlikely(pte_cont(orig_pte))) { 1403311a6cf2SRyan Roberts __contpte_try_unfold(mm, addr, ptep, orig_pte); 1404311a6cf2SRyan Roberts orig_pte = pte_mknoncont(orig_pte); 1405311a6cf2SRyan Roberts } 1406311a6cf2SRyan Roberts ___ptep_set_wrprotect(mm, addr, ptep, orig_pte); 1407311a6cf2SRyan Roberts } else { 1408311a6cf2SRyan Roberts contpte_wrprotect_ptes(mm, addr, ptep, nr); 1409311a6cf2SRyan Roberts } 1410311a6cf2SRyan Roberts } 1411311a6cf2SRyan Roberts 14124602e575SRyan Roberts #define __HAVE_ARCH_PTEP_SET_WRPROTECT 14134602e575SRyan Roberts static inline void ptep_set_wrprotect(struct mm_struct *mm, 14144602e575SRyan Roberts unsigned long addr, pte_t *ptep) 14154602e575SRyan Roberts { 1416311a6cf2SRyan Roberts wrprotect_ptes(mm, addr, ptep, 1); 14174602e575SRyan Roberts } 14184602e575SRyan Roberts 14194602e575SRyan Roberts #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 14204602e575SRyan Roberts static inline int ptep_set_access_flags(struct vm_area_struct *vma, 14214602e575SRyan Roberts unsigned long addr, pte_t *ptep, 14224602e575SRyan Roberts pte_t entry, int dirty) 14234602e575SRyan Roberts { 14244602e575SRyan Roberts pte_t orig_pte = __ptep_get(ptep); 14254602e575SRyan Roberts 14264602e575SRyan Roberts entry = pte_mknoncont(entry); 14274602e575SRyan Roberts 14284602e575SRyan Roberts if (likely(!pte_valid_cont(orig_pte))) 14294602e575SRyan Roberts return __ptep_set_access_flags(vma, addr, ptep, entry, dirty); 14304602e575SRyan Roberts 14314602e575SRyan Roberts return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty); 14324602e575SRyan Roberts } 14334602e575SRyan Roberts 14344602e575SRyan Roberts #else /* CONFIG_ARM64_CONTPTE */ 14354602e575SRyan Roberts 14365a00bfd6SRyan Roberts #define ptep_get __ptep_get 14375a00bfd6SRyan Roberts #define set_pte __set_pte 14385a00bfd6SRyan Roberts #define set_ptes __set_ptes 14395a00bfd6SRyan Roberts #define pte_clear __pte_clear 14406b1e4efbSRyan Roberts #define clear_full_ptes __clear_full_ptes 14416b1e4efbSRyan Roberts #define get_and_clear_full_ptes __get_and_clear_full_ptes 14425a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 14435a00bfd6SRyan Roberts #define ptep_get_and_clear __ptep_get_and_clear 14445a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 14455a00bfd6SRyan Roberts #define ptep_test_and_clear_young __ptep_test_and_clear_young 14465a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 14475a00bfd6SRyan Roberts #define ptep_clear_flush_young __ptep_clear_flush_young 14485a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_SET_WRPROTECT 14495a00bfd6SRyan Roberts #define ptep_set_wrprotect __ptep_set_wrprotect 1450311a6cf2SRyan Roberts #define wrprotect_ptes __wrprotect_ptes 14515a00bfd6SRyan Roberts #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 14525a00bfd6SRyan Roberts #define ptep_set_access_flags __ptep_set_access_flags 14535a00bfd6SRyan Roberts 14544602e575SRyan Roberts #endif /* CONFIG_ARM64_CONTPTE */ 14554602e575SRyan Roberts 14564f04d8f0SCatalin Marinas #endif /* !__ASSEMBLY__ */ 14574f04d8f0SCatalin Marinas 14584f04d8f0SCatalin Marinas #endif /* __ASM_PGTABLE_H */ 1459