1caab277bSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */ 24f04d8f0SCatalin Marinas /* 34f04d8f0SCatalin Marinas * Copyright (C) 2012 ARM Ltd. 44f04d8f0SCatalin Marinas */ 54f04d8f0SCatalin Marinas #ifndef __ASM_PGTABLE_H 64f04d8f0SCatalin Marinas #define __ASM_PGTABLE_H 74f04d8f0SCatalin Marinas 82f4b829cSCatalin Marinas #include <asm/bug.h> 94f04d8f0SCatalin Marinas #include <asm/proc-fns.h> 104f04d8f0SCatalin Marinas 114f04d8f0SCatalin Marinas #include <asm/memory.h> 1234bfeea4SCatalin Marinas #include <asm/mte.h> 134f04d8f0SCatalin Marinas #include <asm/pgtable-hwdef.h> 143eca86e7SMark Rutland #include <asm/pgtable-prot.h> 153403e56bSAlex Van Brunt #include <asm/tlbflush.h> 164f04d8f0SCatalin Marinas 174f04d8f0SCatalin Marinas /* 183e1907d5SArd Biesheuvel * VMALLOC range. 1908375198SCatalin Marinas * 20f9040773SArd Biesheuvel * VMALLOC_START: beginning of the kernel vmalloc space 21d432b8d5SArd Biesheuvel * VMALLOC_END: extends to the available space below vmemmap 224f04d8f0SCatalin Marinas */ 23f9040773SArd Biesheuvel #define VMALLOC_START (MODULES_END) 24d432b8d5SArd Biesheuvel #if VA_BITS == VA_BITS_MIN 25b730b0f2SArd Biesheuvel #define VMALLOC_END (VMEMMAP_START - SZ_8M) 26d432b8d5SArd Biesheuvel #else 27d432b8d5SArd Biesheuvel #define VMEMMAP_UNUSED_NPAGES ((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT) 28d432b8d5SArd Biesheuvel #define VMALLOC_END (VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M) 29d432b8d5SArd Biesheuvel #endif 304f04d8f0SCatalin Marinas 317bc1a0f9SArd Biesheuvel #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) 327bc1a0f9SArd Biesheuvel 334f04d8f0SCatalin Marinas #ifndef __ASSEMBLY__ 342f4b829cSCatalin Marinas 353bbf7157SCatalin Marinas #include <asm/cmpxchg.h> 36961faac1SMark Rutland #include <asm/fixmap.h> 372f4b829cSCatalin Marinas #include <linux/mmdebug.h> 3886c9e812SWill Deacon #include <linux/mm_types.h> 3986c9e812SWill Deacon #include <linux/sched.h> 4042b25471SKefeng Wang #include <linux/page_table_check.h> 412f4b829cSCatalin Marinas 42a7ac1cfaSZhenyu Ye #ifdef CONFIG_TRANSPARENT_HUGEPAGE 43a7ac1cfaSZhenyu Ye #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE 44a7ac1cfaSZhenyu Ye 45a7ac1cfaSZhenyu Ye /* Set stride and tlb_level in flush_*_tlb_range */ 46a7ac1cfaSZhenyu Ye #define flush_pmd_tlb_range(vma, addr, end) \ 47a7ac1cfaSZhenyu Ye __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2) 48a7ac1cfaSZhenyu Ye #define flush_pud_tlb_range(vma, addr, end) \ 49a7ac1cfaSZhenyu Ye __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1) 50a7ac1cfaSZhenyu Ye #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 51a7ac1cfaSZhenyu Ye 52d0637c50SBarry Song static inline bool arch_thp_swp_supported(void) 53d0637c50SBarry Song { 54d0637c50SBarry Song return !system_supports_mte(); 55d0637c50SBarry Song } 56d0637c50SBarry Song #define arch_thp_swp_supported arch_thp_swp_supported 57d0637c50SBarry Song 584f04d8f0SCatalin Marinas /* 596a1bdb17SWill Deacon * Outside of a few very special situations (e.g. hibernation), we always 606a1bdb17SWill Deacon * use broadcast TLB invalidation instructions, therefore a spurious page 616a1bdb17SWill Deacon * fault on one CPU which has been handled concurrently by another CPU 626a1bdb17SWill Deacon * does not need to perform additional invalidation. 636a1bdb17SWill Deacon */ 6499c29133SGerald Schaefer #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0) 656a1bdb17SWill Deacon 666a1bdb17SWill Deacon /* 674f04d8f0SCatalin Marinas * ZERO_PAGE is a global shared page that is always zero: used 684f04d8f0SCatalin Marinas * for zero-mapped memory areas etc.. 694f04d8f0SCatalin Marinas */ 705227cfa7SMark Rutland extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 712077be67SLaura Abbott #define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page)) 724f04d8f0SCatalin Marinas 732cf660ebSGavin Shan #define pte_ERROR(e) \ 742cf660ebSGavin Shan pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e)) 757078db46SCatalin Marinas 7675387b92SKristina Martsenko /* 7775387b92SKristina Martsenko * Macros to convert between a physical address and its placement in a 7875387b92SKristina Martsenko * page table entry, taking care of 52-bit addresses. 7975387b92SKristina Martsenko */ 8075387b92SKristina Martsenko #ifdef CONFIG_ARM64_PA_BITS_52 81c7c386fbSArnd Bergmann static inline phys_addr_t __pte_to_phys(pte_t pte) 82c7c386fbSArnd Bergmann { 83925a0eb4SArd Biesheuvel pte_val(pte) &= ~PTE_MAYBE_SHARED; 84c7c386fbSArnd Bergmann return (pte_val(pte) & PTE_ADDR_LOW) | 85a4ee2861SAnshuman Khandual ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT); 86c7c386fbSArnd Bergmann } 87c7c386fbSArnd Bergmann static inline pteval_t __phys_to_pte_val(phys_addr_t phys) 88c7c386fbSArnd Bergmann { 89925a0eb4SArd Biesheuvel return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK; 90c7c386fbSArnd Bergmann } 9175387b92SKristina Martsenko #else 92925a0eb4SArd Biesheuvel #define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW) 9375387b92SKristina Martsenko #define __phys_to_pte_val(phys) (phys) 9475387b92SKristina Martsenko #endif 954f04d8f0SCatalin Marinas 9675387b92SKristina Martsenko #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT) 9775387b92SKristina Martsenko #define pfn_pte(pfn,prot) \ 9875387b92SKristina Martsenko __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 994f04d8f0SCatalin Marinas 1004f04d8f0SCatalin Marinas #define pte_none(pte) (!pte_val(pte)) 1014f04d8f0SCatalin Marinas #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) 1024f04d8f0SCatalin Marinas #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) 1037078db46SCatalin Marinas 1044f04d8f0SCatalin Marinas /* 1054f04d8f0SCatalin Marinas * The following only work if pte_present(). Undefined behaviour otherwise. 1064f04d8f0SCatalin Marinas */ 10784fe6826SSteve Capper #define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) 10884fe6826SSteve Capper #define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) 10984fe6826SSteve Capper #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) 11084fe6826SSteve Capper #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) 111d0ba9612SAnshuman Khandual #define pte_rdonly(pte) (!!(pte_val(pte) & PTE_RDONLY)) 11242b25471SKefeng Wang #define pte_user(pte) (!!(pte_val(pte) & PTE_USER)) 113ec663d96SCatalin Marinas #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) 11493ef666aSJeremy Linton #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) 11573b20c84SRobin Murphy #define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP)) 11634bfeea4SCatalin Marinas #define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \ 11734bfeea4SCatalin Marinas PTE_ATTRINDX(MT_NORMAL_TAGGED)) 1184f04d8f0SCatalin Marinas 119d27cfa1fSArd Biesheuvel #define pte_cont_addr_end(addr, end) \ 120d27cfa1fSArd Biesheuvel ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \ 121d27cfa1fSArd Biesheuvel (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 122d27cfa1fSArd Biesheuvel }) 123d27cfa1fSArd Biesheuvel 124d27cfa1fSArd Biesheuvel #define pmd_cont_addr_end(addr, end) \ 125d27cfa1fSArd Biesheuvel ({ unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \ 126d27cfa1fSArd Biesheuvel (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 127d27cfa1fSArd Biesheuvel }) 128d27cfa1fSArd Biesheuvel 129d0ba9612SAnshuman Khandual #define pte_hw_dirty(pte) (pte_write(pte) && !pte_rdonly(pte)) 1302f4b829cSCatalin Marinas #define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) 1312f4b829cSCatalin Marinas #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) 1322f4b829cSCatalin Marinas 133766ffb69SWill Deacon #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) 13418107f8aSVladimir Murzin /* 13518107f8aSVladimir Murzin * Execute-only user mappings do not have the PTE_USER bit set. All valid 13618107f8aSVladimir Murzin * kernel mappings have the PTE_UXN bit set. 13718107f8aSVladimir Murzin */ 138ec663d96SCatalin Marinas #define pte_valid_not_user(pte) \ 13918107f8aSVladimir Murzin ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) 14076c714beSWill Deacon /* 14176c714beSWill Deacon * Could the pte be present in the TLB? We must check mm_tlb_flush_pending 14276c714beSWill Deacon * so that we don't erroneously return false for pages that have been 14376c714beSWill Deacon * remapped as PROT_NONE but are yet to be flushed from the TLB. 14407509e10SWill Deacon * Note that we can't make any assumptions based on the state of the access 14507509e10SWill Deacon * flag, since ptep_clear_flush_young() elides a DSB when invalidating the 14607509e10SWill Deacon * TLB. 14776c714beSWill Deacon */ 14876c714beSWill Deacon #define pte_accessible(mm, pte) \ 14907509e10SWill Deacon (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) 1504f04d8f0SCatalin Marinas 1516218f96cSCatalin Marinas /* 15218107f8aSVladimir Murzin * p??_access_permitted() is true for valid user mappings (PTE_USER 15318107f8aSVladimir Murzin * bit set, subject to the write permission check). For execute-only 15418107f8aSVladimir Murzin * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits 15518107f8aSVladimir Murzin * not set) must return false. PROT_NONE mappings do not have the 15618107f8aSVladimir Murzin * PTE_VALID bit set. 1576218f96cSCatalin Marinas */ 1586218f96cSCatalin Marinas #define pte_access_permitted(pte, write) \ 15918107f8aSVladimir Murzin (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte))) 1606218f96cSCatalin Marinas #define pmd_access_permitted(pmd, write) \ 1616218f96cSCatalin Marinas (pte_access_permitted(pmd_pte(pmd), (write))) 1626218f96cSCatalin Marinas #define pud_access_permitted(pud, write) \ 1636218f96cSCatalin Marinas (pte_access_permitted(pud_pte(pud), (write))) 1646218f96cSCatalin Marinas 165b6d4f280SLaura Abbott static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) 166b6d4f280SLaura Abbott { 167b6d4f280SLaura Abbott pte_val(pte) &= ~pgprot_val(prot); 168b6d4f280SLaura Abbott return pte; 169b6d4f280SLaura Abbott } 170b6d4f280SLaura Abbott 171b6d4f280SLaura Abbott static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) 172b6d4f280SLaura Abbott { 173b6d4f280SLaura Abbott pte_val(pte) |= pgprot_val(prot); 174b6d4f280SLaura Abbott return pte; 175b6d4f280SLaura Abbott } 176b6d4f280SLaura Abbott 177b65399f6SAnshuman Khandual static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) 178b65399f6SAnshuman Khandual { 179b65399f6SAnshuman Khandual pmd_val(pmd) &= ~pgprot_val(prot); 180b65399f6SAnshuman Khandual return pmd; 181b65399f6SAnshuman Khandual } 182b65399f6SAnshuman Khandual 183b65399f6SAnshuman Khandual static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) 184b65399f6SAnshuman Khandual { 185b65399f6SAnshuman Khandual pmd_val(pmd) |= pgprot_val(prot); 186b65399f6SAnshuman Khandual return pmd; 187b65399f6SAnshuman Khandual } 188b65399f6SAnshuman Khandual 1892f0584f3SRick Edgecombe static inline pte_t pte_mkwrite_novma(pte_t pte) 19044b6dfc5SSteve Capper { 19173e86cb0SCatalin Marinas pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); 19273e86cb0SCatalin Marinas pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 19373e86cb0SCatalin Marinas return pte; 19444b6dfc5SSteve Capper } 19544b6dfc5SSteve Capper 19644b6dfc5SSteve Capper static inline pte_t pte_mkclean(pte_t pte) 19744b6dfc5SSteve Capper { 1988781bcbcSSteve Capper pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); 1998781bcbcSSteve Capper pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 2008781bcbcSSteve Capper 2018781bcbcSSteve Capper return pte; 20244b6dfc5SSteve Capper } 20344b6dfc5SSteve Capper 20444b6dfc5SSteve Capper static inline pte_t pte_mkdirty(pte_t pte) 20544b6dfc5SSteve Capper { 2068781bcbcSSteve Capper pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 2078781bcbcSSteve Capper 2088781bcbcSSteve Capper if (pte_write(pte)) 2098781bcbcSSteve Capper pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 2108781bcbcSSteve Capper 2118781bcbcSSteve Capper return pte; 21244b6dfc5SSteve Capper } 21344b6dfc5SSteve Capper 214ff1712f9SWill Deacon static inline pte_t pte_wrprotect(pte_t pte) 215ff1712f9SWill Deacon { 216ff1712f9SWill Deacon /* 217ff1712f9SWill Deacon * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY 218ff1712f9SWill Deacon * clear), set the PTE_DIRTY bit. 219ff1712f9SWill Deacon */ 220ff1712f9SWill Deacon if (pte_hw_dirty(pte)) 2216477c388SAnshuman Khandual pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 222ff1712f9SWill Deacon 223ff1712f9SWill Deacon pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); 224ff1712f9SWill Deacon pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 225ff1712f9SWill Deacon return pte; 226ff1712f9SWill Deacon } 227ff1712f9SWill Deacon 22844b6dfc5SSteve Capper static inline pte_t pte_mkold(pte_t pte) 22944b6dfc5SSteve Capper { 230b6d4f280SLaura Abbott return clear_pte_bit(pte, __pgprot(PTE_AF)); 23144b6dfc5SSteve Capper } 23244b6dfc5SSteve Capper 23344b6dfc5SSteve Capper static inline pte_t pte_mkyoung(pte_t pte) 23444b6dfc5SSteve Capper { 235b6d4f280SLaura Abbott return set_pte_bit(pte, __pgprot(PTE_AF)); 23644b6dfc5SSteve Capper } 23744b6dfc5SSteve Capper 23844b6dfc5SSteve Capper static inline pte_t pte_mkspecial(pte_t pte) 23944b6dfc5SSteve Capper { 240b6d4f280SLaura Abbott return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); 24144b6dfc5SSteve Capper } 2424f04d8f0SCatalin Marinas 24393ef666aSJeremy Linton static inline pte_t pte_mkcont(pte_t pte) 24493ef666aSJeremy Linton { 24566b3923aSDavid Woods pte = set_pte_bit(pte, __pgprot(PTE_CONT)); 24666b3923aSDavid Woods return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE)); 24793ef666aSJeremy Linton } 24893ef666aSJeremy Linton 24993ef666aSJeremy Linton static inline pte_t pte_mknoncont(pte_t pte) 25093ef666aSJeremy Linton { 25193ef666aSJeremy Linton return clear_pte_bit(pte, __pgprot(PTE_CONT)); 25293ef666aSJeremy Linton } 25393ef666aSJeremy Linton 2545ebe3a44SJames Morse static inline pte_t pte_mkpresent(pte_t pte) 2555ebe3a44SJames Morse { 2565ebe3a44SJames Morse return set_pte_bit(pte, __pgprot(PTE_VALID)); 2575ebe3a44SJames Morse } 2585ebe3a44SJames Morse 25966b3923aSDavid Woods static inline pmd_t pmd_mkcont(pmd_t pmd) 26066b3923aSDavid Woods { 26166b3923aSDavid Woods return __pmd(pmd_val(pmd) | PMD_SECT_CONT); 26266b3923aSDavid Woods } 26366b3923aSDavid Woods 26473b20c84SRobin Murphy static inline pte_t pte_mkdevmap(pte_t pte) 26573b20c84SRobin Murphy { 26630e23538SJia He return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); 26773b20c84SRobin Murphy } 26873b20c84SRobin Murphy 2694f04d8f0SCatalin Marinas static inline void set_pte(pte_t *ptep, pte_t pte) 2704f04d8f0SCatalin Marinas { 27120a004e7SWill Deacon WRITE_ONCE(*ptep, pte); 2727f0b1bf0SCatalin Marinas 2737f0b1bf0SCatalin Marinas /* 2747f0b1bf0SCatalin Marinas * Only if the new pte is valid and kernel, otherwise TLB maintenance 2757f0b1bf0SCatalin Marinas * or update_mmu_cache() have the necessary barriers. 2767f0b1bf0SCatalin Marinas */ 277d0b7a302SWill Deacon if (pte_valid_not_user(pte)) { 2787f0b1bf0SCatalin Marinas dsb(ishst); 279d0b7a302SWill Deacon isb(); 280d0b7a302SWill Deacon } 2814f04d8f0SCatalin Marinas } 2824f04d8f0SCatalin Marinas 283907e21c1SShaokun Zhang extern void __sync_icache_dcache(pte_t pteval); 284004fc58fSAnshuman Khandual bool pgattr_change_is_safe(u64 old, u64 new); 2854f04d8f0SCatalin Marinas 2862f4b829cSCatalin Marinas /* 2872f4b829cSCatalin Marinas * PTE bits configuration in the presence of hardware Dirty Bit Management 2882f4b829cSCatalin Marinas * (PTE_WRITE == PTE_DBM): 2892f4b829cSCatalin Marinas * 2902f4b829cSCatalin Marinas * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw) 2912f4b829cSCatalin Marinas * 0 0 | 1 0 0 2922f4b829cSCatalin Marinas * 0 1 | 1 1 0 2932f4b829cSCatalin Marinas * 1 0 | 1 0 1 2942f4b829cSCatalin Marinas * 1 1 | 0 1 x 2952f4b829cSCatalin Marinas * 2962f4b829cSCatalin Marinas * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via 2972f4b829cSCatalin Marinas * the page fault mechanism. Checking the dirty status of a pte becomes: 2982f4b829cSCatalin Marinas * 299b847415cSCatalin Marinas * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) 3002f4b829cSCatalin Marinas */ 3019b604722SMark Rutland 302004fc58fSAnshuman Khandual static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, 3039b604722SMark Rutland pte_t pte) 3044f04d8f0SCatalin Marinas { 30520a004e7SWill Deacon pte_t old_pte; 30620a004e7SWill Deacon 3079b604722SMark Rutland if (!IS_ENABLED(CONFIG_DEBUG_VM)) 3089b604722SMark Rutland return; 3099b604722SMark Rutland 3109b604722SMark Rutland old_pte = READ_ONCE(*ptep); 3119b604722SMark Rutland 3129b604722SMark Rutland if (!pte_valid(old_pte) || !pte_valid(pte)) 3139b604722SMark Rutland return; 3149b604722SMark Rutland if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1) 3159b604722SMark Rutland return; 31602522463SWill Deacon 3172f4b829cSCatalin Marinas /* 3189b604722SMark Rutland * Check for potential race with hardware updates of the pte 3199b604722SMark Rutland * (ptep_set_access_flags safely changes valid ptes without going 3209b604722SMark Rutland * through an invalid entry). 3212f4b829cSCatalin Marinas */ 32282d34008SCatalin Marinas VM_WARN_ONCE(!pte_young(pte), 32382d34008SCatalin Marinas "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", 32420a004e7SWill Deacon __func__, pte_val(old_pte), pte_val(pte)); 32520a004e7SWill Deacon VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte), 32682d34008SCatalin Marinas "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", 32720a004e7SWill Deacon __func__, pte_val(old_pte), pte_val(pte)); 328004fc58fSAnshuman Khandual VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)), 329004fc58fSAnshuman Khandual "%s: unsafe attribute change: 0x%016llx -> 0x%016llx", 330004fc58fSAnshuman Khandual __func__, pte_val(old_pte), pte_val(pte)); 3312f4b829cSCatalin Marinas } 3322f4b829cSCatalin Marinas 3333425cec4SRyan Roberts static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages) 3349b604722SMark Rutland { 3359b604722SMark Rutland if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) 3369b604722SMark Rutland __sync_icache_dcache(pte); 3379b604722SMark Rutland 33869e3b846SSteven Price /* 33969e3b846SSteven Price * If the PTE would provide user space access to the tags associated 34069e3b846SSteven Price * with it then ensure that the MTE tags are synchronised. Although 34169e3b846SSteven Price * pte_access_permitted() returns false for exec only mappings, they 34269e3b846SSteven Price * don't expose tags (instruction fetches don't check tags). 34369e3b846SSteven Price */ 34469e3b846SSteven Price if (system_supports_mte() && pte_access_permitted(pte, false) && 345332c151cSPeter Collingbourne !pte_special(pte) && pte_tagged(pte)) 3463425cec4SRyan Roberts mte_sync_tags(pte, nr_pages); 3474f04d8f0SCatalin Marinas } 3484f04d8f0SCatalin Marinas 349dba2ff49SCatalin Marinas static inline void set_ptes(struct mm_struct *mm, 350dba2ff49SCatalin Marinas unsigned long __always_unused addr, 3514a169d61SMatthew Wilcox (Oracle) pte_t *ptep, pte_t pte, unsigned int nr) 35242b25471SKefeng Wang { 3534a169d61SMatthew Wilcox (Oracle) page_table_check_ptes_set(mm, ptep, pte, nr); 3543425cec4SRyan Roberts __sync_cache_and_tags(pte, nr); 3554a169d61SMatthew Wilcox (Oracle) 3564a169d61SMatthew Wilcox (Oracle) for (;;) { 3573425cec4SRyan Roberts __check_safe_pte_update(mm, ptep, pte); 3583425cec4SRyan Roberts set_pte(ptep, pte); 3594a169d61SMatthew Wilcox (Oracle) if (--nr == 0) 3604a169d61SMatthew Wilcox (Oracle) break; 3614a169d61SMatthew Wilcox (Oracle) ptep++; 3624a169d61SMatthew Wilcox (Oracle) pte_val(pte) += PAGE_SIZE; 36342b25471SKefeng Wang } 3644a169d61SMatthew Wilcox (Oracle) } 3654a169d61SMatthew Wilcox (Oracle) #define set_ptes set_ptes 36642b25471SKefeng Wang 3674f04d8f0SCatalin Marinas /* 3684f04d8f0SCatalin Marinas * Huge pte definitions. 3694f04d8f0SCatalin Marinas */ 370084bd298SSteve Capper #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) 371084bd298SSteve Capper 372084bd298SSteve Capper /* 373084bd298SSteve Capper * Hugetlb definitions. 374084bd298SSteve Capper */ 37566b3923aSDavid Woods #define HUGE_MAX_HSTATE 4 376084bd298SSteve Capper #define HPAGE_SHIFT PMD_SHIFT 377084bd298SSteve Capper #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) 378084bd298SSteve Capper #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 379084bd298SSteve Capper #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 3804f04d8f0SCatalin Marinas 38175387b92SKristina Martsenko static inline pte_t pgd_pte(pgd_t pgd) 38275387b92SKristina Martsenko { 38375387b92SKristina Martsenko return __pte(pgd_val(pgd)); 38475387b92SKristina Martsenko } 38575387b92SKristina Martsenko 386e9f63768SMike Rapoport static inline pte_t p4d_pte(p4d_t p4d) 387e9f63768SMike Rapoport { 388e9f63768SMike Rapoport return __pte(p4d_val(p4d)); 389e9f63768SMike Rapoport } 390e9f63768SMike Rapoport 39129e56940SSteve Capper static inline pte_t pud_pte(pud_t pud) 39229e56940SSteve Capper { 39329e56940SSteve Capper return __pte(pud_val(pud)); 39429e56940SSteve Capper } 39529e56940SSteve Capper 396eb3f0624SPunit Agrawal static inline pud_t pte_pud(pte_t pte) 397eb3f0624SPunit Agrawal { 398eb3f0624SPunit Agrawal return __pud(pte_val(pte)); 399eb3f0624SPunit Agrawal } 400eb3f0624SPunit Agrawal 40129e56940SSteve Capper static inline pmd_t pud_pmd(pud_t pud) 40229e56940SSteve Capper { 40329e56940SSteve Capper return __pmd(pud_val(pud)); 40429e56940SSteve Capper } 40529e56940SSteve Capper 4069c7e535fSSteve Capper static inline pte_t pmd_pte(pmd_t pmd) 4079c7e535fSSteve Capper { 4089c7e535fSSteve Capper return __pte(pmd_val(pmd)); 4099c7e535fSSteve Capper } 410af074848SSteve Capper 4119c7e535fSSteve Capper static inline pmd_t pte_pmd(pte_t pte) 4129c7e535fSSteve Capper { 4139c7e535fSSteve Capper return __pmd(pte_val(pte)); 4149c7e535fSSteve Capper } 415af074848SSteve Capper 416f7f0097aSAnshuman Khandual static inline pgprot_t mk_pud_sect_prot(pgprot_t prot) 4178ce837ceSArd Biesheuvel { 418f7f0097aSAnshuman Khandual return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT); 419f7f0097aSAnshuman Khandual } 420f7f0097aSAnshuman Khandual 421f7f0097aSAnshuman Khandual static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot) 422f7f0097aSAnshuman Khandual { 423f7f0097aSAnshuman Khandual return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT); 4248ce837ceSArd Biesheuvel } 4258ce837ceSArd Biesheuvel 426570ef363SDavid Hildenbrand static inline pte_t pte_swp_mkexclusive(pte_t pte) 427570ef363SDavid Hildenbrand { 428570ef363SDavid Hildenbrand return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); 429570ef363SDavid Hildenbrand } 430570ef363SDavid Hildenbrand 431570ef363SDavid Hildenbrand static inline int pte_swp_exclusive(pte_t pte) 432570ef363SDavid Hildenbrand { 433570ef363SDavid Hildenbrand return pte_val(pte) & PTE_SWP_EXCLUSIVE; 434570ef363SDavid Hildenbrand } 435570ef363SDavid Hildenbrand 436570ef363SDavid Hildenbrand static inline pte_t pte_swp_clear_exclusive(pte_t pte) 437570ef363SDavid Hildenbrand { 438570ef363SDavid Hildenbrand return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); 439570ef363SDavid Hildenbrand } 440570ef363SDavid Hildenbrand 441893dea9cSKefeng Wang /* 442893dea9cSKefeng Wang * Select all bits except the pfn 443893dea9cSKefeng Wang */ 444893dea9cSKefeng Wang static inline pgprot_t pte_pgprot(pte_t pte) 445893dea9cSKefeng Wang { 446893dea9cSKefeng Wang unsigned long pfn = pte_pfn(pte); 447893dea9cSKefeng Wang 448893dea9cSKefeng Wang return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); 449893dea9cSKefeng Wang } 450893dea9cSKefeng Wang 45156166230SGanapatrao Kulkarni #ifdef CONFIG_NUMA_BALANCING 45256166230SGanapatrao Kulkarni /* 453ca5999fdSMike Rapoport * See the comment in include/linux/pgtable.h 45456166230SGanapatrao Kulkarni */ 45556166230SGanapatrao Kulkarni static inline int pte_protnone(pte_t pte) 45656166230SGanapatrao Kulkarni { 45756166230SGanapatrao Kulkarni return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE; 45856166230SGanapatrao Kulkarni } 45956166230SGanapatrao Kulkarni 46056166230SGanapatrao Kulkarni static inline int pmd_protnone(pmd_t pmd) 46156166230SGanapatrao Kulkarni { 46256166230SGanapatrao Kulkarni return pte_protnone(pmd_pte(pmd)); 46356166230SGanapatrao Kulkarni } 46456166230SGanapatrao Kulkarni #endif 46556166230SGanapatrao Kulkarni 466b65399f6SAnshuman Khandual #define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID)) 467b65399f6SAnshuman Khandual 468b65399f6SAnshuman Khandual static inline int pmd_present(pmd_t pmd) 469b65399f6SAnshuman Khandual { 470b65399f6SAnshuman Khandual return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd); 471b65399f6SAnshuman Khandual } 472b65399f6SAnshuman Khandual 473af074848SSteve Capper /* 474af074848SSteve Capper * THP definitions. 475af074848SSteve Capper */ 476af074848SSteve Capper 477af074848SSteve Capper #ifdef CONFIG_TRANSPARENT_HUGEPAGE 478b65399f6SAnshuman Khandual static inline int pmd_trans_huge(pmd_t pmd) 479b65399f6SAnshuman Khandual { 480b65399f6SAnshuman Khandual return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); 481b65399f6SAnshuman Khandual } 48229e56940SSteve Capper #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 483af074848SSteve Capper 484c164e038SKirill A. Shutemov #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) 4859c7e535fSSteve Capper #define pmd_young(pmd) pte_young(pmd_pte(pmd)) 4860795edafSWill Deacon #define pmd_valid(pmd) pte_valid(pmd_pte(pmd)) 48742b25471SKefeng Wang #define pmd_user(pmd) pte_user(pmd_pte(pmd)) 48842b25471SKefeng Wang #define pmd_user_exec(pmd) pte_user_exec(pmd_pte(pmd)) 489d55863dbSPeter Zijlstra #define pmd_cont(pmd) pte_cont(pmd_pte(pmd)) 4909c7e535fSSteve Capper #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) 4919c7e535fSSteve Capper #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) 4922f0584f3SRick Edgecombe #define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))) 49305ee26d9SMinchan Kim #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) 4949c7e535fSSteve Capper #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) 4959c7e535fSSteve Capper #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) 496b65399f6SAnshuman Khandual 497b65399f6SAnshuman Khandual static inline pmd_t pmd_mkinvalid(pmd_t pmd) 498b65399f6SAnshuman Khandual { 499b65399f6SAnshuman Khandual pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID)); 500b65399f6SAnshuman Khandual pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID)); 501b65399f6SAnshuman Khandual 502b65399f6SAnshuman Khandual return pmd; 503b65399f6SAnshuman Khandual } 504af074848SSteve Capper 5050dbd3b18SSuzuki K Poulose #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) 5060dbd3b18SSuzuki K Poulose 5079c7e535fSSteve Capper #define pmd_write(pmd) pte_write(pmd_pte(pmd)) 508af074848SSteve Capper 509af074848SSteve Capper #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) 510af074848SSteve Capper 51173b20c84SRobin Murphy #ifdef CONFIG_TRANSPARENT_HUGEPAGE 51273b20c84SRobin Murphy #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) 51373b20c84SRobin Murphy #endif 51430e23538SJia He static inline pmd_t pmd_mkdevmap(pmd_t pmd) 51530e23538SJia He { 51630e23538SJia He return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP))); 51730e23538SJia He } 51873b20c84SRobin Murphy 51975387b92SKristina Martsenko #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) 52075387b92SKristina Martsenko #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) 52175387b92SKristina Martsenko #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) 52275387b92SKristina Martsenko #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 523af074848SSteve Capper #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) 524af074848SSteve Capper 52535a63966SPunit Agrawal #define pud_young(pud) pte_young(pud_pte(pud)) 526eb3f0624SPunit Agrawal #define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) 52729e56940SSteve Capper #define pud_write(pud) pte_write(pud_pte(pud)) 52875387b92SKristina Martsenko 529b8e0ba7cSPunit Agrawal #define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT)) 530b8e0ba7cSPunit Agrawal 53175387b92SKristina Martsenko #define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud)) 53275387b92SKristina Martsenko #define __phys_to_pud_val(phys) __phys_to_pte_val(phys) 53375387b92SKristina Martsenko #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) 53475387b92SKristina Martsenko #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 535af074848SSteve Capper 536dba2ff49SCatalin Marinas static inline void __set_pte_at(struct mm_struct *mm, 537dba2ff49SCatalin Marinas unsigned long __always_unused addr, 5383425cec4SRyan Roberts pte_t *ptep, pte_t pte, unsigned int nr) 5393425cec4SRyan Roberts { 5403425cec4SRyan Roberts __sync_cache_and_tags(pte, nr); 5413425cec4SRyan Roberts __check_safe_pte_update(mm, ptep, pte); 5423425cec4SRyan Roberts set_pte(ptep, pte); 5433425cec4SRyan Roberts } 5443425cec4SRyan Roberts 54542b25471SKefeng Wang static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 54642b25471SKefeng Wang pmd_t *pmdp, pmd_t pmd) 54742b25471SKefeng Wang { 548a3b83713SKemeng Shi page_table_check_pmd_set(mm, pmdp, pmd); 5493425cec4SRyan Roberts return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), 5503425cec4SRyan Roberts PMD_SIZE >> PAGE_SHIFT); 55142b25471SKefeng Wang } 55242b25471SKefeng Wang 55342b25471SKefeng Wang static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, 55442b25471SKefeng Wang pud_t *pudp, pud_t pud) 55542b25471SKefeng Wang { 5566d144436SKemeng Shi page_table_check_pud_set(mm, pudp, pud); 5573425cec4SRyan Roberts return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud), 5583425cec4SRyan Roberts PUD_SIZE >> PAGE_SHIFT); 55942b25471SKefeng Wang } 560af074848SSteve Capper 561e9f63768SMike Rapoport #define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d)) 562e9f63768SMike Rapoport #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys) 563e9f63768SMike Rapoport 56475387b92SKristina Martsenko #define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd)) 56575387b92SKristina Martsenko #define __phys_to_pgd_val(phys) __phys_to_pte_val(phys) 56675387b92SKristina Martsenko 567a501e324SCatalin Marinas #define __pgprot_modify(prot,mask,bits) \ 568a501e324SCatalin Marinas __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) 569a501e324SCatalin Marinas 570cca98e9fSChristoph Hellwig #define pgprot_nx(prot) \ 571034aa9cdSWill Deacon __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN) 572cca98e9fSChristoph Hellwig 573af074848SSteve Capper /* 5744f04d8f0SCatalin Marinas * Mark the prot value as uncacheable and unbufferable. 5754f04d8f0SCatalin Marinas */ 5764f04d8f0SCatalin Marinas #define pgprot_noncached(prot) \ 577de2db743SCatalin Marinas __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) 5784f04d8f0SCatalin Marinas #define pgprot_writecombine(prot) \ 579de2db743SCatalin Marinas __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 580d1e6dc91SLiviu Dudau #define pgprot_device(prot) \ 581d1e6dc91SLiviu Dudau __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) 582d15dfd31SCatalin Marinas #define pgprot_tagged(prot) \ 583d15dfd31SCatalin Marinas __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED)) 584d15dfd31SCatalin Marinas #define pgprot_mhp pgprot_tagged 5853e4e1d3fSChristoph Hellwig /* 5863e4e1d3fSChristoph Hellwig * DMA allocations for non-coherent devices use what the Arm architecture calls 5873e4e1d3fSChristoph Hellwig * "Normal non-cacheable" memory, which permits speculation, unaligned accesses 5883e4e1d3fSChristoph Hellwig * and merging of writes. This is different from "Device-nGnR[nE]" memory which 5893e4e1d3fSChristoph Hellwig * is intended for MMIO and thus forbids speculation, preserves access size, 5903e4e1d3fSChristoph Hellwig * requires strict alignment and can also force write responses to come from the 5913e4e1d3fSChristoph Hellwig * endpoint. 5923e4e1d3fSChristoph Hellwig */ 593419e2f18SChristoph Hellwig #define pgprot_dmacoherent(prot) \ 594419e2f18SChristoph Hellwig __pgprot_modify(prot, PTE_ATTRINDX_MASK, \ 595419e2f18SChristoph Hellwig PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 596419e2f18SChristoph Hellwig 5974f04d8f0SCatalin Marinas #define __HAVE_PHYS_MEM_ACCESS_PROT 5984f04d8f0SCatalin Marinas struct file; 5994f04d8f0SCatalin Marinas extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 6004f04d8f0SCatalin Marinas unsigned long size, pgprot_t vma_prot); 6014f04d8f0SCatalin Marinas 6024f04d8f0SCatalin Marinas #define pmd_none(pmd) (!pmd_val(pmd)) 6034f04d8f0SCatalin Marinas 60436311607SMarc Zyngier #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 60536311607SMarc Zyngier PMD_TYPE_TABLE) 60636311607SMarc Zyngier #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 60736311607SMarc Zyngier PMD_TYPE_SECT) 60823bc8f69SMuchun Song #define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd)) 609e377ab82SAnshuman Khandual #define pmd_bad(pmd) (!pmd_table(pmd)) 61036311607SMarc Zyngier 611d55863dbSPeter Zijlstra #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE) 612d55863dbSPeter Zijlstra #define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE) 613d55863dbSPeter Zijlstra 614cac4b8cdSCatalin Marinas #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 6157d4e2dcfSQian Cai static inline bool pud_sect(pud_t pud) { return false; } 6167d4e2dcfSQian Cai static inline bool pud_table(pud_t pud) { return true; } 617206a2a73SSteve Capper #else 618206a2a73SSteve Capper #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 619206a2a73SSteve Capper PUD_TYPE_SECT) 620523d6e9fSzhichang.yuan #define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 621523d6e9fSzhichang.yuan PUD_TYPE_TABLE) 622206a2a73SSteve Capper #endif 62336311607SMarc Zyngier 6246ed8a3a0SArd Biesheuvel extern pgd_t init_pg_dir[]; 6252330b7caSJun Yao extern pgd_t init_pg_end[]; 6266ed8a3a0SArd Biesheuvel extern pgd_t swapper_pg_dir[]; 6276ed8a3a0SArd Biesheuvel extern pgd_t idmap_pg_dir[]; 6286ed8a3a0SArd Biesheuvel extern pgd_t tramp_pg_dir[]; 6296ed8a3a0SArd Biesheuvel extern pgd_t reserved_pg_dir[]; 6302330b7caSJun Yao 6312330b7caSJun Yao extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); 6322330b7caSJun Yao 6332330b7caSJun Yao static inline bool in_swapper_pgdir(void *addr) 6342330b7caSJun Yao { 6352330b7caSJun Yao return ((unsigned long)addr & PAGE_MASK) == 6362330b7caSJun Yao ((unsigned long)swapper_pg_dir & PAGE_MASK); 6372330b7caSJun Yao } 6382330b7caSJun Yao 6394f04d8f0SCatalin Marinas static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 6404f04d8f0SCatalin Marinas { 641e9ed821bSJames Morse #ifdef __PAGETABLE_PMD_FOLDED 642e9ed821bSJames Morse if (in_swapper_pgdir(pmdp)) { 6432330b7caSJun Yao set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd))); 6442330b7caSJun Yao return; 6452330b7caSJun Yao } 646e9ed821bSJames Morse #endif /* __PAGETABLE_PMD_FOLDED */ 6472330b7caSJun Yao 64820a004e7SWill Deacon WRITE_ONCE(*pmdp, pmd); 6490795edafSWill Deacon 650d0b7a302SWill Deacon if (pmd_valid(pmd)) { 65198f7685eSWill Deacon dsb(ishst); 652d0b7a302SWill Deacon isb(); 653d0b7a302SWill Deacon } 6544f04d8f0SCatalin Marinas } 6554f04d8f0SCatalin Marinas 6564f04d8f0SCatalin Marinas static inline void pmd_clear(pmd_t *pmdp) 6574f04d8f0SCatalin Marinas { 6584f04d8f0SCatalin Marinas set_pmd(pmdp, __pmd(0)); 6594f04d8f0SCatalin Marinas } 6604f04d8f0SCatalin Marinas 661dca56dcaSMark Rutland static inline phys_addr_t pmd_page_paddr(pmd_t pmd) 6624f04d8f0SCatalin Marinas { 66375387b92SKristina Martsenko return __pmd_to_phys(pmd); 6644f04d8f0SCatalin Marinas } 6654f04d8f0SCatalin Marinas 666974b9b2cSMike Rapoport static inline unsigned long pmd_page_vaddr(pmd_t pmd) 667974b9b2cSMike Rapoport { 668974b9b2cSMike Rapoport return (unsigned long)__va(pmd_page_paddr(pmd)); 669974b9b2cSMike Rapoport } 67074dd022fSQian Cai 671053520f7SMark Rutland /* Find an entry in the third-level page table. */ 672f069fabaSWill Deacon #define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t)) 673053520f7SMark Rutland 674961faac1SMark Rutland #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) 675961faac1SMark Rutland #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr)) 676961faac1SMark Rutland #define pte_clear_fixmap() clear_fixmap(FIX_PTE) 677961faac1SMark Rutland 67868ecabd0SGavin Shan #define pmd_page(pmd) phys_to_page(__pmd_to_phys(pmd)) 6794f04d8f0SCatalin Marinas 6806533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */ 6816533945aSArd Biesheuvel #define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr)))) 6826533945aSArd Biesheuvel 6834f04d8f0SCatalin Marinas /* 6844f04d8f0SCatalin Marinas * Conversion functions: convert a page and protection to a page entry, 6854f04d8f0SCatalin Marinas * and a page entry and page directory to the page they refer to. 6864f04d8f0SCatalin Marinas */ 6874f04d8f0SCatalin Marinas #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) 6884f04d8f0SCatalin Marinas 6899f25e6adSKirill A. Shutemov #if CONFIG_PGTABLE_LEVELS > 2 6904f04d8f0SCatalin Marinas 6912cf660ebSGavin Shan #define pmd_ERROR(e) \ 6922cf660ebSGavin Shan pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) 6937078db46SCatalin Marinas 6944f04d8f0SCatalin Marinas #define pud_none(pud) (!pud_val(pud)) 695e377ab82SAnshuman Khandual #define pud_bad(pud) (!pud_table(pud)) 696f02ab08aSPunit Agrawal #define pud_present(pud) pte_present(pud_pte(pud)) 69723bc8f69SMuchun Song #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) 6980795edafSWill Deacon #define pud_valid(pud) pte_valid(pud_pte(pud)) 69942b25471SKefeng Wang #define pud_user(pud) pte_user(pud_pte(pud)) 700730a11f9SLiu Shixin #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) 7014f04d8f0SCatalin Marinas 7024f04d8f0SCatalin Marinas static inline void set_pud(pud_t *pudp, pud_t pud) 7034f04d8f0SCatalin Marinas { 704e9ed821bSJames Morse #ifdef __PAGETABLE_PUD_FOLDED 705e9ed821bSJames Morse if (in_swapper_pgdir(pudp)) { 7062330b7caSJun Yao set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud))); 7072330b7caSJun Yao return; 7082330b7caSJun Yao } 709e9ed821bSJames Morse #endif /* __PAGETABLE_PUD_FOLDED */ 7102330b7caSJun Yao 71120a004e7SWill Deacon WRITE_ONCE(*pudp, pud); 7120795edafSWill Deacon 713d0b7a302SWill Deacon if (pud_valid(pud)) { 71498f7685eSWill Deacon dsb(ishst); 715d0b7a302SWill Deacon isb(); 716d0b7a302SWill Deacon } 7174f04d8f0SCatalin Marinas } 7184f04d8f0SCatalin Marinas 7194f04d8f0SCatalin Marinas static inline void pud_clear(pud_t *pudp) 7204f04d8f0SCatalin Marinas { 7214f04d8f0SCatalin Marinas set_pud(pudp, __pud(0)); 7224f04d8f0SCatalin Marinas } 7234f04d8f0SCatalin Marinas 724dca56dcaSMark Rutland static inline phys_addr_t pud_page_paddr(pud_t pud) 7254f04d8f0SCatalin Marinas { 72675387b92SKristina Martsenko return __pud_to_phys(pud); 7274f04d8f0SCatalin Marinas } 7284f04d8f0SCatalin Marinas 7299cf6fa24SAneesh Kumar K.V static inline pmd_t *pud_pgtable(pud_t pud) 730974b9b2cSMike Rapoport { 7319cf6fa24SAneesh Kumar K.V return (pmd_t *)__va(pud_page_paddr(pud)); 732974b9b2cSMike Rapoport } 7337078db46SCatalin Marinas 734974b9b2cSMike Rapoport /* Find an entry in the second-level page table. */ 73520a004e7SWill Deacon #define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t)) 7367078db46SCatalin Marinas 737961faac1SMark Rutland #define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr)) 738961faac1SMark Rutland #define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr)) 739961faac1SMark Rutland #define pmd_clear_fixmap() clear_fixmap(FIX_PMD) 7404f04d8f0SCatalin Marinas 74168ecabd0SGavin Shan #define pud_page(pud) phys_to_page(__pud_to_phys(pud)) 74229e56940SSteve Capper 7436533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */ 7446533945aSArd Biesheuvel #define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr)))) 7456533945aSArd Biesheuvel 746dca56dcaSMark Rutland #else 747dca56dcaSMark Rutland 748dca56dcaSMark Rutland #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; }) 7494e4ff23aSWill Deacon #define pud_user_exec(pud) pud_user(pud) /* Always 0 with folding */ 750dca56dcaSMark Rutland 751961faac1SMark Rutland /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */ 752961faac1SMark Rutland #define pmd_set_fixmap(addr) NULL 753961faac1SMark Rutland #define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp) 754961faac1SMark Rutland #define pmd_clear_fixmap() 755961faac1SMark Rutland 7566533945aSArd Biesheuvel #define pmd_offset_kimg(dir,addr) ((pmd_t *)dir) 7576533945aSArd Biesheuvel 7589f25e6adSKirill A. Shutemov #endif /* CONFIG_PGTABLE_LEVELS > 2 */ 7594f04d8f0SCatalin Marinas 7609f25e6adSKirill A. Shutemov #if CONFIG_PGTABLE_LEVELS > 3 761c79b954bSJungseok Lee 762*0dd4f60aSArd Biesheuvel static __always_inline bool pgtable_l4_enabled(void) 763*0dd4f60aSArd Biesheuvel { 764*0dd4f60aSArd Biesheuvel if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2)) 765*0dd4f60aSArd Biesheuvel return true; 766*0dd4f60aSArd Biesheuvel if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT)) 767*0dd4f60aSArd Biesheuvel return vabits_actual == VA_BITS; 768*0dd4f60aSArd Biesheuvel return alternative_has_cap_unlikely(ARM64_HAS_VA52); 769*0dd4f60aSArd Biesheuvel } 770*0dd4f60aSArd Biesheuvel 771*0dd4f60aSArd Biesheuvel static inline bool mm_pud_folded(const struct mm_struct *mm) 772*0dd4f60aSArd Biesheuvel { 773*0dd4f60aSArd Biesheuvel return !pgtable_l4_enabled(); 774*0dd4f60aSArd Biesheuvel } 775*0dd4f60aSArd Biesheuvel #define mm_pud_folded mm_pud_folded 776*0dd4f60aSArd Biesheuvel 7772cf660ebSGavin Shan #define pud_ERROR(e) \ 7782cf660ebSGavin Shan pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e)) 7797078db46SCatalin Marinas 780*0dd4f60aSArd Biesheuvel #define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d)) 781*0dd4f60aSArd Biesheuvel #define p4d_bad(p4d) (pgtable_l4_enabled() && !(p4d_val(p4d) & 2)) 782*0dd4f60aSArd Biesheuvel #define p4d_present(p4d) (!p4d_none(p4d)) 783c79b954bSJungseok Lee 784e9f63768SMike Rapoport static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) 785c79b954bSJungseok Lee { 786e9f63768SMike Rapoport if (in_swapper_pgdir(p4dp)) { 787e9f63768SMike Rapoport set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d))); 7882330b7caSJun Yao return; 7892330b7caSJun Yao } 7902330b7caSJun Yao 791e9f63768SMike Rapoport WRITE_ONCE(*p4dp, p4d); 792c79b954bSJungseok Lee dsb(ishst); 793eb6a4dccSWill Deacon isb(); 794c79b954bSJungseok Lee } 795c79b954bSJungseok Lee 796e9f63768SMike Rapoport static inline void p4d_clear(p4d_t *p4dp) 797c79b954bSJungseok Lee { 798*0dd4f60aSArd Biesheuvel if (pgtable_l4_enabled()) 799e9f63768SMike Rapoport set_p4d(p4dp, __p4d(0)); 800c79b954bSJungseok Lee } 801c79b954bSJungseok Lee 802e9f63768SMike Rapoport static inline phys_addr_t p4d_page_paddr(p4d_t p4d) 803c79b954bSJungseok Lee { 804e9f63768SMike Rapoport return __p4d_to_phys(p4d); 805c79b954bSJungseok Lee } 806c79b954bSJungseok Lee 807*0dd4f60aSArd Biesheuvel #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) 808*0dd4f60aSArd Biesheuvel 809*0dd4f60aSArd Biesheuvel static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr) 810*0dd4f60aSArd Biesheuvel { 811*0dd4f60aSArd Biesheuvel return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr); 812*0dd4f60aSArd Biesheuvel } 813*0dd4f60aSArd Biesheuvel 814dc4875f0SAneesh Kumar K.V static inline pud_t *p4d_pgtable(p4d_t p4d) 815974b9b2cSMike Rapoport { 816dc4875f0SAneesh Kumar K.V return (pud_t *)__va(p4d_page_paddr(p4d)); 817974b9b2cSMike Rapoport } 8187078db46SCatalin Marinas 819*0dd4f60aSArd Biesheuvel static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr) 820*0dd4f60aSArd Biesheuvel { 821*0dd4f60aSArd Biesheuvel BUG_ON(!pgtable_l4_enabled()); 8227078db46SCatalin Marinas 823*0dd4f60aSArd Biesheuvel return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t); 824*0dd4f60aSArd Biesheuvel } 825*0dd4f60aSArd Biesheuvel 826*0dd4f60aSArd Biesheuvel static inline 827*0dd4f60aSArd Biesheuvel pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr) 828*0dd4f60aSArd Biesheuvel { 829*0dd4f60aSArd Biesheuvel if (!pgtable_l4_enabled()) 830*0dd4f60aSArd Biesheuvel return p4d_to_folded_pud(p4dp, addr); 831*0dd4f60aSArd Biesheuvel return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr); 832*0dd4f60aSArd Biesheuvel } 833*0dd4f60aSArd Biesheuvel #define pud_offset_lockless pud_offset_lockless 834*0dd4f60aSArd Biesheuvel 835*0dd4f60aSArd Biesheuvel static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr) 836*0dd4f60aSArd Biesheuvel { 837*0dd4f60aSArd Biesheuvel return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr); 838*0dd4f60aSArd Biesheuvel } 839*0dd4f60aSArd Biesheuvel #define pud_offset pud_offset 840*0dd4f60aSArd Biesheuvel 841*0dd4f60aSArd Biesheuvel static inline pud_t *pud_set_fixmap(unsigned long addr) 842*0dd4f60aSArd Biesheuvel { 843*0dd4f60aSArd Biesheuvel if (!pgtable_l4_enabled()) 844*0dd4f60aSArd Biesheuvel return NULL; 845*0dd4f60aSArd Biesheuvel return (pud_t *)set_fixmap_offset(FIX_PUD, addr); 846*0dd4f60aSArd Biesheuvel } 847*0dd4f60aSArd Biesheuvel 848*0dd4f60aSArd Biesheuvel static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr) 849*0dd4f60aSArd Biesheuvel { 850*0dd4f60aSArd Biesheuvel if (!pgtable_l4_enabled()) 851*0dd4f60aSArd Biesheuvel return p4d_to_folded_pud(p4dp, addr); 852*0dd4f60aSArd Biesheuvel return pud_set_fixmap(pud_offset_phys(p4dp, addr)); 853*0dd4f60aSArd Biesheuvel } 854*0dd4f60aSArd Biesheuvel 855*0dd4f60aSArd Biesheuvel static inline void pud_clear_fixmap(void) 856*0dd4f60aSArd Biesheuvel { 857*0dd4f60aSArd Biesheuvel if (pgtable_l4_enabled()) 858*0dd4f60aSArd Biesheuvel clear_fixmap(FIX_PUD); 859*0dd4f60aSArd Biesheuvel } 860*0dd4f60aSArd Biesheuvel 861*0dd4f60aSArd Biesheuvel /* use ONLY for statically allocated translation tables */ 862*0dd4f60aSArd Biesheuvel static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr) 863*0dd4f60aSArd Biesheuvel { 864*0dd4f60aSArd Biesheuvel if (!pgtable_l4_enabled()) 865*0dd4f60aSArd Biesheuvel return p4d_to_folded_pud(p4dp, addr); 866*0dd4f60aSArd Biesheuvel return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr)); 867*0dd4f60aSArd Biesheuvel } 868c79b954bSJungseok Lee 869e9f63768SMike Rapoport #define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d))) 8705d96e0cbSJungseok Lee 871dca56dcaSMark Rutland #else 872dca56dcaSMark Rutland 873*0dd4f60aSArd Biesheuvel static inline bool pgtable_l4_enabled(void) { return false; } 874*0dd4f60aSArd Biesheuvel 875e9f63768SMike Rapoport #define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;}) 876dca56dcaSMark Rutland 877961faac1SMark Rutland /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */ 878961faac1SMark Rutland #define pud_set_fixmap(addr) NULL 879961faac1SMark Rutland #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) 880961faac1SMark Rutland #define pud_clear_fixmap() 881961faac1SMark Rutland 8826533945aSArd Biesheuvel #define pud_offset_kimg(dir,addr) ((pud_t *)dir) 8836533945aSArd Biesheuvel 8849f25e6adSKirill A. Shutemov #endif /* CONFIG_PGTABLE_LEVELS > 3 */ 885c79b954bSJungseok Lee 886a6bbf5d4SArd Biesheuvel #if CONFIG_PGTABLE_LEVELS > 4 887a6bbf5d4SArd Biesheuvel 888a6bbf5d4SArd Biesheuvel static __always_inline bool pgtable_l5_enabled(void) 889a6bbf5d4SArd Biesheuvel { 890a6bbf5d4SArd Biesheuvel if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT)) 891a6bbf5d4SArd Biesheuvel return vabits_actual == VA_BITS; 892a6bbf5d4SArd Biesheuvel return alternative_has_cap_unlikely(ARM64_HAS_VA52); 893a6bbf5d4SArd Biesheuvel } 894a6bbf5d4SArd Biesheuvel 895a6bbf5d4SArd Biesheuvel static inline bool mm_p4d_folded(const struct mm_struct *mm) 896a6bbf5d4SArd Biesheuvel { 897a6bbf5d4SArd Biesheuvel return !pgtable_l5_enabled(); 898a6bbf5d4SArd Biesheuvel } 899a6bbf5d4SArd Biesheuvel #define mm_p4d_folded mm_p4d_folded 900a6bbf5d4SArd Biesheuvel 901a6bbf5d4SArd Biesheuvel #define p4d_ERROR(e) \ 902a6bbf5d4SArd Biesheuvel pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e)) 903a6bbf5d4SArd Biesheuvel 904a6bbf5d4SArd Biesheuvel #define pgd_none(pgd) (pgtable_l5_enabled() && !pgd_val(pgd)) 905a6bbf5d4SArd Biesheuvel #define pgd_bad(pgd) (pgtable_l5_enabled() && !(pgd_val(pgd) & 2)) 906a6bbf5d4SArd Biesheuvel #define pgd_present(pgd) (!pgd_none(pgd)) 907a6bbf5d4SArd Biesheuvel 908a6bbf5d4SArd Biesheuvel static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) 909a6bbf5d4SArd Biesheuvel { 910a6bbf5d4SArd Biesheuvel if (in_swapper_pgdir(pgdp)) { 911a6bbf5d4SArd Biesheuvel set_swapper_pgd(pgdp, __pgd(pgd_val(pgd))); 912a6bbf5d4SArd Biesheuvel return; 913a6bbf5d4SArd Biesheuvel } 914a6bbf5d4SArd Biesheuvel 915a6bbf5d4SArd Biesheuvel WRITE_ONCE(*pgdp, pgd); 916a6bbf5d4SArd Biesheuvel dsb(ishst); 917a6bbf5d4SArd Biesheuvel isb(); 918a6bbf5d4SArd Biesheuvel } 919a6bbf5d4SArd Biesheuvel 920a6bbf5d4SArd Biesheuvel static inline void pgd_clear(pgd_t *pgdp) 921a6bbf5d4SArd Biesheuvel { 922a6bbf5d4SArd Biesheuvel if (pgtable_l5_enabled()) 923a6bbf5d4SArd Biesheuvel set_pgd(pgdp, __pgd(0)); 924a6bbf5d4SArd Biesheuvel } 925a6bbf5d4SArd Biesheuvel 926a6bbf5d4SArd Biesheuvel static inline phys_addr_t pgd_page_paddr(pgd_t pgd) 927a6bbf5d4SArd Biesheuvel { 928a6bbf5d4SArd Biesheuvel return __pgd_to_phys(pgd); 929a6bbf5d4SArd Biesheuvel } 930a6bbf5d4SArd Biesheuvel 931a6bbf5d4SArd Biesheuvel #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) 932a6bbf5d4SArd Biesheuvel 933a6bbf5d4SArd Biesheuvel static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr) 934a6bbf5d4SArd Biesheuvel { 935a6bbf5d4SArd Biesheuvel return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr); 936a6bbf5d4SArd Biesheuvel } 937a6bbf5d4SArd Biesheuvel 938a6bbf5d4SArd Biesheuvel static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr) 939a6bbf5d4SArd Biesheuvel { 940a6bbf5d4SArd Biesheuvel BUG_ON(!pgtable_l5_enabled()); 941a6bbf5d4SArd Biesheuvel 942a6bbf5d4SArd Biesheuvel return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t); 943a6bbf5d4SArd Biesheuvel } 944a6bbf5d4SArd Biesheuvel 945a6bbf5d4SArd Biesheuvel static inline 946a6bbf5d4SArd Biesheuvel p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr) 947a6bbf5d4SArd Biesheuvel { 948a6bbf5d4SArd Biesheuvel if (!pgtable_l5_enabled()) 949a6bbf5d4SArd Biesheuvel return pgd_to_folded_p4d(pgdp, addr); 950a6bbf5d4SArd Biesheuvel return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr); 951a6bbf5d4SArd Biesheuvel } 952a6bbf5d4SArd Biesheuvel #define p4d_offset_lockless p4d_offset_lockless 953a6bbf5d4SArd Biesheuvel 954a6bbf5d4SArd Biesheuvel static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr) 955a6bbf5d4SArd Biesheuvel { 956a6bbf5d4SArd Biesheuvel return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr); 957a6bbf5d4SArd Biesheuvel } 958a6bbf5d4SArd Biesheuvel 9596ed8a3a0SArd Biesheuvel static inline p4d_t *p4d_set_fixmap(unsigned long addr) 9606ed8a3a0SArd Biesheuvel { 9616ed8a3a0SArd Biesheuvel if (!pgtable_l5_enabled()) 9626ed8a3a0SArd Biesheuvel return NULL; 9636ed8a3a0SArd Biesheuvel return (p4d_t *)set_fixmap_offset(FIX_P4D, addr); 9646ed8a3a0SArd Biesheuvel } 9656ed8a3a0SArd Biesheuvel 9666ed8a3a0SArd Biesheuvel static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr) 9676ed8a3a0SArd Biesheuvel { 9686ed8a3a0SArd Biesheuvel if (!pgtable_l5_enabled()) 9696ed8a3a0SArd Biesheuvel return pgd_to_folded_p4d(pgdp, addr); 9706ed8a3a0SArd Biesheuvel return p4d_set_fixmap(p4d_offset_phys(pgdp, addr)); 9716ed8a3a0SArd Biesheuvel } 9726ed8a3a0SArd Biesheuvel 9736ed8a3a0SArd Biesheuvel static inline void p4d_clear_fixmap(void) 9746ed8a3a0SArd Biesheuvel { 9756ed8a3a0SArd Biesheuvel if (pgtable_l5_enabled()) 9766ed8a3a0SArd Biesheuvel clear_fixmap(FIX_P4D); 9776ed8a3a0SArd Biesheuvel } 9786ed8a3a0SArd Biesheuvel 9796ed8a3a0SArd Biesheuvel /* use ONLY for statically allocated translation tables */ 9806ed8a3a0SArd Biesheuvel static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr) 9816ed8a3a0SArd Biesheuvel { 9826ed8a3a0SArd Biesheuvel if (!pgtable_l5_enabled()) 9836ed8a3a0SArd Biesheuvel return pgd_to_folded_p4d(pgdp, addr); 9846ed8a3a0SArd Biesheuvel return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr)); 9856ed8a3a0SArd Biesheuvel } 9866ed8a3a0SArd Biesheuvel 987a6bbf5d4SArd Biesheuvel #define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd))) 988a6bbf5d4SArd Biesheuvel 989a6bbf5d4SArd Biesheuvel #else 990a6bbf5d4SArd Biesheuvel 991a6bbf5d4SArd Biesheuvel static inline bool pgtable_l5_enabled(void) { return false; } 992a6bbf5d4SArd Biesheuvel 9936ed8a3a0SArd Biesheuvel /* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */ 9946ed8a3a0SArd Biesheuvel #define p4d_set_fixmap(addr) NULL 9956ed8a3a0SArd Biesheuvel #define p4d_set_fixmap_offset(p4dp, addr) ((p4d_t *)p4dp) 9966ed8a3a0SArd Biesheuvel #define p4d_clear_fixmap() 9976ed8a3a0SArd Biesheuvel 9986ed8a3a0SArd Biesheuvel #define p4d_offset_kimg(dir,addr) ((p4d_t *)dir) 9996ed8a3a0SArd Biesheuvel 1000a6bbf5d4SArd Biesheuvel #endif /* CONFIG_PGTABLE_LEVELS > 4 */ 1001a6bbf5d4SArd Biesheuvel 10022cf660ebSGavin Shan #define pgd_ERROR(e) \ 10032cf660ebSGavin Shan pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e)) 10047078db46SCatalin Marinas 1005961faac1SMark Rutland #define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr)) 1006961faac1SMark Rutland #define pgd_clear_fixmap() clear_fixmap(FIX_PGD) 1007961faac1SMark Rutland 10084f04d8f0SCatalin Marinas static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 10094f04d8f0SCatalin Marinas { 10109f341931SCatalin Marinas /* 10119f341931SCatalin Marinas * Normal and Normal-Tagged are two different memory types and indices 10129f341931SCatalin Marinas * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK. 10139f341931SCatalin Marinas */ 1014a6fadf7eSWill Deacon const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 10159f341931SCatalin Marinas PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP | 10169f341931SCatalin Marinas PTE_ATTRINDX_MASK; 10172f4b829cSCatalin Marinas /* preserve the hardware dirty information */ 10182f4b829cSCatalin Marinas if (pte_hw_dirty(pte)) 10196477c388SAnshuman Khandual pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 10206477c388SAnshuman Khandual 10214f04d8f0SCatalin Marinas pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 10223c069607SJames Houghton /* 10233c069607SJames Houghton * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware 10243c069607SJames Houghton * dirtiness again. 10253c069607SJames Houghton */ 10263c069607SJames Houghton if (pte_sw_dirty(pte)) 10273c069607SJames Houghton pte = pte_mkdirty(pte); 10284f04d8f0SCatalin Marinas return pte; 10294f04d8f0SCatalin Marinas } 10304f04d8f0SCatalin Marinas 10319c7e535fSSteve Capper static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 10329c7e535fSSteve Capper { 10339c7e535fSSteve Capper return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); 10349c7e535fSSteve Capper } 10359c7e535fSSteve Capper 103666dbd6e6SCatalin Marinas #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 103766dbd6e6SCatalin Marinas extern int ptep_set_access_flags(struct vm_area_struct *vma, 103866dbd6e6SCatalin Marinas unsigned long address, pte_t *ptep, 103966dbd6e6SCatalin Marinas pte_t entry, int dirty); 104066dbd6e6SCatalin Marinas 1041282aa705SCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1042282aa705SCatalin Marinas #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 1043282aa705SCatalin Marinas static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 1044282aa705SCatalin Marinas unsigned long address, pmd_t *pmdp, 1045282aa705SCatalin Marinas pmd_t entry, int dirty) 1046282aa705SCatalin Marinas { 1047282aa705SCatalin Marinas return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); 1048282aa705SCatalin Marinas } 104973b20c84SRobin Murphy 105073b20c84SRobin Murphy static inline int pud_devmap(pud_t pud) 105173b20c84SRobin Murphy { 105273b20c84SRobin Murphy return 0; 105373b20c84SRobin Murphy } 105473b20c84SRobin Murphy 105573b20c84SRobin Murphy static inline int pgd_devmap(pgd_t pgd) 105673b20c84SRobin Murphy { 105773b20c84SRobin Murphy return 0; 105873b20c84SRobin Murphy } 1059282aa705SCatalin Marinas #endif 1060282aa705SCatalin Marinas 1061ed928a34STong Tiangen #ifdef CONFIG_PAGE_TABLE_CHECK 1062ed928a34STong Tiangen static inline bool pte_user_accessible_page(pte_t pte) 1063ed928a34STong Tiangen { 1064ed928a34STong Tiangen return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte)); 1065ed928a34STong Tiangen } 1066ed928a34STong Tiangen 1067ed928a34STong Tiangen static inline bool pmd_user_accessible_page(pmd_t pmd) 1068ed928a34STong Tiangen { 106974c2f810SLiu Shixin return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd)); 1070ed928a34STong Tiangen } 1071ed928a34STong Tiangen 1072ed928a34STong Tiangen static inline bool pud_user_accessible_page(pud_t pud) 1073ed928a34STong Tiangen { 1074730a11f9SLiu Shixin return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud)); 1075ed928a34STong Tiangen } 1076ed928a34STong Tiangen #endif 1077ed928a34STong Tiangen 10782f4b829cSCatalin Marinas /* 10792f4b829cSCatalin Marinas * Atomic pte/pmd modifications. 10802f4b829cSCatalin Marinas */ 10812f4b829cSCatalin Marinas #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 108206485053SCatalin Marinas static inline int __ptep_test_and_clear_young(pte_t *ptep) 10832f4b829cSCatalin Marinas { 10843bbf7157SCatalin Marinas pte_t old_pte, pte; 10852f4b829cSCatalin Marinas 10863bbf7157SCatalin Marinas pte = READ_ONCE(*ptep); 10873bbf7157SCatalin Marinas do { 10883bbf7157SCatalin Marinas old_pte = pte; 10893bbf7157SCatalin Marinas pte = pte_mkold(pte); 10903bbf7157SCatalin Marinas pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 10913bbf7157SCatalin Marinas pte_val(old_pte), pte_val(pte)); 10923bbf7157SCatalin Marinas } while (pte_val(pte) != pte_val(old_pte)); 10932f4b829cSCatalin Marinas 10943bbf7157SCatalin Marinas return pte_young(pte); 10952f4b829cSCatalin Marinas } 10962f4b829cSCatalin Marinas 109706485053SCatalin Marinas static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 109806485053SCatalin Marinas unsigned long address, 109906485053SCatalin Marinas pte_t *ptep) 110006485053SCatalin Marinas { 110106485053SCatalin Marinas return __ptep_test_and_clear_young(ptep); 110206485053SCatalin Marinas } 110306485053SCatalin Marinas 11043403e56bSAlex Van Brunt #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 11053403e56bSAlex Van Brunt static inline int ptep_clear_flush_young(struct vm_area_struct *vma, 11063403e56bSAlex Van Brunt unsigned long address, pte_t *ptep) 11073403e56bSAlex Van Brunt { 11083403e56bSAlex Van Brunt int young = ptep_test_and_clear_young(vma, address, ptep); 11093403e56bSAlex Van Brunt 11103403e56bSAlex Van Brunt if (young) { 11113403e56bSAlex Van Brunt /* 11123403e56bSAlex Van Brunt * We can elide the trailing DSB here since the worst that can 11133403e56bSAlex Van Brunt * happen is that a CPU continues to use the young entry in its 11143403e56bSAlex Van Brunt * TLB and we mistakenly reclaim the associated page. The 11153403e56bSAlex Van Brunt * window for such an event is bounded by the next 11163403e56bSAlex Van Brunt * context-switch, which provides a DSB to complete the TLB 11173403e56bSAlex Van Brunt * invalidation. 11183403e56bSAlex Van Brunt */ 11193403e56bSAlex Van Brunt flush_tlb_page_nosync(vma, address); 11203403e56bSAlex Van Brunt } 11213403e56bSAlex Van Brunt 11223403e56bSAlex Van Brunt return young; 11233403e56bSAlex Van Brunt } 11243403e56bSAlex Van Brunt 11252f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 11262f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 11272f4b829cSCatalin Marinas static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 11282f4b829cSCatalin Marinas unsigned long address, 11292f4b829cSCatalin Marinas pmd_t *pmdp) 11302f4b829cSCatalin Marinas { 11312f4b829cSCatalin Marinas return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); 11322f4b829cSCatalin Marinas } 11332f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 11342f4b829cSCatalin Marinas 11352f4b829cSCatalin Marinas #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 11362f4b829cSCatalin Marinas static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 11372f4b829cSCatalin Marinas unsigned long address, pte_t *ptep) 11382f4b829cSCatalin Marinas { 113942b25471SKefeng Wang pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0)); 114042b25471SKefeng Wang 1141aa232204SKemeng Shi page_table_check_pte_clear(mm, pte); 114242b25471SKefeng Wang 114342b25471SKefeng Wang return pte; 11442f4b829cSCatalin Marinas } 11452f4b829cSCatalin Marinas 11462f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1147911f56eeSCatalin Marinas #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1148911f56eeSCatalin Marinas static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 11492f4b829cSCatalin Marinas unsigned long address, pmd_t *pmdp) 11502f4b829cSCatalin Marinas { 115142b25471SKefeng Wang pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0)); 115242b25471SKefeng Wang 11531831414cSKemeng Shi page_table_check_pmd_clear(mm, pmd); 115442b25471SKefeng Wang 115542b25471SKefeng Wang return pmd; 11562f4b829cSCatalin Marinas } 11572f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 11582f4b829cSCatalin Marinas 11592f4b829cSCatalin Marinas /* 11608781bcbcSSteve Capper * ptep_set_wrprotect - mark read-only while trasferring potential hardware 11618781bcbcSSteve Capper * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. 11622f4b829cSCatalin Marinas */ 11632f4b829cSCatalin Marinas #define __HAVE_ARCH_PTEP_SET_WRPROTECT 11642f4b829cSCatalin Marinas static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) 11652f4b829cSCatalin Marinas { 11663bbf7157SCatalin Marinas pte_t old_pte, pte; 11672f4b829cSCatalin Marinas 11683bbf7157SCatalin Marinas pte = READ_ONCE(*ptep); 11693bbf7157SCatalin Marinas do { 11703bbf7157SCatalin Marinas old_pte = pte; 11713bbf7157SCatalin Marinas pte = pte_wrprotect(pte); 11723bbf7157SCatalin Marinas pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 11733bbf7157SCatalin Marinas pte_val(old_pte), pte_val(pte)); 11743bbf7157SCatalin Marinas } while (pte_val(pte) != pte_val(old_pte)); 11752f4b829cSCatalin Marinas } 11762f4b829cSCatalin Marinas 11772f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE 11782f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_SET_WRPROTECT 11792f4b829cSCatalin Marinas static inline void pmdp_set_wrprotect(struct mm_struct *mm, 11802f4b829cSCatalin Marinas unsigned long address, pmd_t *pmdp) 11812f4b829cSCatalin Marinas { 11822f4b829cSCatalin Marinas ptep_set_wrprotect(mm, address, (pte_t *)pmdp); 11832f4b829cSCatalin Marinas } 11841d78a62cSCatalin Marinas 11851d78a62cSCatalin Marinas #define pmdp_establish pmdp_establish 11861d78a62cSCatalin Marinas static inline pmd_t pmdp_establish(struct vm_area_struct *vma, 11871d78a62cSCatalin Marinas unsigned long address, pmd_t *pmdp, pmd_t pmd) 11881d78a62cSCatalin Marinas { 1189a3b83713SKemeng Shi page_table_check_pmd_set(vma->vm_mm, pmdp, pmd); 11901d78a62cSCatalin Marinas return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd))); 11911d78a62cSCatalin Marinas } 11922f4b829cSCatalin Marinas #endif 11932f4b829cSCatalin Marinas 11944f04d8f0SCatalin Marinas /* 11954f04d8f0SCatalin Marinas * Encode and decode a swap entry: 11963676f9efSCatalin Marinas * bits 0-1: present (must be zero) 1197570ef363SDavid Hildenbrand * bits 2: remember PG_anon_exclusive 1198570ef363SDavid Hildenbrand * bits 3-7: swap type 11999b3e661eSKirill A. Shutemov * bits 8-57: swap offset 1200fdc69e7dSCatalin Marinas * bit 58: PTE_PROT_NONE (must be zero) 12014f04d8f0SCatalin Marinas */ 1202570ef363SDavid Hildenbrand #define __SWP_TYPE_SHIFT 3 1203570ef363SDavid Hildenbrand #define __SWP_TYPE_BITS 5 12049b3e661eSKirill A. Shutemov #define __SWP_OFFSET_BITS 50 12054f04d8f0SCatalin Marinas #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) 12064f04d8f0SCatalin Marinas #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) 12073676f9efSCatalin Marinas #define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) 12084f04d8f0SCatalin Marinas 12094f04d8f0SCatalin Marinas #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) 12103676f9efSCatalin Marinas #define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) 12114f04d8f0SCatalin Marinas #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) 12124f04d8f0SCatalin Marinas 12134f04d8f0SCatalin Marinas #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 12144f04d8f0SCatalin Marinas #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) 12154f04d8f0SCatalin Marinas 121653fa117bSAnshuman Khandual #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 121753fa117bSAnshuman Khandual #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) 121853fa117bSAnshuman Khandual #define __swp_entry_to_pmd(swp) __pmd((swp).val) 121953fa117bSAnshuman Khandual #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ 122053fa117bSAnshuman Khandual 12214f04d8f0SCatalin Marinas /* 12224f04d8f0SCatalin Marinas * Ensure that there are not more swap files than can be encoded in the kernel 1223aad9061bSGeert Uytterhoeven * PTEs. 12244f04d8f0SCatalin Marinas */ 12254f04d8f0SCatalin Marinas #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) 12264f04d8f0SCatalin Marinas 122736943abaSSteven Price #ifdef CONFIG_ARM64_MTE 122836943abaSSteven Price 122936943abaSSteven Price #define __HAVE_ARCH_PREPARE_TO_SWAP 123036943abaSSteven Price static inline int arch_prepare_to_swap(struct page *page) 123136943abaSSteven Price { 123236943abaSSteven Price if (system_supports_mte()) 123336943abaSSteven Price return mte_save_tags(page); 123436943abaSSteven Price return 0; 123536943abaSSteven Price } 123636943abaSSteven Price 123736943abaSSteven Price #define __HAVE_ARCH_SWAP_INVALIDATE 123836943abaSSteven Price static inline void arch_swap_invalidate_page(int type, pgoff_t offset) 123936943abaSSteven Price { 124036943abaSSteven Price if (system_supports_mte()) 124136943abaSSteven Price mte_invalidate_tags(type, offset); 124236943abaSSteven Price } 124336943abaSSteven Price 124436943abaSSteven Price static inline void arch_swap_invalidate_area(int type) 124536943abaSSteven Price { 124636943abaSSteven Price if (system_supports_mte()) 124736943abaSSteven Price mte_invalidate_tags_area(type); 124836943abaSSteven Price } 124936943abaSSteven Price 125036943abaSSteven Price #define __HAVE_ARCH_SWAP_RESTORE 1251da08e9b7SMatthew Wilcox (Oracle) static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) 125236943abaSSteven Price { 1253d77e59a8SCatalin Marinas if (system_supports_mte()) 1254d77e59a8SCatalin Marinas mte_restore_tags(entry, &folio->page); 125536943abaSSteven Price } 125636943abaSSteven Price 125736943abaSSteven Price #endif /* CONFIG_ARM64_MTE */ 125836943abaSSteven Price 1259cba3574fSWill Deacon /* 1260cba3574fSWill Deacon * On AArch64, the cache coherency is handled via the set_pte_at() function. 1261cba3574fSWill Deacon */ 12624a169d61SMatthew Wilcox (Oracle) static inline void update_mmu_cache_range(struct vm_fault *vmf, 12634a169d61SMatthew Wilcox (Oracle) struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 12644a169d61SMatthew Wilcox (Oracle) unsigned int nr) 1265cba3574fSWill Deacon { 1266cba3574fSWill Deacon /* 1267120798d2SWill Deacon * We don't do anything here, so there's a very small chance of 1268120798d2SWill Deacon * us retaking a user fault which we just fixed up. The alternative 1269120798d2SWill Deacon * is doing a dsb(ishst), but that penalises the fastpath. 1270cba3574fSWill Deacon */ 1271cba3574fSWill Deacon } 1272cba3574fSWill Deacon 12734a169d61SMatthew Wilcox (Oracle) #define update_mmu_cache(vma, addr, ptep) \ 12744a169d61SMatthew Wilcox (Oracle) update_mmu_cache_range(NULL, vma, addr, ptep, 1) 1275cba3574fSWill Deacon #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) 1276cba3574fSWill Deacon 1277529c4b05SKristina Martsenko #ifdef CONFIG_ARM64_PA_BITS_52 1278529c4b05SKristina Martsenko #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) 1279529c4b05SKristina Martsenko #else 1280529c4b05SKristina Martsenko #define phys_to_ttbr(addr) (addr) 1281529c4b05SKristina Martsenko #endif 1282529c4b05SKristina Martsenko 12836af31226SJia He /* 12846af31226SJia He * On arm64 without hardware Access Flag, copying from user will fail because 12856af31226SJia He * the pte is old and cannot be marked young. So we always end up with zeroed 12866af31226SJia He * page after fork() + CoW for pfn mappings. We don't always have a 12876af31226SJia He * hardware-managed access flag on arm64. 12886af31226SJia He */ 1289e1fd09e3SYu Zhao #define arch_has_hw_pte_young cpu_has_hw_af 12900388f9c7SWill Deacon 12910388f9c7SWill Deacon /* 12920388f9c7SWill Deacon * Experimentally, it's cheap to set the access flag in hardware and we 12930388f9c7SWill Deacon * benefit from prefaulting mappings as 'old' to start with. 12940388f9c7SWill Deacon */ 1295e1fd09e3SYu Zhao #define arch_wants_old_prefaulted_pte cpu_has_hw_af 12966af31226SJia He 1297f8b46c4bSAnshuman Khandual static inline bool pud_sect_supported(void) 1298f8b46c4bSAnshuman Khandual { 1299f8b46c4bSAnshuman Khandual return PAGE_SIZE == SZ_4K; 1300f8b46c4bSAnshuman Khandual } 1301f8b46c4bSAnshuman Khandual 130218107f8aSVladimir Murzin 13035db568e7SAnshuman Khandual #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 13045db568e7SAnshuman Khandual #define ptep_modify_prot_start ptep_modify_prot_start 13055db568e7SAnshuman Khandual extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, 13065db568e7SAnshuman Khandual unsigned long addr, pte_t *ptep); 13075db568e7SAnshuman Khandual 13085db568e7SAnshuman Khandual #define ptep_modify_prot_commit ptep_modify_prot_commit 13095db568e7SAnshuman Khandual extern void ptep_modify_prot_commit(struct vm_area_struct *vma, 13105db568e7SAnshuman Khandual unsigned long addr, pte_t *ptep, 13115db568e7SAnshuman Khandual pte_t old_pte, pte_t new_pte); 13124f04d8f0SCatalin Marinas #endif /* !__ASSEMBLY__ */ 13134f04d8f0SCatalin Marinas 13144f04d8f0SCatalin Marinas #endif /* __ASM_PGTABLE_H */ 1315