xref: /linux/arch/arm64/include/asm/pgtable.h (revision c1bd2b4028ae5b4d2ada64b31c40cc44cdf00972)
1caab277bSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
24f04d8f0SCatalin Marinas /*
34f04d8f0SCatalin Marinas  * Copyright (C) 2012 ARM Ltd.
44f04d8f0SCatalin Marinas  */
54f04d8f0SCatalin Marinas #ifndef __ASM_PGTABLE_H
64f04d8f0SCatalin Marinas #define __ASM_PGTABLE_H
74f04d8f0SCatalin Marinas 
82f4b829cSCatalin Marinas #include <asm/bug.h>
94f04d8f0SCatalin Marinas #include <asm/proc-fns.h>
104f04d8f0SCatalin Marinas 
114f04d8f0SCatalin Marinas #include <asm/memory.h>
1234bfeea4SCatalin Marinas #include <asm/mte.h>
134f04d8f0SCatalin Marinas #include <asm/pgtable-hwdef.h>
143eca86e7SMark Rutland #include <asm/pgtable-prot.h>
153403e56bSAlex Van Brunt #include <asm/tlbflush.h>
164f04d8f0SCatalin Marinas 
174f04d8f0SCatalin Marinas /*
183e1907d5SArd Biesheuvel  * VMALLOC range.
1908375198SCatalin Marinas  *
20f9040773SArd Biesheuvel  * VMALLOC_START: beginning of the kernel vmalloc space
21a5315819SMark Brown  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
223e1907d5SArd Biesheuvel  *	and fixed mappings
234f04d8f0SCatalin Marinas  */
24f9040773SArd Biesheuvel #define VMALLOC_START		(MODULES_END)
259ad7c6d5SArd Biesheuvel #define VMALLOC_END		(VMEMMAP_START - SZ_256M)
264f04d8f0SCatalin Marinas 
277bc1a0f9SArd Biesheuvel #define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
287bc1a0f9SArd Biesheuvel 
294f04d8f0SCatalin Marinas #ifndef __ASSEMBLY__
302f4b829cSCatalin Marinas 
313bbf7157SCatalin Marinas #include <asm/cmpxchg.h>
32961faac1SMark Rutland #include <asm/fixmap.h>
332f4b829cSCatalin Marinas #include <linux/mmdebug.h>
3486c9e812SWill Deacon #include <linux/mm_types.h>
3586c9e812SWill Deacon #include <linux/sched.h>
3642b25471SKefeng Wang #include <linux/page_table_check.h>
372f4b829cSCatalin Marinas 
38a7ac1cfaSZhenyu Ye #ifdef CONFIG_TRANSPARENT_HUGEPAGE
39a7ac1cfaSZhenyu Ye #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
40a7ac1cfaSZhenyu Ye 
41a7ac1cfaSZhenyu Ye /* Set stride and tlb_level in flush_*_tlb_range */
42a7ac1cfaSZhenyu Ye #define flush_pmd_tlb_range(vma, addr, end)	\
43a7ac1cfaSZhenyu Ye 	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
44a7ac1cfaSZhenyu Ye #define flush_pud_tlb_range(vma, addr, end)	\
45a7ac1cfaSZhenyu Ye 	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
46a7ac1cfaSZhenyu Ye #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
47a7ac1cfaSZhenyu Ye 
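/*
 * Explanatory note (not part of the upstream header): swapping out a
 * transparent huge page requires saving its MTE tags, and the tag
 * save/restore paths currently operate on base pages only, so THP swap
 * is disabled whenever the system supports MTE.
 */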
48d0637c50SBarry Song static inline bool arch_thp_swp_supported(void)
49d0637c50SBarry Song {
50d0637c50SBarry Song 	return !system_supports_mte();
51d0637c50SBarry Song }
52d0637c50SBarry Song #define arch_thp_swp_supported arch_thp_swp_supported
53d0637c50SBarry Song 
544f04d8f0SCatalin Marinas /*
556a1bdb17SWill Deacon  * Outside of a few very special situations (e.g. hibernation), we always
566a1bdb17SWill Deacon  * use broadcast TLB invalidation instructions; therefore a spurious page
576a1bdb17SWill Deacon  * fault on one CPU which has been handled concurrently by another CPU
586a1bdb17SWill Deacon  * does not need to perform additional invalidation.
596a1bdb17SWill Deacon  */
6099c29133SGerald Schaefer #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
616a1bdb17SWill Deacon 
626a1bdb17SWill Deacon /*
634f04d8f0SCatalin Marinas  * ZERO_PAGE is a global shared page that is always zero: used
644f04d8f0SCatalin Marinas  * for zero-mapped memory areas etc.
654f04d8f0SCatalin Marinas  */
665227cfa7SMark Rutland extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
672077be67SLaura Abbott #define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
684f04d8f0SCatalin Marinas 
692cf660ebSGavin Shan #define pte_ERROR(e)	\
702cf660ebSGavin Shan 	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
717078db46SCatalin Marinas 
7275387b92SKristina Martsenko /*
7375387b92SKristina Martsenko  * Macros to convert between a physical address and its placement in a
7475387b92SKristina Martsenko  * page table entry, taking care of 52-bit addresses.
7575387b92SKristina Martsenko  */
7675387b92SKristina Martsenko #ifdef CONFIG_ARM64_PA_BITS_52
77c7c386fbSArnd Bergmann static inline phys_addr_t __pte_to_phys(pte_t pte)
78c7c386fbSArnd Bergmann {
79c7c386fbSArnd Bergmann 	return (pte_val(pte) & PTE_ADDR_LOW) |
80a4ee2861SAnshuman Khandual 		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
81c7c386fbSArnd Bergmann }
82c7c386fbSArnd Bergmann static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
83c7c386fbSArnd Bergmann {
84a4ee2861SAnshuman Khandual 	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
85c7c386fbSArnd Bergmann }
8675387b92SKristina Martsenko #else
8775387b92SKristina Martsenko #define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
8875387b92SKristina Martsenko #define __phys_to_pte_val(phys)	(phys)
8975387b92SKristina Martsenko #endif
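/*
 * Illustrative note (not part of the upstream header), assuming the
 * 64K-page layout where PTE_ADDR_HIGH covers pte bits [15:12] and
 * PTE_ADDR_HIGH_SHIFT is 36: physical address bits [51:48] are folded
 * into pte bits [15:12] and unfolded again on the way out, e.g.
 *
 *   __phys_to_pte_val(0xfUL << 48) == 0xf000
 *   __pte_to_phys(__pte(0xf000))   == 0xfUL << 48
 *
 * so the two helpers are exact inverses over the supported PA range.
 */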
904f04d8f0SCatalin Marinas 
9175387b92SKristina Martsenko #define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
9275387b92SKristina Martsenko #define pfn_pte(pfn,prot)	\
9375387b92SKristina Martsenko 	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
944f04d8f0SCatalin Marinas 
954f04d8f0SCatalin Marinas #define pte_none(pte)		(!pte_val(pte))
964f04d8f0SCatalin Marinas #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
974f04d8f0SCatalin Marinas #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
987078db46SCatalin Marinas 
994f04d8f0SCatalin Marinas /*
1004f04d8f0SCatalin Marinas  * The following only work if pte_present(). Undefined behaviour otherwise.
1014f04d8f0SCatalin Marinas  */
10284fe6826SSteve Capper #define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
10384fe6826SSteve Capper #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
10484fe6826SSteve Capper #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
10584fe6826SSteve Capper #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
106d0ba9612SAnshuman Khandual #define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
10742b25471SKefeng Wang #define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
108ec663d96SCatalin Marinas #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
10993ef666aSJeremy Linton #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
11073b20c84SRobin Murphy #define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
11134bfeea4SCatalin Marinas #define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
11234bfeea4SCatalin Marinas 				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
1134f04d8f0SCatalin Marinas 
114d27cfa1fSArd Biesheuvel #define pte_cont_addr_end(addr, end)						\
115d27cfa1fSArd Biesheuvel ({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
116d27cfa1fSArd Biesheuvel 	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
117d27cfa1fSArd Biesheuvel })
118d27cfa1fSArd Biesheuvel 
119d27cfa1fSArd Biesheuvel #define pmd_cont_addr_end(addr, end)						\
120d27cfa1fSArd Biesheuvel ({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
121d27cfa1fSArd Biesheuvel 	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
122d27cfa1fSArd Biesheuvel })
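/*
 * Illustrative example (not part of the upstream header), assuming 4K
 * pages where CONT_PTE_SIZE == 16 * PAGE_SIZE == SZ_64K:
 *
 *   pte_cont_addr_end(0x5000, 0x20000) == 0x10000  (clamped to the
 *                                                   next 64K boundary)
 *   pte_cont_addr_end(0x5000, 0x8000)  == 0x8000   (end comes first)
 *
 * The "- 1" in the comparison keeps the macros correct when "end" is
 * the highest address + 1 and wraps to zero.
 */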
123d27cfa1fSArd Biesheuvel 
124d0ba9612SAnshuman Khandual #define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
1252f4b829cSCatalin Marinas #define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
1262f4b829cSCatalin Marinas #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
1272f4b829cSCatalin Marinas 
128766ffb69SWill Deacon #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
12918107f8aSVladimir Murzin /*
13018107f8aSVladimir Murzin  * Execute-only user mappings do not have the PTE_USER bit set. All valid
13118107f8aSVladimir Murzin  * kernel mappings have the PTE_UXN bit set.
13218107f8aSVladimir Murzin  */
133ec663d96SCatalin Marinas #define pte_valid_not_user(pte) \
13418107f8aSVladimir Murzin 	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
13576c714beSWill Deacon /*
13676c714beSWill Deacon  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
13776c714beSWill Deacon  * so that we don't erroneously return false for pages that have been
13876c714beSWill Deacon  * remapped as PROT_NONE but are yet to be flushed from the TLB.
13907509e10SWill Deacon  * Note that we can't make any assumptions based on the state of the access
14007509e10SWill Deacon  * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
14107509e10SWill Deacon  * TLB.
14276c714beSWill Deacon  */
14376c714beSWill Deacon #define pte_accessible(mm, pte)	\
14407509e10SWill Deacon 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
1454f04d8f0SCatalin Marinas 
1466218f96cSCatalin Marinas /*
14718107f8aSVladimir Murzin  * p??_access_permitted() is true for valid user mappings (PTE_USER
14818107f8aSVladimir Murzin  * bit set, subject to the write permission check). Execute-only
14918107f8aSVladimir Murzin  * mappings, such as PROT_EXEC with EPAN (both PTE_USER and PTE_UXN
15018107f8aSVladimir Murzin  * bits clear), must return false. PROT_NONE mappings do not have the
15118107f8aSVladimir Murzin  * PTE_VALID bit set.
1526218f96cSCatalin Marinas  */
1536218f96cSCatalin Marinas #define pte_access_permitted(pte, write) \
15418107f8aSVladimir Murzin 	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
1556218f96cSCatalin Marinas #define pmd_access_permitted(pmd, write) \
1566218f96cSCatalin Marinas 	(pte_access_permitted(pmd_pte(pmd), (write)))
1576218f96cSCatalin Marinas #define pud_access_permitted(pud, write) \
1586218f96cSCatalin Marinas 	(pte_access_permitted(pud_pte(pud), (write)))
1596218f96cSCatalin Marinas 
160b6d4f280SLaura Abbott static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
161b6d4f280SLaura Abbott {
162b6d4f280SLaura Abbott 	pte_val(pte) &= ~pgprot_val(prot);
163b6d4f280SLaura Abbott 	return pte;
164b6d4f280SLaura Abbott }
165b6d4f280SLaura Abbott 
166b6d4f280SLaura Abbott static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
167b6d4f280SLaura Abbott {
168b6d4f280SLaura Abbott 	pte_val(pte) |= pgprot_val(prot);
169b6d4f280SLaura Abbott 	return pte;
170b6d4f280SLaura Abbott }
171b6d4f280SLaura Abbott 
172b65399f6SAnshuman Khandual static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
173b65399f6SAnshuman Khandual {
174b65399f6SAnshuman Khandual 	pmd_val(pmd) &= ~pgprot_val(prot);
175b65399f6SAnshuman Khandual 	return pmd;
176b65399f6SAnshuman Khandual }
177b65399f6SAnshuman Khandual 
178b65399f6SAnshuman Khandual static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
179b65399f6SAnshuman Khandual {
180b65399f6SAnshuman Khandual 	pmd_val(pmd) |= pgprot_val(prot);
181b65399f6SAnshuman Khandual 	return pmd;
182b65399f6SAnshuman Khandual }
183b65399f6SAnshuman Khandual 
1842f0584f3SRick Edgecombe static inline pte_t pte_mkwrite_novma(pte_t pte)
18544b6dfc5SSteve Capper {
18673e86cb0SCatalin Marinas 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
18773e86cb0SCatalin Marinas 	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
18873e86cb0SCatalin Marinas 	return pte;
18944b6dfc5SSteve Capper }
19044b6dfc5SSteve Capper 
19144b6dfc5SSteve Capper static inline pte_t pte_mkclean(pte_t pte)
19244b6dfc5SSteve Capper {
1938781bcbcSSteve Capper 	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
1948781bcbcSSteve Capper 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
1958781bcbcSSteve Capper 
1968781bcbcSSteve Capper 	return pte;
19744b6dfc5SSteve Capper }
19844b6dfc5SSteve Capper 
19944b6dfc5SSteve Capper static inline pte_t pte_mkdirty(pte_t pte)
20044b6dfc5SSteve Capper {
2018781bcbcSSteve Capper 	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
2028781bcbcSSteve Capper 
2038781bcbcSSteve Capper 	if (pte_write(pte))
2048781bcbcSSteve Capper 		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
2058781bcbcSSteve Capper 
2068781bcbcSSteve Capper 	return pte;
20744b6dfc5SSteve Capper }
20844b6dfc5SSteve Capper 
209ff1712f9SWill Deacon static inline pte_t pte_wrprotect(pte_t pte)
210ff1712f9SWill Deacon {
211ff1712f9SWill Deacon 	/*
212ff1712f9SWill Deacon 	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
213ff1712f9SWill Deacon 	 * clear), set the PTE_DIRTY bit.
214ff1712f9SWill Deacon 	 */
215ff1712f9SWill Deacon 	if (pte_hw_dirty(pte))
2166477c388SAnshuman Khandual 		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
217ff1712f9SWill Deacon 
218ff1712f9SWill Deacon 	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
219ff1712f9SWill Deacon 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
220ff1712f9SWill Deacon 	return pte;
221ff1712f9SWill Deacon }
222ff1712f9SWill Deacon 
22344b6dfc5SSteve Capper static inline pte_t pte_mkold(pte_t pte)
22444b6dfc5SSteve Capper {
225b6d4f280SLaura Abbott 	return clear_pte_bit(pte, __pgprot(PTE_AF));
22644b6dfc5SSteve Capper }
22744b6dfc5SSteve Capper 
22844b6dfc5SSteve Capper static inline pte_t pte_mkyoung(pte_t pte)
22944b6dfc5SSteve Capper {
230b6d4f280SLaura Abbott 	return set_pte_bit(pte, __pgprot(PTE_AF));
23144b6dfc5SSteve Capper }
23244b6dfc5SSteve Capper 
23344b6dfc5SSteve Capper static inline pte_t pte_mkspecial(pte_t pte)
23444b6dfc5SSteve Capper {
235b6d4f280SLaura Abbott 	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
23644b6dfc5SSteve Capper }
2374f04d8f0SCatalin Marinas 
23893ef666aSJeremy Linton static inline pte_t pte_mkcont(pte_t pte)
23993ef666aSJeremy Linton {
24066b3923aSDavid Woods 	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
24166b3923aSDavid Woods 	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
24293ef666aSJeremy Linton }
24393ef666aSJeremy Linton 
24493ef666aSJeremy Linton static inline pte_t pte_mknoncont(pte_t pte)
24593ef666aSJeremy Linton {
24693ef666aSJeremy Linton 	return clear_pte_bit(pte, __pgprot(PTE_CONT));
24793ef666aSJeremy Linton }
24893ef666aSJeremy Linton 
2495ebe3a44SJames Morse static inline pte_t pte_mkpresent(pte_t pte)
2505ebe3a44SJames Morse {
2515ebe3a44SJames Morse 	return set_pte_bit(pte, __pgprot(PTE_VALID));
2525ebe3a44SJames Morse }
2535ebe3a44SJames Morse 
25466b3923aSDavid Woods static inline pmd_t pmd_mkcont(pmd_t pmd)
25566b3923aSDavid Woods {
25666b3923aSDavid Woods 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
25766b3923aSDavid Woods }
25866b3923aSDavid Woods 
25973b20c84SRobin Murphy static inline pte_t pte_mkdevmap(pte_t pte)
26073b20c84SRobin Murphy {
26130e23538SJia He 	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
26273b20c84SRobin Murphy }
26373b20c84SRobin Murphy 
2644f04d8f0SCatalin Marinas static inline void set_pte(pte_t *ptep, pte_t pte)
2654f04d8f0SCatalin Marinas {
26620a004e7SWill Deacon 	WRITE_ONCE(*ptep, pte);
2677f0b1bf0SCatalin Marinas 
2687f0b1bf0SCatalin Marinas 	/*
2697f0b1bf0SCatalin Marinas 	 * Only issue barriers if the new pte is a valid kernel mapping;
2707f0b1bf0SCatalin Marinas 	 * otherwise TLB maintenance or update_mmu_cache() provides the necessary barriers.
2717f0b1bf0SCatalin Marinas 	 */
272d0b7a302SWill Deacon 	if (pte_valid_not_user(pte)) {
2737f0b1bf0SCatalin Marinas 		dsb(ishst);
274d0b7a302SWill Deacon 		isb();
275d0b7a302SWill Deacon 	}
2764f04d8f0SCatalin Marinas }
2774f04d8f0SCatalin Marinas 
278907e21c1SShaokun Zhang extern void __sync_icache_dcache(pte_t pteval);
279004fc58fSAnshuman Khandual bool pgattr_change_is_safe(u64 old, u64 new);
2804f04d8f0SCatalin Marinas 
2812f4b829cSCatalin Marinas /*
2822f4b829cSCatalin Marinas  * PTE bits configuration in the presence of hardware Dirty Bit Management
2832f4b829cSCatalin Marinas  * (PTE_WRITE == PTE_DBM):
2842f4b829cSCatalin Marinas  *
2852f4b829cSCatalin Marinas  * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
2862f4b829cSCatalin Marinas  *   0      0      |   1           0          0
2872f4b829cSCatalin Marinas  *   0      1      |   1           1          0
2882f4b829cSCatalin Marinas  *   1      0      |   1           0          1
2892f4b829cSCatalin Marinas  *   1      1      |   0           1          x
2902f4b829cSCatalin Marinas  *
2912f4b829cSCatalin Marinas  * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
2922f4b829cSCatalin Marinas  * the page fault mechanism. Checking the dirty status of a pte becomes:
2932f4b829cSCatalin Marinas  *
294b847415cSCatalin Marinas  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
2952f4b829cSCatalin Marinas  */
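/*
 * Worked example (illustrative note, not part of the upstream header) of
 * the helpers above stepping through the table: start from a clean,
 * writable pte (PTE_RDONLY=1, PTE_WRITE=1, PTE_DIRTY=0).
 *
 *   pte = pte_mkdirty(pte);   - sets PTE_DIRTY and, since the pte is
 *                               writable, clears PTE_RDONLY -> row "1 1"
 *   pte = pte_wrprotect(pte); - the hardware-dirty state (PTE_WRITE set,
 *                               PTE_RDONLY clear) is preserved as
 *                               PTE_DIRTY -> row "1 0"
 */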
2969b604722SMark Rutland 
297004fc58fSAnshuman Khandual static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
2989b604722SMark Rutland 					   pte_t pte)
2994f04d8f0SCatalin Marinas {
30020a004e7SWill Deacon 	pte_t old_pte;
30120a004e7SWill Deacon 
3029b604722SMark Rutland 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
3039b604722SMark Rutland 		return;
3049b604722SMark Rutland 
3059b604722SMark Rutland 	old_pte = READ_ONCE(*ptep);
3069b604722SMark Rutland 
3079b604722SMark Rutland 	if (!pte_valid(old_pte) || !pte_valid(pte))
3089b604722SMark Rutland 		return;
3099b604722SMark Rutland 	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
3109b604722SMark Rutland 		return;
31102522463SWill Deacon 
3122f4b829cSCatalin Marinas 	/*
3139b604722SMark Rutland 	 * Check for potential race with hardware updates of the pte
3149b604722SMark Rutland 	 * (ptep_set_access_flags safely changes valid ptes without going
3159b604722SMark Rutland 	 * through an invalid entry).
3162f4b829cSCatalin Marinas 	 */
31782d34008SCatalin Marinas 	VM_WARN_ONCE(!pte_young(pte),
31882d34008SCatalin Marinas 		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
31920a004e7SWill Deacon 		     __func__, pte_val(old_pte), pte_val(pte));
32020a004e7SWill Deacon 	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
32182d34008SCatalin Marinas 		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
32220a004e7SWill Deacon 		     __func__, pte_val(old_pte), pte_val(pte));
323004fc58fSAnshuman Khandual 	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
324004fc58fSAnshuman Khandual 		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
325004fc58fSAnshuman Khandual 		     __func__, pte_val(old_pte), pte_val(pte));
3262f4b829cSCatalin Marinas }
3272f4b829cSCatalin Marinas 
3283425cec4SRyan Roberts static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
3299b604722SMark Rutland {
3309b604722SMark Rutland 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
3319b604722SMark Rutland 		__sync_icache_dcache(pte);
3329b604722SMark Rutland 
33369e3b846SSteven Price 	/*
33469e3b846SSteven Price 	 * If the PTE would provide user space access to the tags associated
33569e3b846SSteven Price 	 * with it, then ensure that the MTE tags are synchronised.  Although
33669e3b846SSteven Price 	 * pte_access_permitted() returns false for exec-only mappings, they
33769e3b846SSteven Price 	 * don't expose tags (instruction fetches don't check tags).
33869e3b846SSteven Price 	 */
33969e3b846SSteven Price 	if (system_supports_mte() && pte_access_permitted(pte, false) &&
340332c151cSPeter Collingbourne 	    !pte_special(pte) && pte_tagged(pte))
3413425cec4SRyan Roberts 		mte_sync_tags(pte, nr_pages);
3424f04d8f0SCatalin Marinas }
3434f04d8f0SCatalin Marinas 
3446e8f5887SRyan Roberts /*
3456e8f5887SRyan Roberts  * Select all bits except the pfn
3466e8f5887SRyan Roberts  */
3476e8f5887SRyan Roberts static inline pgprot_t pte_pgprot(pte_t pte)
3486e8f5887SRyan Roberts {
3496e8f5887SRyan Roberts 	unsigned long pfn = pte_pfn(pte);
3506e8f5887SRyan Roberts 
3516e8f5887SRyan Roberts 	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
3526e8f5887SRyan Roberts }
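/*
 * Note (not part of the upstream header): pfn_pte(pfn, __pgprot(0))
 * reconstructs exactly the address bits of the pte, so XOR-ing it with
 * the original value clears those bits and leaves only the permission
 * and attribute bits behind.
 */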
3536e8f5887SRyan Roberts 
354*c1bd2b40SRyan Roberts #define pte_advance_pfn pte_advance_pfn
355*c1bd2b40SRyan Roberts static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
3566e8f5887SRyan Roberts {
357*c1bd2b40SRyan Roberts 	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
3586e8f5887SRyan Roberts }
3596e8f5887SRyan Roberts 
360dba2ff49SCatalin Marinas static inline void set_ptes(struct mm_struct *mm,
361dba2ff49SCatalin Marinas 			    unsigned long __always_unused addr,
3624a169d61SMatthew Wilcox (Oracle) 			    pte_t *ptep, pte_t pte, unsigned int nr)
36342b25471SKefeng Wang {
3644a169d61SMatthew Wilcox (Oracle) 	page_table_check_ptes_set(mm, ptep, pte, nr);
3653425cec4SRyan Roberts 	__sync_cache_and_tags(pte, nr);
3664a169d61SMatthew Wilcox (Oracle) 
3674a169d61SMatthew Wilcox (Oracle) 	for (;;) {
3683425cec4SRyan Roberts 		__check_safe_pte_update(mm, ptep, pte);
3693425cec4SRyan Roberts 		set_pte(ptep, pte);
3704a169d61SMatthew Wilcox (Oracle) 		if (--nr == 0)
3714a169d61SMatthew Wilcox (Oracle) 			break;
3724a169d61SMatthew Wilcox (Oracle) 		ptep++;
373*c1bd2b40SRyan Roberts 		pte = pte_advance_pfn(pte, 1);
37442b25471SKefeng Wang 	}
3754a169d61SMatthew Wilcox (Oracle) }
3764a169d61SMatthew Wilcox (Oracle) #define set_ptes set_ptes
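/*
 * Usage sketch (illustrative, not part of the upstream header): map nr
 * physically contiguous pages starting at pfn with a single call:
 *
 *   set_ptes(mm, addr, ptep, pfn_pte(pfn, prot), nr);
 *
 * Each iteration advances the pfn by one via pte_advance_pfn(), so all
 * nr entries share "prot" and map consecutive physical pages.
 */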
37742b25471SKefeng Wang 
3784f04d8f0SCatalin Marinas /*
3794f04d8f0SCatalin Marinas  * Huge pte definitions.
3804f04d8f0SCatalin Marinas  */
381084bd298SSteve Capper #define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
382084bd298SSteve Capper 
383084bd298SSteve Capper /*
384084bd298SSteve Capper  * Hugetlb definitions.
385084bd298SSteve Capper  */
38666b3923aSDavid Woods #define HUGE_MAX_HSTATE		4
387084bd298SSteve Capper #define HPAGE_SHIFT		PMD_SHIFT
388084bd298SSteve Capper #define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
389084bd298SSteve Capper #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
390084bd298SSteve Capper #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
3914f04d8f0SCatalin Marinas 
39275387b92SKristina Martsenko static inline pte_t pgd_pte(pgd_t pgd)
39375387b92SKristina Martsenko {
39475387b92SKristina Martsenko 	return __pte(pgd_val(pgd));
39575387b92SKristina Martsenko }
39675387b92SKristina Martsenko 
397e9f63768SMike Rapoport static inline pte_t p4d_pte(p4d_t p4d)
398e9f63768SMike Rapoport {
399e9f63768SMike Rapoport 	return __pte(p4d_val(p4d));
400e9f63768SMike Rapoport }
401e9f63768SMike Rapoport 
40229e56940SSteve Capper static inline pte_t pud_pte(pud_t pud)
40329e56940SSteve Capper {
40429e56940SSteve Capper 	return __pte(pud_val(pud));
40529e56940SSteve Capper }
40629e56940SSteve Capper 
407eb3f0624SPunit Agrawal static inline pud_t pte_pud(pte_t pte)
408eb3f0624SPunit Agrawal {
409eb3f0624SPunit Agrawal 	return __pud(pte_val(pte));
410eb3f0624SPunit Agrawal }
411eb3f0624SPunit Agrawal 
41229e56940SSteve Capper static inline pmd_t pud_pmd(pud_t pud)
41329e56940SSteve Capper {
41429e56940SSteve Capper 	return __pmd(pud_val(pud));
41529e56940SSteve Capper }
41629e56940SSteve Capper 
4179c7e535fSSteve Capper static inline pte_t pmd_pte(pmd_t pmd)
4189c7e535fSSteve Capper {
4199c7e535fSSteve Capper 	return __pte(pmd_val(pmd));
4209c7e535fSSteve Capper }
421af074848SSteve Capper 
4229c7e535fSSteve Capper static inline pmd_t pte_pmd(pte_t pte)
4239c7e535fSSteve Capper {
4249c7e535fSSteve Capper 	return __pmd(pte_val(pte));
4259c7e535fSSteve Capper }
426af074848SSteve Capper 
427f7f0097aSAnshuman Khandual static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
4288ce837ceSArd Biesheuvel {
429f7f0097aSAnshuman Khandual 	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
430f7f0097aSAnshuman Khandual }
431f7f0097aSAnshuman Khandual 
432f7f0097aSAnshuman Khandual static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
433f7f0097aSAnshuman Khandual {
434f7f0097aSAnshuman Khandual 	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
4358ce837ceSArd Biesheuvel }
4368ce837ceSArd Biesheuvel 
437570ef363SDavid Hildenbrand static inline pte_t pte_swp_mkexclusive(pte_t pte)
438570ef363SDavid Hildenbrand {
439570ef363SDavid Hildenbrand 	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
440570ef363SDavid Hildenbrand }
441570ef363SDavid Hildenbrand 
442570ef363SDavid Hildenbrand static inline int pte_swp_exclusive(pte_t pte)
443570ef363SDavid Hildenbrand {
444570ef363SDavid Hildenbrand 	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
445570ef363SDavid Hildenbrand }
446570ef363SDavid Hildenbrand 
447570ef363SDavid Hildenbrand static inline pte_t pte_swp_clear_exclusive(pte_t pte)
448570ef363SDavid Hildenbrand {
449570ef363SDavid Hildenbrand 	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
450570ef363SDavid Hildenbrand }
451570ef363SDavid Hildenbrand 
45256166230SGanapatrao Kulkarni #ifdef CONFIG_NUMA_BALANCING
45356166230SGanapatrao Kulkarni /*
454ca5999fdSMike Rapoport  * See the comment in include/linux/pgtable.h
45556166230SGanapatrao Kulkarni  */
45656166230SGanapatrao Kulkarni static inline int pte_protnone(pte_t pte)
45756166230SGanapatrao Kulkarni {
45856166230SGanapatrao Kulkarni 	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
45956166230SGanapatrao Kulkarni }
46056166230SGanapatrao Kulkarni 
46156166230SGanapatrao Kulkarni static inline int pmd_protnone(pmd_t pmd)
46256166230SGanapatrao Kulkarni {
46356166230SGanapatrao Kulkarni 	return pte_protnone(pmd_pte(pmd));
46456166230SGanapatrao Kulkarni }
46556166230SGanapatrao Kulkarni #endif
46656166230SGanapatrao Kulkarni 
467b65399f6SAnshuman Khandual #define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
468b65399f6SAnshuman Khandual 
469b65399f6SAnshuman Khandual static inline int pmd_present(pmd_t pmd)
470b65399f6SAnshuman Khandual {
471b65399f6SAnshuman Khandual 	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
472b65399f6SAnshuman Khandual }
473b65399f6SAnshuman Khandual 
474af074848SSteve Capper /*
475af074848SSteve Capper  * THP definitions.
476af074848SSteve Capper  */
477af074848SSteve Capper 
478af074848SSteve Capper #ifdef CONFIG_TRANSPARENT_HUGEPAGE
479b65399f6SAnshuman Khandual static inline int pmd_trans_huge(pmd_t pmd)
480b65399f6SAnshuman Khandual {
481b65399f6SAnshuman Khandual 	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
482b65399f6SAnshuman Khandual }
48329e56940SSteve Capper #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
484af074848SSteve Capper 
485c164e038SKirill A. Shutemov #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
4869c7e535fSSteve Capper #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
4870795edafSWill Deacon #define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
48842b25471SKefeng Wang #define pmd_user(pmd)		pte_user(pmd_pte(pmd))
48942b25471SKefeng Wang #define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
490d55863dbSPeter Zijlstra #define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
4919c7e535fSSteve Capper #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
4929c7e535fSSteve Capper #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
4932f0584f3SRick Edgecombe #define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
49405ee26d9SMinchan Kim #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
4959c7e535fSSteve Capper #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
4969c7e535fSSteve Capper #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
497b65399f6SAnshuman Khandual 
498b65399f6SAnshuman Khandual static inline pmd_t pmd_mkinvalid(pmd_t pmd)
499b65399f6SAnshuman Khandual {
500b65399f6SAnshuman Khandual 	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
501b65399f6SAnshuman Khandual 	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
502b65399f6SAnshuman Khandual 
503b65399f6SAnshuman Khandual 	return pmd;
504b65399f6SAnshuman Khandual }
505af074848SSteve Capper 
5060dbd3b18SSuzuki K Poulose #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
5070dbd3b18SSuzuki K Poulose 
5089c7e535fSSteve Capper #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
509af074848SSteve Capper 
510af074848SSteve Capper #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
511af074848SSteve Capper 
51273b20c84SRobin Murphy #ifdef CONFIG_TRANSPARENT_HUGEPAGE
51373b20c84SRobin Murphy #define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
51473b20c84SRobin Murphy #endif
51530e23538SJia He static inline pmd_t pmd_mkdevmap(pmd_t pmd)
51630e23538SJia He {
51730e23538SJia He 	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
51830e23538SJia He }
51973b20c84SRobin Murphy 
52075387b92SKristina Martsenko #define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
52175387b92SKristina Martsenko #define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
52275387b92SKristina Martsenko #define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
52375387b92SKristina Martsenko #define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
524af074848SSteve Capper #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
525af074848SSteve Capper 
52635a63966SPunit Agrawal #define pud_young(pud)		pte_young(pud_pte(pud))
527eb3f0624SPunit Agrawal #define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
52829e56940SSteve Capper #define pud_write(pud)		pte_write(pud_pte(pud))
52975387b92SKristina Martsenko 
530b8e0ba7cSPunit Agrawal #define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))
531b8e0ba7cSPunit Agrawal 
53275387b92SKristina Martsenko #define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
53375387b92SKristina Martsenko #define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
53475387b92SKristina Martsenko #define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
53575387b92SKristina Martsenko #define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
536af074848SSteve Capper 
537dba2ff49SCatalin Marinas static inline void __set_pte_at(struct mm_struct *mm,
538dba2ff49SCatalin Marinas 				unsigned long __always_unused addr,
5393425cec4SRyan Roberts 				pte_t *ptep, pte_t pte, unsigned int nr)
5403425cec4SRyan Roberts {
5413425cec4SRyan Roberts 	__sync_cache_and_tags(pte, nr);
5423425cec4SRyan Roberts 	__check_safe_pte_update(mm, ptep, pte);
5433425cec4SRyan Roberts 	set_pte(ptep, pte);
5443425cec4SRyan Roberts }
5453425cec4SRyan Roberts 
54642b25471SKefeng Wang static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
54742b25471SKefeng Wang 			      pmd_t *pmdp, pmd_t pmd)
54842b25471SKefeng Wang {
549a3b83713SKemeng Shi 	page_table_check_pmd_set(mm, pmdp, pmd);
5503425cec4SRyan Roberts 	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
5513425cec4SRyan Roberts 						PMD_SIZE >> PAGE_SHIFT);
55242b25471SKefeng Wang }
55342b25471SKefeng Wang 
55442b25471SKefeng Wang static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
55542b25471SKefeng Wang 			      pud_t *pudp, pud_t pud)
55642b25471SKefeng Wang {
5576d144436SKemeng Shi 	page_table_check_pud_set(mm, pudp, pud);
5583425cec4SRyan Roberts 	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
5593425cec4SRyan Roberts 						PUD_SIZE >> PAGE_SHIFT);
56042b25471SKefeng Wang }
561af074848SSteve Capper 
562e9f63768SMike Rapoport #define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
563e9f63768SMike Rapoport #define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)
564e9f63768SMike Rapoport 
56575387b92SKristina Martsenko #define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
56675387b92SKristina Martsenko #define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)
56775387b92SKristina Martsenko 
568a501e324SCatalin Marinas #define __pgprot_modify(prot,mask,bits) \
569a501e324SCatalin Marinas 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
570a501e324SCatalin Marinas 
571cca98e9fSChristoph Hellwig #define pgprot_nx(prot) \
572034aa9cdSWill Deacon 	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
573cca98e9fSChristoph Hellwig 
574af074848SSteve Capper /*
5754f04d8f0SCatalin Marinas  * Mark the prot value as uncacheable and unbufferable.
5764f04d8f0SCatalin Marinas  */
5774f04d8f0SCatalin Marinas #define pgprot_noncached(prot) \
578de2db743SCatalin Marinas 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
5794f04d8f0SCatalin Marinas #define pgprot_writecombine(prot) \
580de2db743SCatalin Marinas 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
581d1e6dc91SLiviu Dudau #define pgprot_device(prot) \
582d1e6dc91SLiviu Dudau 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
583d15dfd31SCatalin Marinas #define pgprot_tagged(prot) \
584d15dfd31SCatalin Marinas 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
585d15dfd31SCatalin Marinas #define pgprot_mhp	pgprot_tagged
5863e4e1d3fSChristoph Hellwig /*
5873e4e1d3fSChristoph Hellwig  * DMA allocations for non-coherent devices use what the Arm architecture calls
5883e4e1d3fSChristoph Hellwig  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
5893e4e1d3fSChristoph Hellwig  * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
5903e4e1d3fSChristoph Hellwig  * is intended for MMIO and thus forbids speculation, preserves access size,
5913e4e1d3fSChristoph Hellwig  * requires strict alignment and can also force write responses to come from the
5923e4e1d3fSChristoph Hellwig  * endpoint.
5933e4e1d3fSChristoph Hellwig  */
594419e2f18SChristoph Hellwig #define pgprot_dmacoherent(prot) \
595419e2f18SChristoph Hellwig 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
596419e2f18SChristoph Hellwig 			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
597419e2f18SChristoph Hellwig 
5984f04d8f0SCatalin Marinas #define __HAVE_PHYS_MEM_ACCESS_PROT
5994f04d8f0SCatalin Marinas struct file;
6004f04d8f0SCatalin Marinas extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
6014f04d8f0SCatalin Marinas 				     unsigned long size, pgprot_t vma_prot);
6024f04d8f0SCatalin Marinas 
6034f04d8f0SCatalin Marinas #define pmd_none(pmd)		(!pmd_val(pmd))
6044f04d8f0SCatalin Marinas 
60536311607SMarc Zyngier #define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
60636311607SMarc Zyngier 				 PMD_TYPE_TABLE)
60736311607SMarc Zyngier #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
60836311607SMarc Zyngier 				 PMD_TYPE_SECT)
60923bc8f69SMuchun Song #define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
610e377ab82SAnshuman Khandual #define pmd_bad(pmd)		(!pmd_table(pmd))
61136311607SMarc Zyngier 
612d55863dbSPeter Zijlstra #define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
613d55863dbSPeter Zijlstra #define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
614d55863dbSPeter Zijlstra 
615cac4b8cdSCatalin Marinas #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
6167d4e2dcfSQian Cai static inline bool pud_sect(pud_t pud) { return false; }
6177d4e2dcfSQian Cai static inline bool pud_table(pud_t pud) { return true; }
618206a2a73SSteve Capper #else
619206a2a73SSteve Capper #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
620206a2a73SSteve Capper 				 PUD_TYPE_SECT)
621523d6e9fSzhichang.yuan #define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
622523d6e9fSzhichang.yuan 				 PUD_TYPE_TABLE)
623206a2a73SSteve Capper #endif
62436311607SMarc Zyngier 
6252330b7caSJun Yao extern pgd_t init_pg_dir[PTRS_PER_PGD];
6262330b7caSJun Yao extern pgd_t init_pg_end[];
6272330b7caSJun Yao extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
6282330b7caSJun Yao extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
6292330b7caSJun Yao extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
630833be850SMark Rutland extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
6312330b7caSJun Yao 
6322330b7caSJun Yao extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
6332330b7caSJun Yao 
6342330b7caSJun Yao static inline bool in_swapper_pgdir(void *addr)
6352330b7caSJun Yao {
6362330b7caSJun Yao 	return ((unsigned long)addr & PAGE_MASK) ==
6372330b7caSJun Yao 	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
6382330b7caSJun Yao }
6392330b7caSJun Yao 
6404f04d8f0SCatalin Marinas static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
6414f04d8f0SCatalin Marinas {
642e9ed821bSJames Morse #ifdef __PAGETABLE_PMD_FOLDED
643e9ed821bSJames Morse 	if (in_swapper_pgdir(pmdp)) {
6442330b7caSJun Yao 		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
6452330b7caSJun Yao 		return;
6462330b7caSJun Yao 	}
647e9ed821bSJames Morse #endif /* __PAGETABLE_PMD_FOLDED */
6482330b7caSJun Yao 
64920a004e7SWill Deacon 	WRITE_ONCE(*pmdp, pmd);
6500795edafSWill Deacon 
651d0b7a302SWill Deacon 	if (pmd_valid(pmd)) {
65298f7685eSWill Deacon 		dsb(ishst);
653d0b7a302SWill Deacon 		isb();
654d0b7a302SWill Deacon 	}
6554f04d8f0SCatalin Marinas }
6564f04d8f0SCatalin Marinas 
6574f04d8f0SCatalin Marinas static inline void pmd_clear(pmd_t *pmdp)
6584f04d8f0SCatalin Marinas {
6594f04d8f0SCatalin Marinas 	set_pmd(pmdp, __pmd(0));
6604f04d8f0SCatalin Marinas }
6614f04d8f0SCatalin Marinas 
662dca56dcaSMark Rutland static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
6634f04d8f0SCatalin Marinas {
66475387b92SKristina Martsenko 	return __pmd_to_phys(pmd);
6654f04d8f0SCatalin Marinas }
6664f04d8f0SCatalin Marinas 
667974b9b2cSMike Rapoport static inline unsigned long pmd_page_vaddr(pmd_t pmd)
668974b9b2cSMike Rapoport {
669974b9b2cSMike Rapoport 	return (unsigned long)__va(pmd_page_paddr(pmd));
670974b9b2cSMike Rapoport }
67174dd022fSQian Cai 
672053520f7SMark Rutland /* Find an entry in the third-level page table. */
673f069fabaSWill Deacon #define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
674053520f7SMark Rutland 
675961faac1SMark Rutland #define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
676961faac1SMark Rutland #define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
677961faac1SMark Rutland #define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
678961faac1SMark Rutland 
67968ecabd0SGavin Shan #define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))
6804f04d8f0SCatalin Marinas 
6816533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */
6826533945aSArd Biesheuvel #define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
6836533945aSArd Biesheuvel 
6844f04d8f0SCatalin Marinas /*
6854f04d8f0SCatalin Marinas  * Conversion functions: convert a page and protection to a page entry,
6864f04d8f0SCatalin Marinas  * and a page entry and page directory to the page they refer to.
6874f04d8f0SCatalin Marinas  */
6884f04d8f0SCatalin Marinas #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
6894f04d8f0SCatalin Marinas 
6909f25e6adSKirill A. Shutemov #if CONFIG_PGTABLE_LEVELS > 2
6914f04d8f0SCatalin Marinas 
6922cf660ebSGavin Shan #define pmd_ERROR(e)	\
6932cf660ebSGavin Shan 	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
6947078db46SCatalin Marinas 
6954f04d8f0SCatalin Marinas #define pud_none(pud)		(!pud_val(pud))
696e377ab82SAnshuman Khandual #define pud_bad(pud)		(!pud_table(pud))
697f02ab08aSPunit Agrawal #define pud_present(pud)	pte_present(pud_pte(pud))
69823bc8f69SMuchun Song #define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
6990795edafSWill Deacon #define pud_valid(pud)		pte_valid(pud_pte(pud))
70042b25471SKefeng Wang #define pud_user(pud)		pte_user(pud_pte(pud))
701730a11f9SLiu Shixin #define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))
7024f04d8f0SCatalin Marinas 
7034f04d8f0SCatalin Marinas static inline void set_pud(pud_t *pudp, pud_t pud)
7044f04d8f0SCatalin Marinas {
705e9ed821bSJames Morse #ifdef __PAGETABLE_PUD_FOLDED
706e9ed821bSJames Morse 	if (in_swapper_pgdir(pudp)) {
7072330b7caSJun Yao 		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
7082330b7caSJun Yao 		return;
7092330b7caSJun Yao 	}
710e9ed821bSJames Morse #endif /* __PAGETABLE_PUD_FOLDED */
7112330b7caSJun Yao 
71220a004e7SWill Deacon 	WRITE_ONCE(*pudp, pud);
7130795edafSWill Deacon 
714d0b7a302SWill Deacon 	if (pud_valid(pud)) {
71598f7685eSWill Deacon 		dsb(ishst);
716d0b7a302SWill Deacon 		isb();
717d0b7a302SWill Deacon 	}
7184f04d8f0SCatalin Marinas }
7194f04d8f0SCatalin Marinas 
7204f04d8f0SCatalin Marinas static inline void pud_clear(pud_t *pudp)
7214f04d8f0SCatalin Marinas {
7224f04d8f0SCatalin Marinas 	set_pud(pudp, __pud(0));
7234f04d8f0SCatalin Marinas }
7244f04d8f0SCatalin Marinas 
725dca56dcaSMark Rutland static inline phys_addr_t pud_page_paddr(pud_t pud)
7264f04d8f0SCatalin Marinas {
72775387b92SKristina Martsenko 	return __pud_to_phys(pud);
7284f04d8f0SCatalin Marinas }
7294f04d8f0SCatalin Marinas 
7309cf6fa24SAneesh Kumar K.V static inline pmd_t *pud_pgtable(pud_t pud)
731974b9b2cSMike Rapoport {
7329cf6fa24SAneesh Kumar K.V 	return (pmd_t *)__va(pud_page_paddr(pud));
733974b9b2cSMike Rapoport }
7347078db46SCatalin Marinas 
735974b9b2cSMike Rapoport /* Find an entry in the second-level page table. */
73620a004e7SWill Deacon #define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
7377078db46SCatalin Marinas 
738961faac1SMark Rutland #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
739961faac1SMark Rutland #define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
740961faac1SMark Rutland #define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
7414f04d8f0SCatalin Marinas 
74268ecabd0SGavin Shan #define pud_page(pud)			phys_to_page(__pud_to_phys(pud))
74329e56940SSteve Capper 
7446533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */
7456533945aSArd Biesheuvel #define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
7466533945aSArd Biesheuvel 
747dca56dcaSMark Rutland #else
748dca56dcaSMark Rutland 
749dca56dcaSMark Rutland #define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
7504e4ff23aSWill Deacon #define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */
751dca56dcaSMark Rutland 
752961faac1SMark Rutland /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
753961faac1SMark Rutland #define pmd_set_fixmap(addr)		NULL
754961faac1SMark Rutland #define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
755961faac1SMark Rutland #define pmd_clear_fixmap()
756961faac1SMark Rutland 
7576533945aSArd Biesheuvel #define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)
7586533945aSArd Biesheuvel 
7599f25e6adSKirill A. Shutemov #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
7604f04d8f0SCatalin Marinas 
7619f25e6adSKirill A. Shutemov #if CONFIG_PGTABLE_LEVELS > 3
762c79b954bSJungseok Lee 
7632cf660ebSGavin Shan #define pud_ERROR(e)	\
7642cf660ebSGavin Shan 	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
7657078db46SCatalin Marinas 
766e9f63768SMike Rapoport #define p4d_none(p4d)		(!p4d_val(p4d))
767e9f63768SMike Rapoport #define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
768e9f63768SMike Rapoport #define p4d_present(p4d)	(p4d_val(p4d))
769c79b954bSJungseok Lee 
770e9f63768SMike Rapoport static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
771c79b954bSJungseok Lee {
772e9f63768SMike Rapoport 	if (in_swapper_pgdir(p4dp)) {
773e9f63768SMike Rapoport 		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
7742330b7caSJun Yao 		return;
7752330b7caSJun Yao 	}
7762330b7caSJun Yao 
777e9f63768SMike Rapoport 	WRITE_ONCE(*p4dp, p4d);
778c79b954bSJungseok Lee 	dsb(ishst);
779eb6a4dccSWill Deacon 	isb();
780c79b954bSJungseok Lee }
781c79b954bSJungseok Lee 
782e9f63768SMike Rapoport static inline void p4d_clear(p4d_t *p4dp)
783c79b954bSJungseok Lee {
784e9f63768SMike Rapoport 	set_p4d(p4dp, __p4d(0));
785c79b954bSJungseok Lee }
786c79b954bSJungseok Lee 
787e9f63768SMike Rapoport static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
788c79b954bSJungseok Lee {
789e9f63768SMike Rapoport 	return __p4d_to_phys(p4d);
790c79b954bSJungseok Lee }
791c79b954bSJungseok Lee 
792dc4875f0SAneesh Kumar K.V static inline pud_t *p4d_pgtable(p4d_t p4d)
793974b9b2cSMike Rapoport {
794dc4875f0SAneesh Kumar K.V 	return (pud_t *)__va(p4d_page_paddr(p4d));
795974b9b2cSMike Rapoport }
7967078db46SCatalin Marinas 
7975845e703SXujun Leng /* Find an entry in the first-level page table. */
798e9f63768SMike Rapoport #define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
7997078db46SCatalin Marinas 
800961faac1SMark Rutland #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
801e9f63768SMike Rapoport #define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
802961faac1SMark Rutland #define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
803c79b954bSJungseok Lee 
804e9f63768SMike Rapoport #define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
8055d96e0cbSJungseok Lee 
8066533945aSArd Biesheuvel /* use ONLY for statically allocated translation tables */
8076533945aSArd Biesheuvel #define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
8086533945aSArd Biesheuvel 
809dca56dcaSMark Rutland #else
810dca56dcaSMark Rutland 
811e9f63768SMike Rapoport #define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
812dca56dcaSMark Rutland #define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})
813dca56dcaSMark Rutland 
814961faac1SMark Rutland /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
815961faac1SMark Rutland #define pud_set_fixmap(addr)		NULL
816961faac1SMark Rutland #define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
817961faac1SMark Rutland #define pud_clear_fixmap()
818961faac1SMark Rutland 
8196533945aSArd Biesheuvel #define pud_offset_kimg(dir,addr)	((pud_t *)dir)
8206533945aSArd Biesheuvel 
8219f25e6adSKirill A. Shutemov #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
822c79b954bSJungseok Lee 
8232cf660ebSGavin Shan #define pgd_ERROR(e)	\
8242cf660ebSGavin Shan 	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
8257078db46SCatalin Marinas 
826961faac1SMark Rutland #define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
827961faac1SMark Rutland #define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
828961faac1SMark Rutland 
8294f04d8f0SCatalin Marinas static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
8304f04d8f0SCatalin Marinas {
8319f341931SCatalin Marinas 	/*
8329f341931SCatalin Marinas 	 * Normal and Normal-Tagged are two different memory types and indices
8339f341931SCatalin Marinas 	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
8349f341931SCatalin Marinas 	 */
835a6fadf7eSWill Deacon 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
8369f341931SCatalin Marinas 			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
8379f341931SCatalin Marinas 			      PTE_ATTRINDX_MASK;
8382f4b829cSCatalin Marinas 	/* preserve the hardware dirty information */
8392f4b829cSCatalin Marinas 	if (pte_hw_dirty(pte))
8406477c388SAnshuman Khandual 		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
8416477c388SAnshuman Khandual 
8424f04d8f0SCatalin Marinas 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
8433c069607SJames Houghton 	/*
8443c069607SJames Houghton 	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
8453c069607SJames Houghton 	 * dirtiness again.
8463c069607SJames Houghton 	 */
8473c069607SJames Houghton 	if (pte_sw_dirty(pte))
8483c069607SJames Houghton 		pte = pte_mkdirty(pte);
8494f04d8f0SCatalin Marinas 	return pte;
8504f04d8f0SCatalin Marinas }
8514f04d8f0SCatalin Marinas 
8529c7e535fSSteve Capper static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
8539c7e535fSSteve Capper {
8549c7e535fSSteve Capper 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
8559c7e535fSSteve Capper }
8569c7e535fSSteve Capper 
85766dbd6e6SCatalin Marinas #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
85866dbd6e6SCatalin Marinas extern int ptep_set_access_flags(struct vm_area_struct *vma,
85966dbd6e6SCatalin Marinas 				 unsigned long address, pte_t *ptep,
86066dbd6e6SCatalin Marinas 				 pte_t entry, int dirty);
86166dbd6e6SCatalin Marinas 
862282aa705SCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE
863282aa705SCatalin Marinas #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
864282aa705SCatalin Marinas static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
865282aa705SCatalin Marinas 					unsigned long address, pmd_t *pmdp,
866282aa705SCatalin Marinas 					pmd_t entry, int dirty)
867282aa705SCatalin Marinas {
868282aa705SCatalin Marinas 	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
869282aa705SCatalin Marinas }
87073b20c84SRobin Murphy 
87173b20c84SRobin Murphy static inline int pud_devmap(pud_t pud)
87273b20c84SRobin Murphy {
87373b20c84SRobin Murphy 	return 0;
87473b20c84SRobin Murphy }
87573b20c84SRobin Murphy 
87673b20c84SRobin Murphy static inline int pgd_devmap(pgd_t pgd)
87773b20c84SRobin Murphy {
87873b20c84SRobin Murphy 	return 0;
87973b20c84SRobin Murphy }
880282aa705SCatalin Marinas #endif
881282aa705SCatalin Marinas 
882ed928a34STong Tiangen #ifdef CONFIG_PAGE_TABLE_CHECK
883ed928a34STong Tiangen static inline bool pte_user_accessible_page(pte_t pte)
884ed928a34STong Tiangen {
885ed928a34STong Tiangen 	return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
886ed928a34STong Tiangen }
887ed928a34STong Tiangen 
888ed928a34STong Tiangen static inline bool pmd_user_accessible_page(pmd_t pmd)
889ed928a34STong Tiangen {
89074c2f810SLiu Shixin 	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
891ed928a34STong Tiangen }
892ed928a34STong Tiangen 
893ed928a34STong Tiangen static inline bool pud_user_accessible_page(pud_t pud)
894ed928a34STong Tiangen {
895730a11f9SLiu Shixin 	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
896ed928a34STong Tiangen }
897ed928a34STong Tiangen #endif
898ed928a34STong Tiangen 
8992f4b829cSCatalin Marinas /*
9002f4b829cSCatalin Marinas  * Atomic pte/pmd modifications.
9012f4b829cSCatalin Marinas  */
9022f4b829cSCatalin Marinas #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
90306485053SCatalin Marinas static inline int __ptep_test_and_clear_young(pte_t *ptep)
9042f4b829cSCatalin Marinas {
9053bbf7157SCatalin Marinas 	pte_t old_pte, pte;
9062f4b829cSCatalin Marinas 
9073bbf7157SCatalin Marinas 	pte = READ_ONCE(*ptep);
9083bbf7157SCatalin Marinas 	do {
9093bbf7157SCatalin Marinas 		old_pte = pte;
9103bbf7157SCatalin Marinas 		pte = pte_mkold(pte);
9113bbf7157SCatalin Marinas 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
9123bbf7157SCatalin Marinas 					       pte_val(old_pte), pte_val(pte));
9133bbf7157SCatalin Marinas 	} while (pte_val(pte) != pte_val(old_pte));
9142f4b829cSCatalin Marinas 
9153bbf7157SCatalin Marinas 	return pte_young(pte);
9162f4b829cSCatalin Marinas }
9172f4b829cSCatalin Marinas 
91806485053SCatalin Marinas static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
91906485053SCatalin Marinas 					    unsigned long address,
92006485053SCatalin Marinas 					    pte_t *ptep)
92106485053SCatalin Marinas {
92206485053SCatalin Marinas 	return __ptep_test_and_clear_young(ptep);
92306485053SCatalin Marinas }
92406485053SCatalin Marinas 
9253403e56bSAlex Van Brunt #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
9263403e56bSAlex Van Brunt static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
9273403e56bSAlex Van Brunt 					 unsigned long address, pte_t *ptep)
9283403e56bSAlex Van Brunt {
9293403e56bSAlex Van Brunt 	int young = ptep_test_and_clear_young(vma, address, ptep);
9303403e56bSAlex Van Brunt 
9313403e56bSAlex Van Brunt 	if (young) {
9323403e56bSAlex Van Brunt 		/*
9333403e56bSAlex Van Brunt 		 * We can elide the trailing DSB here since the worst that can
9343403e56bSAlex Van Brunt 		 * happen is that a CPU continues to use the young entry in its
9353403e56bSAlex Van Brunt 		 * TLB and we mistakenly reclaim the associated page. The
9363403e56bSAlex Van Brunt 		 * window for such an event is bounded by the next
9373403e56bSAlex Van Brunt 		 * context-switch, which provides a DSB to complete the TLB
9383403e56bSAlex Van Brunt 		 * invalidation.
9393403e56bSAlex Van Brunt 		 */
9403403e56bSAlex Van Brunt 		flush_tlb_page_nosync(vma, address);
9413403e56bSAlex Van Brunt 	}
9423403e56bSAlex Van Brunt 
9433403e56bSAlex Van Brunt 	return young;
9443403e56bSAlex Van Brunt }
9453403e56bSAlex Van Brunt 
9462f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE
9472f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
9482f4b829cSCatalin Marinas static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
9492f4b829cSCatalin Marinas 					    unsigned long address,
9502f4b829cSCatalin Marinas 					    pmd_t *pmdp)
9512f4b829cSCatalin Marinas {
9522f4b829cSCatalin Marinas 	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
9532f4b829cSCatalin Marinas }
9542f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
9552f4b829cSCatalin Marinas 
9562f4b829cSCatalin Marinas #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
9572f4b829cSCatalin Marinas static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
9582f4b829cSCatalin Marinas 				       unsigned long address, pte_t *ptep)
9592f4b829cSCatalin Marinas {
96042b25471SKefeng Wang 	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
96142b25471SKefeng Wang 
962aa232204SKemeng Shi 	page_table_check_pte_clear(mm, pte);
96342b25471SKefeng Wang 
96442b25471SKefeng Wang 	return pte;
9652f4b829cSCatalin Marinas }
9662f4b829cSCatalin Marinas 
9672f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE
968911f56eeSCatalin Marinas #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
969911f56eeSCatalin Marinas static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
9702f4b829cSCatalin Marinas 					    unsigned long address, pmd_t *pmdp)
9712f4b829cSCatalin Marinas {
97242b25471SKefeng Wang 	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
97342b25471SKefeng Wang 
9741831414cSKemeng Shi 	page_table_check_pmd_clear(mm, pmd);
97542b25471SKefeng Wang 
97642b25471SKefeng Wang 	return pmd;
9772f4b829cSCatalin Marinas }
9782f4b829cSCatalin Marinas #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
9792f4b829cSCatalin Marinas 
9802f4b829cSCatalin Marinas /*
9818781bcbcSSteve Capper  * ptep_set_wrprotect - mark read-only while transferring potential hardware
9828781bcbcSSteve Capper  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
9832f4b829cSCatalin Marinas  */
9842f4b829cSCatalin Marinas #define __HAVE_ARCH_PTEP_SET_WRPROTECT
9852f4b829cSCatalin Marinas static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
9862f4b829cSCatalin Marinas {
9873bbf7157SCatalin Marinas 	pte_t old_pte, pte;
9882f4b829cSCatalin Marinas 
9893bbf7157SCatalin Marinas 	pte = READ_ONCE(*ptep);
9903bbf7157SCatalin Marinas 	do {
9913bbf7157SCatalin Marinas 		old_pte = pte;
9923bbf7157SCatalin Marinas 		pte = pte_wrprotect(pte);
9933bbf7157SCatalin Marinas 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
9943bbf7157SCatalin Marinas 					       pte_val(old_pte), pte_val(pte));
9953bbf7157SCatalin Marinas 	} while (pte_val(pte) != pte_val(old_pte));
9962f4b829cSCatalin Marinas }
9972f4b829cSCatalin Marinas 
9982f4b829cSCatalin Marinas #ifdef CONFIG_TRANSPARENT_HUGEPAGE
9992f4b829cSCatalin Marinas #define __HAVE_ARCH_PMDP_SET_WRPROTECT
10002f4b829cSCatalin Marinas static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10012f4b829cSCatalin Marinas 				      unsigned long address, pmd_t *pmdp)
10022f4b829cSCatalin Marinas {
10032f4b829cSCatalin Marinas 	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
10042f4b829cSCatalin Marinas }
10051d78a62cSCatalin Marinas 
10061d78a62cSCatalin Marinas #define pmdp_establish pmdp_establish
10071d78a62cSCatalin Marinas static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
10081d78a62cSCatalin Marinas 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
10091d78a62cSCatalin Marinas {
1010a3b83713SKemeng Shi 	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
10111d78a62cSCatalin Marinas 	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
10121d78a62cSCatalin Marinas }
10132f4b829cSCatalin Marinas #endif
10142f4b829cSCatalin Marinas 
10154f04d8f0SCatalin Marinas /*
10164f04d8f0SCatalin Marinas  * Encode and decode a swap entry:
10173676f9efSCatalin Marinas  *	bits 0-1:	present (must be zero)
1018570ef363SDavid Hildenbrand  *	bit  2:		remember PG_anon_exclusive
1019570ef363SDavid Hildenbrand  *	bits 3-7:	swap type
10209b3e661eSKirill A. Shutemov  *	bits 8-57:	swap offset
1021fdc69e7dSCatalin Marinas  *	bit  58:	PTE_PROT_NONE (must be zero)
10224f04d8f0SCatalin Marinas  */
1023570ef363SDavid Hildenbrand #define __SWP_TYPE_SHIFT	3
1024570ef363SDavid Hildenbrand #define __SWP_TYPE_BITS		5
10259b3e661eSKirill A. Shutemov #define __SWP_OFFSET_BITS	50
10264f04d8f0SCatalin Marinas #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
10274f04d8f0SCatalin Marinas #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
10283676f9efSCatalin Marinas #define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
10294f04d8f0SCatalin Marinas 
10304f04d8f0SCatalin Marinas #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
10313676f9efSCatalin Marinas #define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
10324f04d8f0SCatalin Marinas #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
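/*
 * Worked example (illustrative, not part of the upstream header): with
 * __SWP_TYPE_SHIFT == 3 and __SWP_OFFSET_SHIFT == 8,
 *
 *   __swp_entry(3, 0x1234).val == (3 << 3) | (0x1234 << 8) == 0x123418
 *   __swp_type(__swp_entry(3, 0x1234))   == 3
 *   __swp_offset(__swp_entry(3, 0x1234)) == 0x1234
 *
 * Bits 0-1 remain zero, so a swap entry can never be mistaken for a
 * present pte.
 */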
10334f04d8f0SCatalin Marinas 
10344f04d8f0SCatalin Marinas #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
10354f04d8f0SCatalin Marinas #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
10364f04d8f0SCatalin Marinas 
103753fa117bSAnshuman Khandual #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
103853fa117bSAnshuman Khandual #define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
103953fa117bSAnshuman Khandual #define __swp_entry_to_pmd(swp)		__pmd((swp).val)
104053fa117bSAnshuman Khandual #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
104153fa117bSAnshuman Khandual 
10424f04d8f0SCatalin Marinas /*
10434f04d8f0SCatalin Marinas  * Ensure that there are not more swap files than can be encoded in the kernel
1044aad9061bSGeert Uytterhoeven  * PTEs.
10454f04d8f0SCatalin Marinas  */
10464f04d8f0SCatalin Marinas #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
10474f04d8f0SCatalin Marinas 
104836943abaSSteven Price #ifdef CONFIG_ARM64_MTE
104936943abaSSteven Price 
105036943abaSSteven Price #define __HAVE_ARCH_PREPARE_TO_SWAP
105136943abaSSteven Price static inline int arch_prepare_to_swap(struct page *page)
105236943abaSSteven Price {
105336943abaSSteven Price 	if (system_supports_mte())
105436943abaSSteven Price 		return mte_save_tags(page);
105536943abaSSteven Price 	return 0;
105636943abaSSteven Price }
105736943abaSSteven Price 
105836943abaSSteven Price #define __HAVE_ARCH_SWAP_INVALIDATE
105936943abaSSteven Price static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
106036943abaSSteven Price {
106136943abaSSteven Price 	if (system_supports_mte())
106236943abaSSteven Price 		mte_invalidate_tags(type, offset);
106336943abaSSteven Price }
106436943abaSSteven Price 
106536943abaSSteven Price static inline void arch_swap_invalidate_area(int type)
106636943abaSSteven Price {
106736943abaSSteven Price 	if (system_supports_mte())
106836943abaSSteven Price 		mte_invalidate_tags_area(type);
106936943abaSSteven Price }
107036943abaSSteven Price 
107136943abaSSteven Price #define __HAVE_ARCH_SWAP_RESTORE
1072da08e9b7SMatthew Wilcox (Oracle) static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
107336943abaSSteven Price {
1074d77e59a8SCatalin Marinas 	if (system_supports_mte())
1075d77e59a8SCatalin Marinas 		mte_restore_tags(entry, &folio->page);
107636943abaSSteven Price }
107736943abaSSteven Price 
107836943abaSSteven Price #endif /* CONFIG_ARM64_MTE */
107936943abaSSteven Price 
1080cba3574fSWill Deacon /*
1081cba3574fSWill Deacon  * On AArch64, cache coherency is handled via the set_pte_at() function.
1082cba3574fSWill Deacon  */
10834a169d61SMatthew Wilcox (Oracle) static inline void update_mmu_cache_range(struct vm_fault *vmf,
10844a169d61SMatthew Wilcox (Oracle) 		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
10854a169d61SMatthew Wilcox (Oracle) 		unsigned int nr)
1086cba3574fSWill Deacon {
1087cba3574fSWill Deacon 	/*
1088120798d2SWill Deacon 	 * We don't do anything here, so there's a very small chance of
1089120798d2SWill Deacon 	 * us retaking a user fault which we just fixed up. The alternative
1090120798d2SWill Deacon 	 * is doing a dsb(ishst), but that penalises the fastpath.
1091cba3574fSWill Deacon 	 */
1092cba3574fSWill Deacon }
1093cba3574fSWill Deacon 
10944a169d61SMatthew Wilcox (Oracle) #define update_mmu_cache(vma, addr, ptep) \
10954a169d61SMatthew Wilcox (Oracle) 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
1096cba3574fSWill Deacon #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
1097cba3574fSWill Deacon 
1098529c4b05SKristina Martsenko #ifdef CONFIG_ARM64_PA_BITS_52
1099529c4b05SKristina Martsenko #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
1100529c4b05SKristina Martsenko #else
1101529c4b05SKristina Martsenko #define phys_to_ttbr(addr)	(addr)
1102529c4b05SKristina Martsenko #endif
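/*
 * Illustrative note (not part of the upstream header): with 52-bit PAs
 * the TTBR format keeps BADDR[51:48] in register bits [5:2]; the
 * "(addr) >> 46" term performs that fold, e.g. address bit 48 lands in
 * bit 2 of the TTBR value.
 */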
1103529c4b05SKristina Martsenko 
11046af31226SJia He /*
11056af31226SJia He  * On arm64 without hardware Access Flag, copying from user will fail because
11066af31226SJia He  * the pte is old and cannot be marked young. So we always end up with a zeroed
11076af31226SJia He  * page after fork() + CoW for pfn mappings. We don't always have a
11086af31226SJia He  * hardware-managed access flag on arm64.
11096af31226SJia He  */
1110e1fd09e3SYu Zhao #define arch_has_hw_pte_young		cpu_has_hw_af
11110388f9c7SWill Deacon 
11120388f9c7SWill Deacon /*
11130388f9c7SWill Deacon  * Experimentally, it's cheap to set the access flag in hardware and we
11140388f9c7SWill Deacon  * benefit from prefaulting mappings as 'old' to start with.
11150388f9c7SWill Deacon  */
1116e1fd09e3SYu Zhao #define arch_wants_old_prefaulted_pte	cpu_has_hw_af
11176af31226SJia He 
1118f8b46c4bSAnshuman Khandual static inline bool pud_sect_supported(void)
1119f8b46c4bSAnshuman Khandual {
1120f8b46c4bSAnshuman Khandual 	return PAGE_SIZE == SZ_4K;
1121f8b46c4bSAnshuman Khandual }
1122f8b46c4bSAnshuman Khandual 
112318107f8aSVladimir Murzin 
11245db568e7SAnshuman Khandual #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
11255db568e7SAnshuman Khandual #define ptep_modify_prot_start ptep_modify_prot_start
11265db568e7SAnshuman Khandual extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
11275db568e7SAnshuman Khandual 				    unsigned long addr, pte_t *ptep);
11285db568e7SAnshuman Khandual 
11295db568e7SAnshuman Khandual #define ptep_modify_prot_commit ptep_modify_prot_commit
11305db568e7SAnshuman Khandual extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
11315db568e7SAnshuman Khandual 				    unsigned long addr, pte_t *ptep,
11325db568e7SAnshuman Khandual 				    pte_t old_pte, pte_t new_pte);
11334f04d8f0SCatalin Marinas #endif /* !__ASSEMBLY__ */
11344f04d8f0SCatalin Marinas 
11354f04d8f0SCatalin Marinas #endif /* __ASM_PGTABLE_H */
1136