/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif
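
/*
 * A rough sketch of the layout assumed above: when the kernel is built for
 * 52-bit VAs but may run with fewer (VA_BITS != VA_BITS_MIN), the leading
 * part of the vmemmap array describes linear-map pages that do not exist at
 * the smaller VA size. VMEMMAP_UNUSED_NPAGES counts those pages, so the
 * vmalloc area is allowed to extend over the corresponding unused struct
 * page space, stopping SZ_8M short of the used portion as a guard gap.
 */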

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool arch_thp_swp_supported(void)
{
	return !system_supports_mte();
}
#define arch_thp_swp_supported arch_thp_swp_supported

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_LOW)
#define __phys_to_pte_val(phys)	(phys)
#endif
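
/*
 * A worked reading of the 52-bit case (e.g. with 64K pages, where PA bits
 * [51:48] are kept in descriptor bits [15:12] and PTE_ADDR_HIGH_SHIFT is 36):
 * __phys_to_pte_val() ORs (phys >> PTE_ADDR_HIGH_SHIFT) into the value so the
 * high PA bits land in the PTE_ADDR_HIGH field, and __pte_to_phys() shifts
 * them back up; the masks drop any bits that alias in the process. The field
 * and shift differ for the LPA2 (4K/16K) layout, but the scheme is the same.
 */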

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

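/*
 * The helpers below clamp a range to the next contiguous-mapping boundary:
 * they return whichever is smaller of the next CONT_PTE/CONT_PMD aligned
 * address and 'end'. Comparing "x - 1" keeps the check correct if the
 * boundary wraps to zero at the top of the address space.
 */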
#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). It must return false
 * for execute-only mappings, such as PROT_EXEC with EPAN (both PTE_USER
 * and PTE_UXN bits clear). PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(u64 old, u64 new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */

static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised.  Although
	 * pte_access_permitted() returns false for exec only mappings, they
	 * don't expose tags (instruction fetches don't check tags).
	 */
	if (system_supports_mte() && pte_access_permitted(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

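/*
 * set_ptes() writes 'nr' consecutive entries: the first is written as given
 * and the output address is then advanced by PAGE_SIZE for each subsequent
 * entry, so the caller is expected to pass the pte for the first page of a
 * physically contiguous range.
 */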
static inline void set_ptes(struct mm_struct *mm,
			    unsigned long __always_unused addr,
			    pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);
	__sync_cache_and_tags(pte, nr);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += PAGE_SIZE;
	}
}
#define set_ptes set_ptes

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

static inline void __set_pte_at(struct mm_struct *mm,
				unsigned long __always_unused addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	__sync_cache_and_tags(pte, nr);
	__check_safe_pte_update(mm, ptep, pte);
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
						PMD_SIZE >> PAGE_SHIFT);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
						PUD_SIZE >> PAGE_SHIFT);
}

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

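/*
 * One way to read the check below: the fourth level is only in use when
 * 52-bit VAs really are available. With more than four levels configured, or
 * without LPA2, that is known at build time; otherwise it follows the
 * ARM64_HAS_VA52 capability, with a vabits_actual fallback for early boot
 * before the alternatives have been applied.
 */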
static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded  mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

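/*
 * Helper for the runtime-folded case: when pgtable_l4_enabled() is false the
 * top-level table is indexed by the pud-level VA bits, so step back to the
 * (page-aligned) start of the table that p4dp points into and index it with
 * pud_index(addr).
 */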
static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset	pud_offset

static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}

/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded  mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 4 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
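/*
 * The access flag is cleared with a cmpxchg() loop rather than a plain
 * store so that a racing hardware update of the AF/dirty state is not lost;
 * the return value reports whether the entry was young beforehand.
 */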
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2:		remember PG_anon_exclusive
 *	bits 3-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		5
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
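
/*
 * Worked example of the encoding above: __swp_entry(1, 2) puts the type in
 * bits [7:3] and the offset in bits [57:8], i.e. (1 << 3) | (2 << 8) = 0x208.
 * Bits [1:0] stay zero so the entry is never seen as valid/present, and bit 2
 * remains free to carry PG_anon_exclusive.
 */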

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#ifdef CONFIG_ARM64_MTE

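/*
 * With MTE, the allocation tags attached to a page are not part of the data
 * written out to swap, so they are saved separately before swap-out and then
 * restored, or invalidated, when the swap entry is brought back or freed.
 */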
#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
	if (system_supports_mte())
		mte_restore_tags(entry, &folio->page);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
		unsigned int nr)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
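
/*
 * A sketch of the 52-bit case: TTBR_ELx keeps baddr bits [51:48] in register
 * bits [5:2], so ORing in (addr >> 46) moves PA bits [51:48] down to [5:2]
 * before masking with TTBR_BADDR_MASK_52.
 */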

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
#define arch_has_hw_pte_young		cpu_has_hw_af

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}


#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */