xref: /linux/arch/arm64/include/asm/pgtable.h (revision d8310914848223de7ec04d55bd15f013f0dad803)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  */
5 #ifndef __ASM_PGTABLE_H
6 #define __ASM_PGTABLE_H
7 
8 #include <asm/bug.h>
9 #include <asm/proc-fns.h>
10 
11 #include <asm/memory.h>
12 #include <asm/mte.h>
13 #include <asm/pgtable-hwdef.h>
14 #include <asm/pgtable-prot.h>
15 #include <asm/tlbflush.h>
16 
17 /*
18  * VMALLOC range.
19  *
20  * VMALLOC_START: beginning of the kernel vmalloc space
21  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
22  *	and fixed mappings
23  */
24 #define VMALLOC_START		(MODULES_END)
25 #define VMALLOC_END		(VMEMMAP_START - SZ_256M)
26 
27 #define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
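/*
 * A hedged illustration of the bias above: with the vmemmap-based memory
 * model, pfn_to_page() is effectively an array lookup, e.g.
 *
 *	struct page *page = vmemmap + pfn;
 *
 * which lands inside the vmemmap region for any valid pfn precisely because
 * vmemmap itself is shifted down by the pfn of the start of RAM
 * (memstart_addr >> PAGE_SHIFT).
 */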
28 
29 #ifndef __ASSEMBLY__
30 
31 #include <asm/cmpxchg.h>
32 #include <asm/fixmap.h>
33 #include <linux/mmdebug.h>
34 #include <linux/mm_types.h>
35 #include <linux/sched.h>
36 #include <linux/page_table_check.h>
37 
38 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
39 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
40 
41 /* Set stride and tlb_level in flush_*_tlb_range */
42 #define flush_pmd_tlb_range(vma, addr, end)	\
43 	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
44 #define flush_pud_tlb_range(vma, addr, end)	\
45 	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
46 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
47 
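/*
 * THP swap is disabled below whenever MTE is supported; the reason is
 * believed to be that tag save/restore (see the mte_save_tags() and
 * mte_restore_tags() hooks further down) operates on individual pages, so
 * huge pages must be split before they can be swapped out.
 */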
48 static inline bool arch_thp_swp_supported(void)
49 {
50 	return !system_supports_mte();
51 }
52 #define arch_thp_swp_supported arch_thp_swp_supported
53 
54 /*
55  * Outside of a few very special situations (e.g. hibernation), we always
56  * use broadcast TLB invalidation instructions; therefore, a spurious page
57  * fault on one CPU which has been handled concurrently by another CPU
58  * does not need to perform additional invalidation.
59  */
60 #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
61 
62 /*
63  * ZERO_PAGE is a global shared page that is always zero: used
64  * for zero-mapped memory areas etc..
65  */
66 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
67 #define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
68 
69 #define pte_ERROR(e)	\
70 	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
71 
72 /*
73  * Macros to convert between a physical address and its placement in a
74  * page table entry, taking care of 52-bit addresses.
75  */
76 #ifdef CONFIG_ARM64_PA_BITS_52
77 static inline phys_addr_t __pte_to_phys(pte_t pte)
78 {
79 	return (pte_val(pte) & PTE_ADDR_LOW) |
80 		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
81 }
82 static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
83 {
84 	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
85 }
86 #else
87 #define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
88 #define __phys_to_pte_val(phys)	(phys)
89 #endif
90 
91 #define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
92 #define pfn_pte(pfn,prot)	\
93 	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
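/*
 * Worked example of the 52-bit conversion above, assuming the 64K-page
 * layout in which PA bits [51:48] are carried in pte bits [15:12]
 * (PTE_ADDR_HIGH_SHIFT == 36):
 *
 *	pa			= 0x0009876543210000
 *	__phys_to_pte_val(pa)	= 0x0000876543219000
 *	__pte_to_phys(pte)	= 0x0009876543210000 again
 *
 * The low bits pass straight through PTE_ADDR_LOW and the top nibble of the
 * PA is folded into the spare pte bits just above the page offset.
 */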
94 
95 #define pte_none(pte)		(!pte_val(pte))
96 #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
97 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
98 
99 /*
100  * The following only work if pte_present(). Undefined behaviour otherwise.
101  */
102 #define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
103 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
104 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
105 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
106 #define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
107 #define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
108 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
109 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
110 #define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
111 #define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
112 				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
113 
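/*
 * In the two helpers below, the "x - 1 < y - 1" comparison (rather than a
 * plain "x < y") mirrors the generic p?d_addr_end() idiom: it keeps the
 * result correct when __boundary or end wraps to 0 at the very top of the
 * address space.
 */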
114 #define pte_cont_addr_end(addr, end)						\
115 ({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
116 	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
117 })
118 
119 #define pmd_cont_addr_end(addr, end)						\
120 ({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
121 	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
122 })
123 
124 #define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
125 #define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
126 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
127 
128 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
129 /*
130  * Execute-only user mappings do not have the PTE_USER bit set. All valid
131  * kernel mappings have the PTE_UXN bit set.
132  */
133 #define pte_valid_not_user(pte) \
134 	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
135 /*
136  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
137  * so that we don't erroneously return false for pages that have been
138  * remapped as PROT_NONE but are yet to be flushed from the TLB.
139  * Note that we can't make any assumptions based on the state of the access
140  * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
141  * TLB.
142  */
143 #define pte_accessible(mm, pte)	\
144 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
145 
146 /*
147  * p??_access_permitted() is true for valid user mappings (PTE_USER
148  * bit set, subject to the write permission check). Execute-only
149  * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
150  * not set), must return false. PROT_NONE mappings do not have the
151  * PTE_VALID bit set.
152  */
153 #define pte_access_permitted(pte, write) \
154 	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
155 #define pmd_access_permitted(pmd, write) \
156 	(pte_access_permitted(pmd_pte(pmd), (write)))
157 #define pud_access_permitted(pud, write) \
158 	(pte_access_permitted(pud_pte(pud), (write)))
159 
160 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
161 {
162 	pte_val(pte) &= ~pgprot_val(prot);
163 	return pte;
164 }
165 
166 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
167 {
168 	pte_val(pte) |= pgprot_val(prot);
169 	return pte;
170 }
171 
172 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
173 {
174 	pmd_val(pmd) &= ~pgprot_val(prot);
175 	return pmd;
176 }
177 
178 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
179 {
180 	pmd_val(pmd) |= pgprot_val(prot);
181 	return pmd;
182 }
183 
184 static inline pte_t pte_mkwrite_novma(pte_t pte)
185 {
186 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
187 	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
188 	return pte;
189 }
190 
191 static inline pte_t pte_mkclean(pte_t pte)
192 {
193 	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
194 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
195 
196 	return pte;
197 }
198 
199 static inline pte_t pte_mkdirty(pte_t pte)
200 {
201 	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
202 
203 	if (pte_write(pte))
204 		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
205 
206 	return pte;
207 }
208 
209 static inline pte_t pte_wrprotect(pte_t pte)
210 {
211 	/*
212 	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
213 	 * clear), set the PTE_DIRTY bit.
214 	 */
215 	if (pte_hw_dirty(pte))
216 		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
217 
218 	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
219 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
220 	return pte;
221 }
222 
223 static inline pte_t pte_mkold(pte_t pte)
224 {
225 	return clear_pte_bit(pte, __pgprot(PTE_AF));
226 }
227 
228 static inline pte_t pte_mkyoung(pte_t pte)
229 {
230 	return set_pte_bit(pte, __pgprot(PTE_AF));
231 }
232 
233 static inline pte_t pte_mkspecial(pte_t pte)
234 {
235 	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
236 }
237 
238 static inline pte_t pte_mkcont(pte_t pte)
239 {
240 	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
241 	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
242 }
243 
244 static inline pte_t pte_mknoncont(pte_t pte)
245 {
246 	return clear_pte_bit(pte, __pgprot(PTE_CONT));
247 }
248 
249 static inline pte_t pte_mkpresent(pte_t pte)
250 {
251 	return set_pte_bit(pte, __pgprot(PTE_VALID));
252 }
253 
254 static inline pmd_t pmd_mkcont(pmd_t pmd)
255 {
256 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
257 }
258 
259 static inline pte_t pte_mkdevmap(pte_t pte)
260 {
261 	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
262 }
263 
264 static inline void set_pte(pte_t *ptep, pte_t pte)
265 {
266 	WRITE_ONCE(*ptep, pte);
267 
268 	/*
269 	 * Only issue the DSB/ISB if the new pte is a valid kernel mapping;
270 	 * otherwise TLB maintenance or update_mmu_cache() provides the barriers.
271 	 */
272 	if (pte_valid_not_user(pte)) {
273 		dsb(ishst);
274 		isb();
275 	}
276 }
277 
278 extern void __sync_icache_dcache(pte_t pteval);
279 bool pgattr_change_is_safe(u64 old, u64 new);
280 
281 /*
282  * PTE bits configuration in the presence of hardware Dirty Bit Management
283  * (PTE_WRITE == PTE_DBM):
284  *
285  * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
286  *   0      0      |   1           0          0
287  *   0      1      |   1           1          0
288  *   1      0      |   1           0          1
289  *   1      1      |   0           1          x
290  *
291  * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
292  * the page fault mechanism. Checking the dirty status of a pte becomes:
293  *
294  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
295  */
296 
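/*
 * A minimal illustrative helper, not used anywhere (the name is
 * hypothetical): it is exactly what the pte_dirty() check above expands to.
 * With hardware DBM, a store to a clean writable page clears PTE_RDONLY in
 * place, so the second term becomes true without taking a fault.
 */
static inline bool __example_pte_dirty_check(pte_t pte)
{
	/* software dirty bit, or DBM has cleared PTE_RDONLY after a write */
	return pte_sw_dirty(pte) || (pte_write(pte) && !pte_rdonly(pte));
}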
297 static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
298 					   pte_t pte)
299 {
300 	pte_t old_pte;
301 
302 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
303 		return;
304 
305 	old_pte = READ_ONCE(*ptep);
306 
307 	if (!pte_valid(old_pte) || !pte_valid(pte))
308 		return;
309 	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
310 		return;
311 
312 	/*
313 	 * Check for potential race with hardware updates of the pte
314 	 * (ptep_set_access_flags safely changes valid ptes without going
315 	 * through an invalid entry).
316 	 */
317 	VM_WARN_ONCE(!pte_young(pte),
318 		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
319 		     __func__, pte_val(old_pte), pte_val(pte));
320 	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
321 		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
322 		     __func__, pte_val(old_pte), pte_val(pte));
323 	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
324 		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
325 		     __func__, pte_val(old_pte), pte_val(pte));
326 }
327 
328 static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
329 {
330 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
331 		__sync_icache_dcache(pte);
332 
333 	/*
334 	 * If the PTE would provide user space access to the tags associated
335 	 * with it, then ensure that the MTE tags are synchronised.  Although
336 	 * pte_access_permitted() returns false for exec only mappings, they
337 	 * don't expose tags (instruction fetches don't check tags).
338 	 */
339 	if (system_supports_mte() && pte_access_permitted(pte, false) &&
340 	    !pte_special(pte) && pte_tagged(pte))
341 		mte_sync_tags(pte, nr_pages);
342 }
343 
344 /*
345  * Select all bits except the pfn
346  */
347 static inline pgprot_t pte_pgprot(pte_t pte)
348 {
349 	unsigned long pfn = pte_pfn(pte);
350 
351 	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
352 }
353 
354 #define pte_next_pfn pte_next_pfn
355 static inline pte_t pte_next_pfn(pte_t pte)
356 {
357 	return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));
358 }
359 
360 static inline void set_ptes(struct mm_struct *mm,
361 			    unsigned long __always_unused addr,
362 			    pte_t *ptep, pte_t pte, unsigned int nr)
363 {
364 	page_table_check_ptes_set(mm, ptep, pte, nr);
365 	__sync_cache_and_tags(pte, nr);
366 
367 	for (;;) {
368 		__check_safe_pte_update(mm, ptep, pte);
369 		set_pte(ptep, pte);
370 		if (--nr == 0)
371 			break;
372 		ptep++;
373 		pte = pte_next_pfn(pte);
374 	}
375 }
376 #define set_ptes set_ptes
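/*
 * Hedged usage sketch; the helper below is hypothetical and unused, it only
 * shows the calling convention: map 'nr' physically contiguous pages
 * starting at 'pfn' with identical attributes in a single call, letting
 * set_ptes() advance the pfn via pte_next_pfn() for each entry.
 */
static inline void __example_set_contig_ptes(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep,
					     unsigned long pfn, pgprot_t prot,
					     unsigned int nr)
{
	set_ptes(mm, addr, ptep, pfn_pte(pfn, prot), nr);
}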
377 
378 /*
379  * Huge pte definitions.
380  */
381 #define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
382 
383 /*
384  * Hugetlb definitions.
385  */
386 #define HUGE_MAX_HSTATE		4
387 #define HPAGE_SHIFT		PMD_SHIFT
388 #define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
389 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
390 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
391 
392 static inline pte_t pgd_pte(pgd_t pgd)
393 {
394 	return __pte(pgd_val(pgd));
395 }
396 
397 static inline pte_t p4d_pte(p4d_t p4d)
398 {
399 	return __pte(p4d_val(p4d));
400 }
401 
402 static inline pte_t pud_pte(pud_t pud)
403 {
404 	return __pte(pud_val(pud));
405 }
406 
407 static inline pud_t pte_pud(pte_t pte)
408 {
409 	return __pud(pte_val(pte));
410 }
411 
412 static inline pmd_t pud_pmd(pud_t pud)
413 {
414 	return __pmd(pud_val(pud));
415 }
416 
417 static inline pte_t pmd_pte(pmd_t pmd)
418 {
419 	return __pte(pmd_val(pmd));
420 }
421 
422 static inline pmd_t pte_pmd(pte_t pte)
423 {
424 	return __pmd(pte_val(pte));
425 }
426 
427 static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
428 {
429 	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
430 }
431 
432 static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
433 {
434 	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
435 }
436 
437 static inline pte_t pte_swp_mkexclusive(pte_t pte)
438 {
439 	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
440 }
441 
442 static inline int pte_swp_exclusive(pte_t pte)
443 {
444 	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
445 }
446 
447 static inline pte_t pte_swp_clear_exclusive(pte_t pte)
448 {
449 	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
450 }
451 
452 #ifdef CONFIG_NUMA_BALANCING
453 /*
454  * See the comment in include/linux/pgtable.h
455  */
456 static inline int pte_protnone(pte_t pte)
457 {
458 	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
459 }
460 
461 static inline int pmd_protnone(pmd_t pmd)
462 {
463 	return pte_protnone(pmd_pte(pmd));
464 }
465 #endif
466 
467 #define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
468 
469 static inline int pmd_present(pmd_t pmd)
470 {
471 	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
472 }
473 
474 /*
475  * THP definitions.
476  */
477 
478 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
479 static inline int pmd_trans_huge(pmd_t pmd)
480 {
481 	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
482 }
483 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
484 
485 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
486 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
487 #define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
488 #define pmd_user(pmd)		pte_user(pmd_pte(pmd))
489 #define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
490 #define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
491 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
492 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
493 #define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
494 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
495 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
496 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
497 
498 static inline pmd_t pmd_mkinvalid(pmd_t pmd)
499 {
500 	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
501 	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
502 
503 	return pmd;
504 }
505 
506 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
507 
508 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
509 
510 #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
511 
512 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
513 #define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
514 #endif
515 static inline pmd_t pmd_mkdevmap(pmd_t pmd)
516 {
517 	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
518 }
519 
520 #define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
521 #define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
522 #define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
523 #define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
524 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
525 
526 #define pud_young(pud)		pte_young(pud_pte(pud))
527 #define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
528 #define pud_write(pud)		pte_write(pud_pte(pud))
529 
530 #define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))
531 
532 #define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
533 #define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
534 #define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
535 #define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
536 
537 static inline void __set_pte_at(struct mm_struct *mm,
538 				unsigned long __always_unused addr,
539 				pte_t *ptep, pte_t pte, unsigned int nr)
540 {
541 	__sync_cache_and_tags(pte, nr);
542 	__check_safe_pte_update(mm, ptep, pte);
543 	set_pte(ptep, pte);
544 }
545 
546 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
547 			      pmd_t *pmdp, pmd_t pmd)
548 {
549 	page_table_check_pmd_set(mm, pmdp, pmd);
550 	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
551 						PMD_SIZE >> PAGE_SHIFT);
552 }
553 
554 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
555 			      pud_t *pudp, pud_t pud)
556 {
557 	page_table_check_pud_set(mm, pudp, pud);
558 	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
559 						PUD_SIZE >> PAGE_SHIFT);
560 }
561 
562 #define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
563 #define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)
564 
565 #define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
566 #define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)
567 
568 #define __pgprot_modify(prot,mask,bits) \
569 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
570 
571 #define pgprot_nx(prot) \
572 	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
573 
574 /*
575  * Mark the prot value as uncacheable and unbufferable.
576  */
577 #define pgprot_noncached(prot) \
578 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
579 #define pgprot_writecombine(prot) \
580 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
581 #define pgprot_device(prot) \
582 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
583 #define pgprot_tagged(prot) \
584 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
585 #define pgprot_mhp	pgprot_tagged
586 /*
587  * DMA allocations for non-coherent devices use what the Arm architecture calls
588  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
589  * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
590  * is intended for MMIO and thus forbids speculation, preserves access size,
591  * requires strict alignment and can also force write responses to come from the
592  * endpoint.
593  */
594 #define pgprot_dmacoherent(prot) \
595 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
596 			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
597 
598 #define __HAVE_PHYS_MEM_ACCESS_PROT
599 struct file;
600 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
601 				     unsigned long size, pgprot_t vma_prot);
602 
603 #define pmd_none(pmd)		(!pmd_val(pmd))
604 
605 #define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
606 				 PMD_TYPE_TABLE)
607 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
608 				 PMD_TYPE_SECT)
609 #define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
610 #define pmd_bad(pmd)		(!pmd_table(pmd))
611 
612 #define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
613 #define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
614 
615 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
616 static inline bool pud_sect(pud_t pud) { return false; }
617 static inline bool pud_table(pud_t pud) { return true; }
618 #else
619 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
620 				 PUD_TYPE_SECT)
621 #define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
622 				 PUD_TYPE_TABLE)
623 #endif
624 
625 extern pgd_t init_pg_dir[PTRS_PER_PGD];
626 extern pgd_t init_pg_end[];
627 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
628 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
629 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
630 extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
631 
632 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
633 
634 static inline bool in_swapper_pgdir(void *addr)
635 {
636 	return ((unsigned long)addr & PAGE_MASK) ==
637 	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
638 }
639 
640 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
641 {
642 #ifdef __PAGETABLE_PMD_FOLDED
643 	if (in_swapper_pgdir(pmdp)) {
644 		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
645 		return;
646 	}
647 #endif /* __PAGETABLE_PMD_FOLDED */
648 
649 	WRITE_ONCE(*pmdp, pmd);
650 
651 	if (pmd_valid(pmd)) {
652 		dsb(ishst);
653 		isb();
654 	}
655 }
656 
657 static inline void pmd_clear(pmd_t *pmdp)
658 {
659 	set_pmd(pmdp, __pmd(0));
660 }
661 
662 static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
663 {
664 	return __pmd_to_phys(pmd);
665 }
666 
667 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
668 {
669 	return (unsigned long)__va(pmd_page_paddr(pmd));
670 }
671 
672 /* Find an entry in the third-level page table. */
673 #define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
674 
675 #define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
676 #define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
677 #define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
678 
679 #define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))
680 
681 /* use ONLY for statically allocated translation tables */
682 #define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
683 
684 /*
685  * Conversion functions: convert a page and protection to a page entry,
686  * and a page entry and page directory to the page they refer to.
687  */
688 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
689 
690 #if CONFIG_PGTABLE_LEVELS > 2
691 
692 #define pmd_ERROR(e)	\
693 	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
694 
695 #define pud_none(pud)		(!pud_val(pud))
696 #define pud_bad(pud)		(!pud_table(pud))
697 #define pud_present(pud)	pte_present(pud_pte(pud))
698 #define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
699 #define pud_valid(pud)		pte_valid(pud_pte(pud))
700 #define pud_user(pud)		pte_user(pud_pte(pud))
701 #define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))
702 
703 static inline void set_pud(pud_t *pudp, pud_t pud)
704 {
705 #ifdef __PAGETABLE_PUD_FOLDED
706 	if (in_swapper_pgdir(pudp)) {
707 		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
708 		return;
709 	}
710 #endif /* __PAGETABLE_PUD_FOLDED */
711 
712 	WRITE_ONCE(*pudp, pud);
713 
714 	if (pud_valid(pud)) {
715 		dsb(ishst);
716 		isb();
717 	}
718 }
719 
720 static inline void pud_clear(pud_t *pudp)
721 {
722 	set_pud(pudp, __pud(0));
723 }
724 
725 static inline phys_addr_t pud_page_paddr(pud_t pud)
726 {
727 	return __pud_to_phys(pud);
728 }
729 
730 static inline pmd_t *pud_pgtable(pud_t pud)
731 {
732 	return (pmd_t *)__va(pud_page_paddr(pud));
733 }
734 
735 /* Find an entry in the second-level page table. */
736 #define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
737 
738 #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
739 #define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
740 #define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
741 
742 #define pud_page(pud)			phys_to_page(__pud_to_phys(pud))
743 
744 /* use ONLY for statically allocated translation tables */
745 #define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
746 
747 #else
748 
749 #define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
750 #define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */
751 
752 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
753 #define pmd_set_fixmap(addr)		NULL
754 #define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
755 #define pmd_clear_fixmap()
756 
757 #define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)
758 
759 #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
760 
761 #if CONFIG_PGTABLE_LEVELS > 3
762 
763 #define pud_ERROR(e)	\
764 	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
765 
766 #define p4d_none(p4d)		(!p4d_val(p4d))
767 #define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
768 #define p4d_present(p4d)	(p4d_val(p4d))
769 
770 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
771 {
772 	if (in_swapper_pgdir(p4dp)) {
773 		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
774 		return;
775 	}
776 
777 	WRITE_ONCE(*p4dp, p4d);
778 	dsb(ishst);
779 	isb();
780 }
781 
782 static inline void p4d_clear(p4d_t *p4dp)
783 {
784 	set_p4d(p4dp, __p4d(0));
785 }
786 
787 static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
788 {
789 	return __p4d_to_phys(p4d);
790 }
791 
792 static inline pud_t *p4d_pgtable(p4d_t p4d)
793 {
794 	return (pud_t *)__va(p4d_page_paddr(p4d));
795 }
796 
797 /* Find an entry in the first-level page table. */
798 #define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
799 
800 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
801 #define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
802 #define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
803 
804 #define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
805 
806 /* use ONLY for statically allocated translation tables */
807 #define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
808 
809 #else
810 
811 #define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
812 #define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})
813 
814 /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
815 #define pud_set_fixmap(addr)		NULL
816 #define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
817 #define pud_clear_fixmap()
818 
819 #define pud_offset_kimg(dir,addr)	((pud_t *)dir)
820 
821 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
822 
823 #define pgd_ERROR(e)	\
824 	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
825 
826 #define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
827 #define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
828 
829 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
830 {
831 	/*
832 	 * Normal and Normal-Tagged are two different memory types and indices
833 	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
834 	 */
835 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
836 			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
837 			      PTE_ATTRINDX_MASK;
838 	/* preserve the hardware dirty information */
839 	if (pte_hw_dirty(pte))
840 		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
841 
842 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
843 	/*
844 	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
845 	 * dirtiness again.
846 	 */
847 	if (pte_sw_dirty(pte))
848 		pte = pte_mkdirty(pte);
849 	return pte;
850 }
851 
852 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
853 {
854 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
855 }
856 
857 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
858 extern int ptep_set_access_flags(struct vm_area_struct *vma,
859 				 unsigned long address, pte_t *ptep,
860 				 pte_t entry, int dirty);
861 
862 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
863 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
864 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
865 					unsigned long address, pmd_t *pmdp,
866 					pmd_t entry, int dirty)
867 {
868 	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
869 }
870 
871 static inline int pud_devmap(pud_t pud)
872 {
873 	return 0;
874 }
875 
876 static inline int pgd_devmap(pgd_t pgd)
877 {
878 	return 0;
879 }
880 #endif
881 
882 #ifdef CONFIG_PAGE_TABLE_CHECK
883 static inline bool pte_user_accessible_page(pte_t pte)
884 {
885 	return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
886 }
887 
888 static inline bool pmd_user_accessible_page(pmd_t pmd)
889 {
890 	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
891 }
892 
893 static inline bool pud_user_accessible_page(pud_t pud)
894 {
895 	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
896 }
897 #endif
898 
899 /*
900  * Atomic pte/pmd modifications.
901  */
902 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
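/*
 * The helper below clears PTE_AF with a cmpxchg_relaxed() retry loop rather
 * than a plain store, so concurrent updates to other bits (e.g. a hardware
 * DBM write clearing PTE_RDONLY) are never lost; only the access flag is
 * changed.
 */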
903 static inline int __ptep_test_and_clear_young(pte_t *ptep)
904 {
905 	pte_t old_pte, pte;
906 
907 	pte = READ_ONCE(*ptep);
908 	do {
909 		old_pte = pte;
910 		pte = pte_mkold(pte);
911 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
912 					       pte_val(old_pte), pte_val(pte));
913 	} while (pte_val(pte) != pte_val(old_pte));
914 
915 	return pte_young(pte);
916 }
917 
918 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
919 					    unsigned long address,
920 					    pte_t *ptep)
921 {
922 	return __ptep_test_and_clear_young(ptep);
923 }
924 
925 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
926 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
927 					 unsigned long address, pte_t *ptep)
928 {
929 	int young = ptep_test_and_clear_young(vma, address, ptep);
930 
931 	if (young) {
932 		/*
933 		 * We can elide the trailing DSB here since the worst that can
934 		 * happen is that a CPU continues to use the young entry in its
935 		 * TLB and we mistakenly reclaim the associated page. The
936 		 * window for such an event is bounded by the next
937 		 * context-switch, which provides a DSB to complete the TLB
938 		 * invalidation.
939 		 */
940 		flush_tlb_page_nosync(vma, address);
941 	}
942 
943 	return young;
944 }
945 
946 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
947 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
948 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
949 					    unsigned long address,
950 					    pmd_t *pmdp)
951 {
952 	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
953 }
954 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
955 
956 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
957 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
958 				       unsigned long address, pte_t *ptep)
959 {
960 	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
961 
962 	page_table_check_pte_clear(mm, pte);
963 
964 	return pte;
965 }
966 
967 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
968 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
969 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
970 					    unsigned long address, pmd_t *pmdp)
971 {
972 	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
973 
974 	page_table_check_pmd_clear(mm, pmd);
975 
976 	return pmd;
977 }
978 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
979 
980 /*
981  * ptep_set_wrprotect - mark read-only while transferring potential hardware
982  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
983  */
984 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
985 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
986 {
987 	pte_t old_pte, pte;
988 
989 	pte = READ_ONCE(*ptep);
990 	do {
991 		old_pte = pte;
992 		pte = pte_wrprotect(pte);
993 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
994 					       pte_val(old_pte), pte_val(pte));
995 	} while (pte_val(pte) != pte_val(old_pte));
996 }
997 
998 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
999 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1000 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1001 				      unsigned long address, pmd_t *pmdp)
1002 {
1003 	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
1004 }
1005 
1006 #define pmdp_establish pmdp_establish
1007 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1008 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
1009 {
1010 	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
1011 	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
1012 }
1013 #endif
1014 
1015 /*
1016  * Encode and decode a swap entry:
1017  *	bits 0-1:	present (must be zero)
1018  *	bit  2:		remember PG_anon_exclusive
1019  *	bits 3-7:	swap type
1020  *	bits 8-57:	swap offset
1021  *	bit  58:	PTE_PROT_NONE (must be zero)
1022  */
1023 #define __SWP_TYPE_SHIFT	3
1024 #define __SWP_TYPE_BITS		5
1025 #define __SWP_OFFSET_BITS	50
1026 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
1027 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
1028 #define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
1029 
1030 #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
1031 #define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
1032 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
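/*
 * Worked example of the packing above (values purely illustrative):
 * __swp_entry(3, 0x1000) yields
 *
 *	(3 << 3) | (0x1000 << 8) = 0x100018
 *
 * from which __swp_type() recovers 3 and __swp_offset() recovers 0x1000,
 * while bits 0-1 and bit 58 stay clear so the entry is never mistaken for a
 * present pte.
 */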
1033 
1034 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
1035 #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
1036 
1037 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1038 #define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
1039 #define __swp_entry_to_pmd(swp)		__pmd((swp).val)
1040 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
1041 
1042 /*
1043  * Ensure that there are not more swap files than can be encoded in the kernel
1044  * PTEs.
1045  */
1046 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
1047 
1048 #ifdef CONFIG_ARM64_MTE
1049 
1050 #define __HAVE_ARCH_PREPARE_TO_SWAP
1051 static inline int arch_prepare_to_swap(struct page *page)
1052 {
1053 	if (system_supports_mte())
1054 		return mte_save_tags(page);
1055 	return 0;
1056 }
1057 
1058 #define __HAVE_ARCH_SWAP_INVALIDATE
1059 static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
1060 {
1061 	if (system_supports_mte())
1062 		mte_invalidate_tags(type, offset);
1063 }
1064 
1065 static inline void arch_swap_invalidate_area(int type)
1066 {
1067 	if (system_supports_mte())
1068 		mte_invalidate_tags_area(type);
1069 }
1070 
1071 #define __HAVE_ARCH_SWAP_RESTORE
1072 static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
1073 {
1074 	if (system_supports_mte())
1075 		mte_restore_tags(entry, &folio->page);
1076 }
1077 
1078 #endif /* CONFIG_ARM64_MTE */
1079 
1080 /*
1081  * On AArch64, the cache coherency is handled via the set_pte_at() function.
1082  */
1083 static inline void update_mmu_cache_range(struct vm_fault *vmf,
1084 		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
1085 		unsigned int nr)
1086 {
1087 	/*
1088 	 * We don't do anything here, so there's a very small chance of
1089 	 * us retaking a user fault which we just fixed up. The alternative
1090 	 * is doing a dsb(ishst), but that penalises the fastpath.
1091 	 */
1092 }
1093 
1094 #define update_mmu_cache(vma, addr, ptep) \
1095 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
1096 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
1097 
1098 #ifdef CONFIG_ARM64_PA_BITS_52
1099 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
1100 #else
1101 #define phys_to_ttbr(addr)	(addr)
1102 #endif
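/*
 * Sketch of the 52-bit case above, assuming the architected TTBR layout in
 * which bits [51:48] of the table base are held in TTBR bits [5:2]: the
 * ">> 46" moves PA[51:48] down to bits [5:2], the OR keeps the lower PA bits
 * in place, and TTBR_BADDR_MASK_52 clears everything else.
 */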
1103 
1104 /*
1105  * On arm64 without hardware Access Flag, copying from user will fail because
1106  * the pte is old and cannot be marked young. So we always end up with a
1107  * zeroed page after fork() + CoW for pfn mappings. We don't always have a
1108  * hardware-managed access flag on arm64.
1109  */
1110 #define arch_has_hw_pte_young		cpu_has_hw_af
1111 
1112 /*
1113  * Experimentally, it's cheap to set the access flag in hardware and we
1114  * benefit from prefaulting mappings as 'old' to start with.
1115  */
1116 #define arch_wants_old_prefaulted_pte	cpu_has_hw_af
1117 
1118 static inline bool pud_sect_supported(void)
1119 {
1120 	return PAGE_SIZE == SZ_4K;
1121 }
1122 
1123 
1124 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1125 #define ptep_modify_prot_start ptep_modify_prot_start
1126 extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
1127 				    unsigned long addr, pte_t *ptep);
1128 
1129 #define ptep_modify_prot_commit ptep_modify_prot_commit
1130 extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
1131 				    unsigned long addr, pte_t *ptep,
1132 				    pte_t old_pte, pte_t new_pte);
1133 #endif /* !__ASSEMBLY__ */
1134 
1135 #endif /* __ASM_PGTABLE_H */
1136